From 223d525a0e9df844b33c7e95a7d69752714537c0 Mon Sep 17 00:00:00 2001 From: Luca Comellini Date: Fri, 3 Nov 2023 07:42:31 -0700 Subject: [PATCH 1/4] Change busybox variant to musl (#4608) --- build/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/Dockerfile b/build/Dockerfile index 26f59a7bf7..54abbc6dab 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -69,7 +69,7 @@ ADD --link --chown=101:0 https://raw.githubusercontent.com/nginxinc/k8s-common/m ADD --link --chown=101:0 https://raw.githubusercontent.com/nginxinc/k8s-common/main/files/nap-waf-debian-11.repo nap-waf-11.sources ADD --link --chown=101:0 https://raw.githubusercontent.com/nginxinc/k8s-common/main/files/nap-dos-debian-11.repo nap-dos-11.sources -RUN --mount=from=busybox:uclibc,src=/bin/,dst=/bin/ printf "%s\n" "Acquire::https::pkgs.nginx.com::User-Agent \"k8s-ic-$IC_VERSION${BUILD_OS##debian-plus}-apt\";" >> 90pkgs-nginx \ +RUN --mount=from=busybox:musl,src=/bin/,dst=/bin/ printf "%s\n" "Acquire::https::pkgs.nginx.com::User-Agent \"k8s-ic-$IC_VERSION${BUILD_OS##debian-plus}-apt\";" >> 90pkgs-nginx \ && sed -i -e "s;%VERSION%;${NGINX_PLUS_VERSION};g" *.sources \ && sed -i -e "y/0/1/" -e "1,8s;/centos;/${NGINX_PLUS_VERSION}/centos;" *.repo From b036ba04e1290d570da5909c31b6da455fa8f18f Mon Sep 17 00:00:00 2001 From: Luca Comellini Date: Mon, 6 Nov 2023 12:52:43 -0800 Subject: [PATCH 2/4] Install QEMU for s390x (#4610) --- .github/workflows/build-plus.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-plus.yml b/.github/workflows/build-plus.yml index 82aa9747d8..0bfca6ce84 100644 --- a/.github/workflows/build-plus.yml +++ b/.github/workflows/build-plus.yml @@ -48,7 +48,7 @@ jobs: - name: Setup QEMU uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0 with: - platforms: arm64 + platforms: arm64,s390x if: github.event_name != 'pull_request' - name: Docker Buildx From 
af4b47d77d988d86fb14bd5c4e9e6072662bb7a2 Mon Sep 17 00:00:00 2001 From: Luca Comellini Date: Tue, 7 Nov 2023 01:51:42 -0800 Subject: [PATCH 3/4] Revert "Generate mainfests automatically from Helm (#4278)" (#4618) This reverts commit 1d7c8bd89e74bc5a51f8f18ab04ebf5e023950f7. --- .github/workflows/build-plus.yml | 2 +- .github/workflows/release-pr.yml | 23 +- CONTRIBUTING.md | 3 +- Makefile | 4 - README.md | 2 +- charts/nginx-ingress/README.md | 22 +- .../templates/controller-daemonset.yaml | 4 +- .../templates/controller-deployment.yaml | 8 +- charts/nginx-ingress/values-nsm.yaml | 6 + charts/nginx-ingress/values.yaml | 6 +- config/base/kustomization.yaml | 5 - config/base/namespace.yaml | 4 - .../overlays/app-protect-dos/deployment.yaml | 43 -- .../app-protect-dos/kustomization.yaml | 9 - config/overlays/app-protect-dos/service.yaml | 20 - deploy/app-protect-dos/deploy.yaml | 443 ------------------ deploy/app-protect-waf/deploy.yaml | 378 --------------- deploy/aws-nlb/deploy.yaml | 371 --------------- deploy/azure/deploy.yaml | 367 --------------- deploy/crds | 1 - deploy/daemon-set/deploy.yaml | 364 -------------- deploy/default/deploy.yaml | 365 --------------- deploy/edge/deploy.yaml | 366 --------------- deploy/external-dns/deploy.yaml | 382 --------------- deploy/nginx-plus/deploy.yaml | 367 --------------- deploy/nodeport/deploy.yaml | 366 --------------- deploy/oidc/deploy.yaml | 367 --------------- deploy/openservicemesh/deploy.yaml | 366 --------------- deploy/read-only-fs/deploy.yaml | 375 --------------- deploy/service-insight/deploy.yaml | 365 --------------- {deploy => deployments}/README.md | 0 .../common/ingress-class.yaml | 0 .../common/nginx-config.yaml | 0 .../common/ns-and-sa.yaml | 0 .../daemon-set/nginx-ingress.yaml | 0 .../daemon-set/nginx-plus-ingress.yaml | 0 .../deployment/appprotect-dos-arb.yaml | 0 .../deployment/nginx-ingress.yaml | 0 .../deployment/nginx-plus-ingress.yaml | 0 {tests/data => deployments}/rbac/ap-rbac.yaml | 0 .../data 
=> deployments}/rbac/apdos-rbac.yaml | 0 {tests/data => deployments}/rbac/rbac.yaml | 0 .../service/appprotect-dos-arb-svc.yaml | 0 deployments/service/loadbalancer-aws-elb.yaml | 21 + deployments/service/loadbalancer.yaml | 19 + deployments/service/nodeport.yaml | 18 + docs/content/app-protect-dos/installation.md | 13 +- docs/content/configuration/security.md | 63 ++- .../installation/installation-with-helm.md | 2 +- .../installation-with-manifests.md | 341 +++++++------- .../using-the-jwt-token-docker-secret.md | 10 +- .../troubleshooting/troubleshoot-common.md | 2 +- docs/content/tutorials/custom-listen-ports.md | 2 +- docs/content/tutorials/nginx-ingress-osm.md | 18 +- docs/content/usage-reporting.md | 2 +- .../service-insight/README.md | 21 +- examples/helm-chart/README.md | 44 -- .../helm-chart/app-protect-dos/values.yaml | 10 - .../helm-chart/app-protect-waf/values.yaml | 11 - examples/helm-chart/aws-nlb/values.yaml | 13 - examples/helm-chart/azure/values.yaml | 5 - examples/helm-chart/daemon-set/values.yaml | 3 - examples/helm-chart/default/values.yaml | 2 - examples/helm-chart/edge/values.yaml | 7 - examples/helm-chart/external-dns/values.yaml | 3 - examples/helm-chart/nginx-plus/values.yaml | 8 - examples/helm-chart/nodeport/values.yaml | 4 - examples/helm-chart/oidc/values.yaml | 9 - .../helm-chart/openservicemesh/values.yaml | 5 - examples/helm-chart/read-only-fs/values.yaml | 5 - .../helm-chart/service-insight/values.yaml | 5 - hack/generate-manifests.sh | 20 - perf-tests/suite/test_ap_reload_perf.py | 2 +- tests/Dockerfile | 4 +- tests/Makefile | 2 +- tests/settings.py | 1 + tests/suite/fixtures/fixtures.py | 12 +- tests/suite/fixtures/ic_fixtures.py | 18 +- tests/suite/test_ac_policies.py | 4 +- tests/suite/test_ac_policies_vsr.py | 4 +- tests/suite/test_annotations.py | 4 +- tests/suite/test_app_protect_integration.py | 2 +- .../test_app_protect_waf_policies_grpc.py | 4 +- tests/suite/test_custom_annotations.py | 4 +- 
tests/suite/test_default_server.py | 2 +- .../test_transport_server_external_name.py | 6 +- .../test_transport_server_service_insight.py | 2 +- tests/suite/test_ts_tls_passthrough.py | 4 +- tests/suite/test_v_s_route_grpc.py | 6 +- tests/suite/test_virtual_server.py | 6 +- .../test_virtual_server_configmap_keys.py | 4 +- tests/suite/test_virtual_server_grpc.py | 6 +- tests/suite/test_virtual_server_mixed_grpc.py | 6 +- tests/suite/utils/resources_utils.py | 10 +- 94 files changed, 421 insertions(+), 5782 deletions(-) create mode 100644 charts/nginx-ingress/values-nsm.yaml delete mode 100644 config/base/kustomization.yaml delete mode 100644 config/base/namespace.yaml delete mode 100644 config/overlays/app-protect-dos/deployment.yaml delete mode 100644 config/overlays/app-protect-dos/kustomization.yaml delete mode 100644 config/overlays/app-protect-dos/service.yaml delete mode 100644 deploy/app-protect-dos/deploy.yaml delete mode 100644 deploy/app-protect-waf/deploy.yaml delete mode 100644 deploy/aws-nlb/deploy.yaml delete mode 100644 deploy/azure/deploy.yaml delete mode 120000 deploy/crds delete mode 100644 deploy/daemon-set/deploy.yaml delete mode 100644 deploy/default/deploy.yaml delete mode 100644 deploy/edge/deploy.yaml delete mode 100644 deploy/external-dns/deploy.yaml delete mode 100644 deploy/nginx-plus/deploy.yaml delete mode 100644 deploy/nodeport/deploy.yaml delete mode 100644 deploy/oidc/deploy.yaml delete mode 100644 deploy/openservicemesh/deploy.yaml delete mode 100644 deploy/read-only-fs/deploy.yaml delete mode 100644 deploy/service-insight/deploy.yaml rename {deploy => deployments}/README.md (100%) rename {tests/data => deployments}/common/ingress-class.yaml (100%) rename {tests/data => deployments}/common/nginx-config.yaml (100%) rename {tests/data => deployments}/common/ns-and-sa.yaml (100%) rename {tests/data => deployments}/daemon-set/nginx-ingress.yaml (100%) rename {tests/data => deployments}/daemon-set/nginx-plus-ingress.yaml (100%) rename 
{tests/data => deployments}/deployment/appprotect-dos-arb.yaml (100%) rename {tests/data => deployments}/deployment/nginx-ingress.yaml (100%) rename {tests/data => deployments}/deployment/nginx-plus-ingress.yaml (100%) rename {tests/data => deployments}/rbac/ap-rbac.yaml (100%) rename {tests/data => deployments}/rbac/apdos-rbac.yaml (100%) rename {tests/data => deployments}/rbac/rbac.yaml (100%) rename {tests/data => deployments}/service/appprotect-dos-arb-svc.yaml (100%) create mode 100644 deployments/service/loadbalancer-aws-elb.yaml create mode 100644 deployments/service/loadbalancer.yaml create mode 100644 deployments/service/nodeport.yaml delete mode 100644 examples/helm-chart/README.md delete mode 100644 examples/helm-chart/app-protect-dos/values.yaml delete mode 100644 examples/helm-chart/app-protect-waf/values.yaml delete mode 100644 examples/helm-chart/aws-nlb/values.yaml delete mode 100644 examples/helm-chart/azure/values.yaml delete mode 100644 examples/helm-chart/daemon-set/values.yaml delete mode 100644 examples/helm-chart/default/values.yaml delete mode 100644 examples/helm-chart/edge/values.yaml delete mode 100644 examples/helm-chart/external-dns/values.yaml delete mode 100644 examples/helm-chart/nginx-plus/values.yaml delete mode 100644 examples/helm-chart/nodeport/values.yaml delete mode 100644 examples/helm-chart/oidc/values.yaml delete mode 100644 examples/helm-chart/openservicemesh/values.yaml delete mode 100644 examples/helm-chart/read-only-fs/values.yaml delete mode 100644 examples/helm-chart/service-insight/values.yaml delete mode 100755 hack/generate-manifests.sh diff --git a/.github/workflows/build-plus.yml b/.github/workflows/build-plus.yml index 0bfca6ce84..a163444db5 100644 --- a/.github/workflows/build-plus.yml +++ b/.github/workflows/build-plus.yml @@ -171,7 +171,7 @@ jobs: This is the official implementation of NGINX Ingress Controller (based on NGINX Plus) from NGINX. 
usage-instructions: | This container requires Kubernetes and can be deployed to EKS. - Review the installation instructions https://docs.nginx.com/nginx-ingress-controller/installation/ and utilize the deployment resources available https://github.com/nginxinc/kubernetes-ingress/tree/main/deploy + Review the installation instructions https://docs.nginx.com/nginx-ingress-controller/installation/ and utilize the deployment resources available https://github.com/nginxinc/kubernetes-ingress/tree/master/deployments Use this image instead of building your own. if: ${{ github.ref_type == 'tag' && contains(inputs.target, 'aws') }} diff --git a/.github/workflows/release-pr.yml b/.github/workflows/release-pr.yml index 1717b93b48..d34e913b04 100644 --- a/.github/workflows/release-pr.yml +++ b/.github/workflows/release-pr.yml @@ -39,22 +39,29 @@ jobs: DOCS_TO_UPDATE_FOLDER=docs/content FILES_TO_UPDATE_IC_VERSION=( - README.md - charts/nginx-ingress/Chart.yaml - charts/nginx-ingress/README.md - charts/nginx-ingress/values.yaml + README.md + deployments/daemon-set/nginx-ingress.yaml + deployments/daemon-set/nginx-plus-ingress.yaml + deployments/deployment/nginx-ingress.yaml + deployments/deployment/nginx-plus-ingress.yaml + deployments/helm-chart/Chart.yaml + deployments/helm-chart/README.md + deployments/helm-chart/values-icp.yaml + deployments/helm-chart/values-nsm.yaml + deployments/helm-chart/values-plus.yaml + deployments/helm-chart/values.yaml ) FILE_TO_UPDATE_HELM_CHART_VERSION=( - charts/nginx-ingress/Chart.yaml - charts/nginx-ingress/README.md + deployments/helm-chart/Chart.yaml + deployments/helm-chart/README.md ) ic_version=${{ github.event.inputs.version }} helm_chart_version=${{ github.event.inputs.helm_version }} - current_ic_version=$(yq '.appVersion' deploy/crds-nap-dos.yaml kustomize build config/crd/app-protect-waf --load-restrictor='LoadRestrictionsNone' >deploy/crds-nap-waf.yaml -.PHONY: generate-manifests -generate-manifests: ## Generate manifests - 
./hack/generate-manifests.sh - .PHONY: certificate-and-key certificate-and-key: ## Create default cert and key ./build/generate_default_cert_and_key.sh diff --git a/README.md b/README.md index e75fb8e0e9..59a075580a 100644 --- a/README.md +++ b/README.md @@ -119,7 +119,7 @@ your links to the correct versions: | Version | Description | Image for NGINX | Image for NGINX Plus | Installation Manifests and Helm Chart | Documentation and Examples | | ------- | ----------- | --------------- | -------------------- | ---------------------------------------| -------------------------- | | Latest stable release | For production use | Use the 3.3.2 images from [DockerHub](https://hub.docker.com/r/nginx/nginx-ingress/), [GitHub Container](https://github.com/nginxinc/kubernetes-ingress/pkgs/container/kubernetes-ingress), [Amazon ECR Public Gallery](https://gallery.ecr.aws/nginx/nginx-ingress) or [Quay.io](https://quay.io/repository/nginx/nginx-ingress) or [build your own image](https://docs.nginx.com/nginx-ingress-controller/installation/building-ingress-controller-image/). | Use the 3.3.2 images from the [F5 Container Registry](https://docs.nginx.com/nginx-ingress-controller/installation/pulling-ingress-controller-image/) or the [AWS Marketplace](https://aws.amazon.com/marketplace/search/?CREATOR=741df81b-dfdc-4d36-b8da-945ea66b522c&FULFILLMENT_OPTION_TYPE=CONTAINER&filters=CREATOR%2CFULFILLMENT_OPTION_TYPE) or [Build your own image](https://docs.nginx.com/nginx-ingress-controller/installation/building-ingress-controller-image/). | [Manifests](https://github.com/nginxinc/kubernetes-ingress/tree/v3.3.2/deployments). [Helm chart](https://github.com/nginxinc/kubernetes-ingress/tree/v3.3.2/deployments/helm-chart). | [Documentation](https://docs.nginx.com/nginx-ingress-controller/). [Examples](https://docs.nginx.com/nginx-ingress-controller/configuration/configuration-examples/). 
| -| Edge/Nightly | For testing and experimenting | Use the edge or nightly images from [DockerHub](https://hub.docker.com/r/nginx/nginx-ingress/), [GitHub Container](https://github.com/nginxinc/kubernetes-ingress/pkgs/container/kubernetes-ingress), [Amazon ECR Public Gallery](https://gallery.ecr.aws/nginx/nginx-ingress) or [Quay.io](https://quay.io/repository/nginx/nginx-ingress) or [build your own image](https://github.com/nginxinc/kubernetes-ingress/tree/main/docs/content/installation/building-ingress-controller-image.md). | [Build your own image](https://github.com/nginxinc/kubernetes-ingress/tree/main/docs/content/installation/building-ingress-controller-image.md). | [Manifests](https://github.com/nginxinc/kubernetes-ingress/tree/main/deploy). [Helm chart](https://github.com/nginxinc/kubernetes-ingress/tree/main/charts/nginx-ingress). | [Documentation](https://github.com/nginxinc/kubernetes-ingress/tree/main/docs/content). [Examples](https://github.com/nginxinc/kubernetes-ingress/tree/main/examples). | +| Edge/Nightly | For testing and experimenting | Use the edge or nightly images from [DockerHub](https://hub.docker.com/r/nginx/nginx-ingress/), [GitHub Container](https://github.com/nginxinc/kubernetes-ingress/pkgs/container/kubernetes-ingress), [Amazon ECR Public Gallery](https://gallery.ecr.aws/nginx/nginx-ingress) or [Quay.io](https://quay.io/repository/nginx/nginx-ingress) or [build your own image](https://github.com/nginxinc/kubernetes-ingress/tree/main/docs/content/installation/building-ingress-controller-image.md). | [Build your own image](https://github.com/nginxinc/kubernetes-ingress/tree/main/docs/content/installation/building-ingress-controller-image.md). | [Manifests](https://github.com/nginxinc/kubernetes-ingress/tree/main/deployments). [Helm chart](https://github.com/nginxinc/kubernetes-ingress/tree/main/charts/nginx-ingress). | [Documentation](https://github.com/nginxinc/kubernetes-ingress/tree/main/docs/content). 
[Examples](https://github.com/nginxinc/kubernetes-ingress/tree/main/examples). | ## SBOM (Software Bill of Materials) diff --git a/charts/nginx-ingress/README.md b/charts/nginx-ingress/README.md index 142fc9b5a8..e23afecfab 100644 --- a/charts/nginx-ingress/README.md +++ b/charts/nginx-ingress/README.md @@ -78,14 +78,14 @@ To install the chart with the release name my-release (my-release is the name th For NGINX: ```console -helm install my-release -n nginx-ingress --create-namespace oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.2 +helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.2 ``` For NGINX Plus: (assuming you have pushed the Ingress Controller image `nginx-plus-ingress` to your private registry `myregistry.example.com`) ```console -helm install my-release -n nginx-ingress --create-namespace oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.2 --set controller.image.repository=myregistry.example.com/nginx-plus-ingress --set controller.nginxplus=true +helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.2 --set controller.image.repository=myregistry.example.com/nginx-plus-ingress --set controller.nginxplus=true ``` This will install the latest `edge` version of the Ingress Controller from GitHub Container Registry. If you prefer to @@ -100,7 +100,7 @@ CRDs](#upgrading-the-crds). To upgrade the release `my-release`: ```console -helm upgrade my-release -n nginx-ingress oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.2 +helm upgrade my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.2 ``` ### Uninstalling the Chart @@ -108,7 +108,7 @@ helm upgrade my-release -n nginx-ingress oci://ghcr.io/nginxinc/charts/nginx-ing To uninstall/delete the release `my-release`: ```console -helm uninstall my-release -n nginx-ingress +helm uninstall my-release ``` The command removes all the Kubernetes components associated with the release and deletes the release. 
@@ -123,7 +123,7 @@ version is built from the `main` branch of the NGINX Ingress Controller reposito by specifying the `--version` flag with the value `0.0.0-edge`: ```console -helm install my-release -n nginx-ingress --create-namespace oci://ghcr.io/nginxinc/charts/nginx-ingress --version 0.0.0-edge +helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 0.0.0-edge ``` > **Warning** @@ -157,13 +157,13 @@ To install the chart with the release name my-release (my-release is the name th For NGINX: ```console -helm install my-release -n nginx-ingress --create-namespace . +helm install my-release . ``` For NGINX Plus: ```console -helm install my-release -f values-plus.yaml -n nginx-ingress --create-namespace . +helm install my-release -f values-plus.yaml . ``` The command deploys the Ingress Controller in your Kubernetes cluster in the default configuration. The configuration @@ -177,7 +177,7 @@ CRDs](#upgrading-the-crds). To upgrade the release `my-release`: ```console -helm upgrade my-release -n nginx-ingress . +helm upgrade my-release . ``` ### Uninstalling the Chart @@ -185,7 +185,7 @@ helm upgrade my-release -n nginx-ingress . To uninstall/delete the release `my-release`: ```console -helm uninstall my-release -n nginx-ingress +helm uninstall my-release ``` The command removes all the Kubernetes components associated with the release and deletes the release. @@ -379,8 +379,8 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont |`controller.watchSecretNamespace` | Comma separated list of namespaces the Ingress Controller should watch for resources of type Secret. If this arg is not configured, the Ingress Controller watches the same namespaces for all resources. See `controller.watchNamespace` and `controller.watchNamespaceLabel`. Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. 
`--set controller.watchSecretNamespace="default\,nginx-ingress"`. | "" | |`controller.enableCustomResources` | Enable the custom resources. | true | |`controller.enableOIDC` | Enable OIDC policies. | false | -|`controller.enableTLSPassthrough` | Enable TLS Passthrough on port 443. Requires `controller.enableCustomResources`. | false | -|`controller.tlsPassThroughPort` | Set the port for the TLS Passthrough. Requires `controller.enableCustomResources` and `controller.enableTLSPassthrough`. | 443 | +|`controller.enableTLSPassthrough` | Enable TLS Passthrough on default port 443. Requires `controller.enableCustomResources`. | false | +|`controller.tlsPassThroughPort` | Set the port for the TLS Passthrough. Requires `controller.enableCustomResources` and `controller.enableTLSPassthrough`. | 443 | |`controller.enableCertManager` | Enable x509 automated certificate management for VirtualServer resources using cert-manager (cert-manager.io). Requires `controller.enableCustomResources`. | false | |`controller.enableExternalDNS` | Enable integration with ExternalDNS for configuring public DNS entries for VirtualServer resources using [ExternalDNS](https://github.com/kubernetes-sigs/external-dns). Requires `controller.enableCustomResources`. | false | |`controller.globalConfiguration.create` | Creates the GlobalConfiguration custom resource. Requires `controller.enableCustomResources`. 
| false | diff --git a/charts/nginx-ingress/templates/controller-daemonset.yaml b/charts/nginx-ingress/templates/controller-daemonset.yaml index 21e07e2fe9..243907770a 100644 --- a/charts/nginx-ingress/templates/controller-daemonset.yaml +++ b/charts/nginx-ingress/templates/controller-daemonset.yaml @@ -230,9 +230,9 @@ spec: - -disable-ipv6={{ .Values.controller.disableIPV6 }} {{- if .Values.controller.enableCustomResources }} - -enable-tls-passthrough={{ .Values.controller.enableTLSPassthrough }} -{{- if .Values.controller.enableTLSPassthrough }} +{{ if .Values.controller.enableTLSPassthrough }} - -tls-passthrough-port={{ .Values.controller.tlsPassthroughPort }} -{{- end }} +{{ end }} - -enable-cert-manager={{ .Values.controller.enableCertManager }} - -enable-oidc={{ .Values.controller.enableOIDC }} - -enable-external-dns={{ .Values.controller.enableExternalDNS }} diff --git a/charts/nginx-ingress/templates/controller-deployment.yaml b/charts/nginx-ingress/templates/controller-deployment.yaml index 341911c759..febddbdc5d 100644 --- a/charts/nginx-ingress/templates/controller-deployment.yaml +++ b/charts/nginx-ingress/templates/controller-deployment.yaml @@ -173,14 +173,14 @@ spec: - -enable-app-protect={{ .Values.controller.appprotect.enable }} {{- if and .Values.controller.appprotect.enable .Values.controller.appprotect.logLevel }} - -app-protect-log-level={{ .Values.controller.appprotect.logLevel }} -{{- end }} +{{ end }} - -enable-app-protect-dos={{ .Values.controller.appprotectdos.enable }} {{- if .Values.controller.appprotectdos.enable }} - -app-protect-dos-debug={{ .Values.controller.appprotectdos.debug }} - -app-protect-dos-max-daemons={{ .Values.controller.appprotectdos.maxDaemons }} - -app-protect-dos-max-workers={{ .Values.controller.appprotectdos.maxWorkers }} - -app-protect-dos-memory={{ .Values.controller.appprotectdos.memory }} -{{- end }} +{{ end }} - -nginx-configmaps=$(POD_NAMESPACE)/{{ include "nginx-ingress.configName" . 
}} {{- if .Values.controller.defaultTLS.secret }} - -default-server-tls-secret={{ .Values.controller.defaultTLS.secret }} @@ -237,9 +237,9 @@ spec: - -disable-ipv6={{ .Values.controller.disableIPV6 }} {{- if .Values.controller.enableCustomResources }} - -enable-tls-passthrough={{ .Values.controller.enableTLSPassthrough }} -{{- if .Values.controller.enableTLSPassthrough }} +{{ if .Values.controller.enableTLSPassthrough }} - -tls-passthrough-port={{ .Values.controller.tlsPassthroughPort }} -{{- end }} +{{ end }} - -enable-cert-manager={{ .Values.controller.enableCertManager }} - -enable-oidc={{ .Values.controller.enableOIDC }} - -enable-external-dns={{ .Values.controller.enableExternalDNS }} diff --git a/charts/nginx-ingress/values-nsm.yaml b/charts/nginx-ingress/values-nsm.yaml new file mode 100644 index 0000000000..47d11e0571 --- /dev/null +++ b/charts/nginx-ingress/values-nsm.yaml @@ -0,0 +1,6 @@ +controller: + name: controller + enableLatencyMetrics: true +nginxServiceMesh: + enable: true + enableEgress: true diff --git a/charts/nginx-ingress/values.yaml b/charts/nginx-ingress/values.yaml index 60ba43eb39..dc8997c7bb 100644 --- a/charts/nginx-ingress/values.yaml +++ b/charts/nginx-ingress/values.yaml @@ -211,7 +211,7 @@ controller: ## The number of Ingress Controller pods that can be unavailable. This is a mutually exclusive setting with "minAvailable". # maxUnavailable: 1 - ## Strategy used to replace old Pods by new ones. .spec.strategy.type can be "Recreate" or "RollingUpdate" for Deployments, and "OnDelete" or "RollingUpdate" for Daemonsets. "RollingUpdate" is the default value. + ## Strategy used to replace old Pods by new ones. .spec.strategy.type can be "Recreate" or "RollingUpdate" for Deployments, and "OnDelete" or "RollingUpdate" for Daemonsets. "RollingUpdate" is the default value. strategy: {} ## Extra containers for the Ingress Controller pods. 
@@ -275,7 +275,7 @@ controller: create: false ## The spec of the GlobalConfiguration for defining the global configuration parameters of the Ingress Controller. - spec: {} + spec: {} ## Ensure both curly brackets are removed when adding listeners in YAML format. # listeners: # - name: dns-udp # port: 5353 @@ -380,7 +380,7 @@ controller: ## The name of the service account of the Ingress Controller pods. Used for RBAC. ## Autogenerated if not set or set to "". - name: nginx-ingress + # name: nginx-ingress ## The name of the secret containing docker registry credentials. ## Secret must exist in the same namespace as the helm release. diff --git a/config/base/kustomization.yaml b/config/base/kustomization.yaml deleted file mode 100644 index f3639e3726..0000000000 --- a/config/base/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: -- deploy.yaml -- namespace.yaml diff --git a/config/base/namespace.yaml b/config/base/namespace.yaml deleted file mode 100644 index 4b60fec13d..0000000000 --- a/config/base/namespace.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress diff --git a/config/overlays/app-protect-dos/deployment.yaml b/config/overlays/app-protect-dos/deployment.yaml deleted file mode 100644 index 3654af01a6..0000000000 --- a/config/overlays/app-protect-dos/deployment.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# This is generated from https://github.com/nginxinc/nap-dos-arbitrator-helm-chart -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-appprotect-dos-arbitrator - namespace: nginx-ingress - labels: - app.kubernetes.io/name: nginx-appprotect-dos-arbitrator - app.kubernetes.io/instance: nginx-appprotect-dos-arbitrator - app.kubernetes.io/version: "1.1.1" -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: nginx-appprotect-dos-arbitrator - app.kubernetes.io/instance: nginx-appprotect-dos-arbitrator - template: - 
metadata: - labels: - app.kubernetes.io/name: nginx-appprotect-dos-arbitrator - app.kubernetes.io/instance: nginx-appprotect-dos-arbitrator - spec: - serviceAccountName: default - securityContext: - {} - containers: - - name: nginx-appprotect-dos-arbitrator - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - runAsUser: 1001 - image: "docker-registry.nginx.com/nap-dos/app_protect_dos_arb:1.1.1" - imagePullPolicy: IfNotPresent - ports: - - name: arb - containerPort: 3000 - protocol: TCP - resources: - limits: - cpu: 500m - memory: 128Mi diff --git a/config/overlays/app-protect-dos/kustomization.yaml b/config/overlays/app-protect-dos/kustomization.yaml deleted file mode 100644 index 31cb687baf..0000000000 --- a/config/overlays/app-protect-dos/kustomization.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: -- ../../base -- deployment.yaml -- service.yaml -# - ../../crd/bases/appprotectdos.f5.com_apdoslogconfs.yaml -# - ../../crd/bases/appprotectdos.f5.com_apdospolicy.yaml -# - ../../crd/bases/appprotectdos.f5.com_dosprotectedresources.yaml diff --git a/config/overlays/app-protect-dos/service.yaml b/config/overlays/app-protect-dos/service.yaml deleted file mode 100644 index 624f0eff6e..0000000000 --- a/config/overlays/app-protect-dos/service.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# This is generated from https://github.com/nginxinc/nap-dos-arbitrator-helm-chart -apiVersion: v1 -kind: Service -metadata: - name: nginx-appprotect-dos-arbitrator - namespace: nginx-ingress - labels: - app.kubernetes.io/name: nginx-appprotect-dos-arbitrator - app.kubernetes.io/instance: nginx-appprotect-dos-arbitrator - app.kubernetes.io/version: "1.1.1" -spec: - type: ClusterIP - ports: - - port: 3000 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: nginx-appprotect-dos-arbitrator - app.kubernetes.io/instance: nginx-appprotect-dos-arbitrator diff --git 
a/deploy/app-protect-dos/deploy.yaml b/deploy/app-protect-dos/deploy.yaml deleted file mode 100644 index 4d6119d510..0000000000 --- a/deploy/app-protect-dos/deploy.yaml +++ /dev/null @@ -1,443 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -imagePullSecrets: -- name: nginx-registry-credentials -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - 
watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - appprotectdos.f5.com - resources: - - apdospolicies - - apdoslogconfs - - dosprotectedresources - verbs: - - get - - watch - - list -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - transportservers - - policies - verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - 
app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-appprotect-dos-arbitrator - app.kubernetes.io/name: nginx-appprotect-dos-arbitrator - app.kubernetes.io/version: 1.1.1 - name: nginx-appprotect-dos-arbitrator - namespace: nginx-ingress -spec: - ports: - - name: http - port: 3000 - protocol: TCP - targetPort: http - selector: - app.kubernetes.io/instance: nginx-appprotect-dos-arbitrator - app.kubernetes.io/name: nginx-appprotect-dos-arbitrator - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-appprotect-dos-arbitrator - app.kubernetes.io/name: nginx-appprotect-dos-arbitrator - app.kubernetes.io/version: 1.1.1 - name: nginx-appprotect-dos-arbitrator - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-appprotect-dos-arbitrator - app.kubernetes.io/name: nginx-appprotect-dos-arbitrator - template: - metadata: - labels: - app.kubernetes.io/instance: nginx-appprotect-dos-arbitrator - app.kubernetes.io/name: nginx-appprotect-dos-arbitrator - spec: - containers: - - image: docker-registry.nginx.com/nap-dos/app_protect_dos_arb:1.1.1 - imagePullPolicy: IfNotPresent - name: nginx-appprotect-dos-arbitrator - ports: - - containerPort: 3000 - name: arb - protocol: TCP - resources: - limits: - cpu: 500m - memory: 
128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - runAsUser: 1001 - securityContext: {} - serviceAccountName: default ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - -nginx-plus=true - - -nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=true - - -app-protect-dos-debug=false - - -app-protect-dos-max-daemons=0 - - -app-protect-dos-max-workers=0 - - -app-protect-dos-memory=0 - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -external-service=nginx-ingress-controller - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - - -enable-oidc=false - - -enable-external-dns=false - - 
-default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: private-registry.nginx.com/nginx-ic-dos/nginx-plus-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - dnsPolicy: ClusterFirst - hostNetwork: false - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/app-protect-waf/deploy.yaml b/deploy/app-protect-waf/deploy.yaml deleted file mode 100644 index 613248ccc2..0000000000 --- a/deploy/app-protect-waf/deploy.yaml +++ /dev/null @@ -1,378 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -imagePullSecrets: -- name: nginx-registry-credentials -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: 
nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - appprotect.f5.com - resources: - - appolicies - - aplogconfs - - apusersigs - verbs: - - get - - watch - - list -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - 
transportservers - - policies - verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress 
- app.kubernetes.io/name: nginx-ingress - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - -nginx-plus=true - - -nginx-reload-timeout=60000 - - -enable-app-protect=true - - -app-protect-log-level=error - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -external-service=nginx-ingress-controller - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - - -enable-oidc=false - - -enable-external-dns=false - - -default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: 
metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: private-registry.nginx.com/nginx-ic-nap/nginx-plus-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - dnsPolicy: ClusterFirst - hostNetwork: false - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/aws-nlb/deploy.yaml b/deploy/aws-nlb/deploy.yaml deleted file mode 100644 index e64cf12dba..0000000000 --- a/deploy/aws-nlb/deploy.yaml +++ /dev/null @@ -1,371 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - 
configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - transportservers - - policies - verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: 
nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: - proxy-protocol: "True" - real-ip-header: proxy_protocol - set-real-ip-from: 0.0.0.0/0 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip - service.beta.kubernetes.io/aws-load-balancer-type: nlb - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - 
app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - -nginx-plus=false - - -nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -external-service=nginx-ingress-controller - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - - -enable-oidc=false - - -enable-external-dns=false - - -default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: nginx/nginx-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - 
containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - dnsPolicy: ClusterFirst - hostNetwork: false - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/azure/deploy.yaml b/deploy/azure/deploy.yaml deleted file mode 100644 index 2e392b7e98..0000000000 --- a/deploy/azure/deploy.yaml +++ /dev/null @@ -1,367 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - 
list -- apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - transportservers - - policies - verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: 
- labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - 
-nginx-plus=false - - -nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -external-service=nginx-ingress-controller - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - - -enable-oidc=false - - -enable-external-dns=false - - -default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: nginx/nginx-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - dnsPolicy: ClusterFirst - 
hostNetwork: false - nodeSelector: - kubernetes.io/os: linux - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/crds b/deploy/crds deleted file mode 120000 index c29b1b8fe4..0000000000 --- a/deploy/crds +++ /dev/null @@ -1 +0,0 @@ -../config/crd/bases/ \ No newline at end of file diff --git a/deploy/daemon-set/deploy.yaml b/deploy/daemon-set/deploy.yaml deleted file mode 100644 index 96c993c724..0000000000 --- a/deploy/daemon-set/deploy.yaml +++ /dev/null @@ -1,364 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: 
ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - transportservers - - policies - verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: 
ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - -nginx-plus=false - - -nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 
- - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -external-service=nginx-ingress-controller - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - - -enable-oidc=false - - -enable-external-dns=false - - -default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: nginx/nginx-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - dnsPolicy: ClusterFirst - hostNetwork: false - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - 
app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/default/deploy.yaml b/deploy/default/deploy.yaml deleted file mode 100644 index a55443065b..0000000000 --- a/deploy/default/deploy.yaml +++ /dev/null @@ -1,365 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - 
discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - transportservers - - policies - verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - 
namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - -nginx-plus=false - - -nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -external-service=nginx-ingress-controller - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - 
- -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - - -enable-oidc=false - - -enable-external-dns=false - - -default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: nginx/nginx-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - dnsPolicy: ClusterFirst - hostNetwork: false - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/edge/deploy.yaml b/deploy/edge/deploy.yaml deleted file mode 100644 index 046f4812db..0000000000 --- a/deploy/edge/deploy.yaml +++ /dev/null @@ -1,366 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - 
app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - transportservers - - policies - 
verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - nodePort: null - port: 80 - protocol: TCP - targetPort: 80 - - name: https - nodePort: null - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: 
nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: NodePort ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - -nginx-plus=false - - -nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - - -enable-oidc=false - - -enable-external-dns=false - - -default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - 
fieldPath: metadata.name - image: nginx/nginx-ingress:edge - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - dnsPolicy: ClusterFirst - hostNetwork: false - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/external-dns/deploy.yaml b/deploy/external-dns/deploy.yaml deleted file mode 100644 index 10307b41e5..0000000000 --- a/deploy/external-dns/deploy.yaml +++ /dev/null @@ -1,382 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- 
apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - transportservers - - policies - verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update -- apiGroups: - - externaldns.nginx.org - resources: - - dnsendpoints - verbs: - - list - - watch - - get - - update - - create - - delete -- apiGroups: - - externaldns.nginx.org - resources: - - dnsendpoints/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding 
-metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: 
nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - -nginx-plus=false - - -nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -external-service=nginx-ingress-controller - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - - -enable-oidc=false - - -enable-external-dns=true - - -default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: nginx/nginx-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: 
prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - dnsPolicy: ClusterFirst - hostNetwork: false - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/nginx-plus/deploy.yaml b/deploy/nginx-plus/deploy.yaml deleted file mode 100644 index f5833d5af7..0000000000 --- a/deploy/nginx-plus/deploy.yaml +++ /dev/null @@ -1,367 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -imagePullSecrets: -- name: nginx-registry-credentials -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- 
apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - transportservers - - policies - verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - 
labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - 
-nginx-plus=true - - -nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -external-service=nginx-ingress-controller - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - - -enable-oidc=false - - -enable-external-dns=false - - -default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 
- dnsPolicy: ClusterFirst - hostNetwork: false - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/nodeport/deploy.yaml b/deploy/nodeport/deploy.yaml deleted file mode 100644 index df1b3adcc7..0000000000 --- a/deploy/nodeport/deploy.yaml +++ /dev/null @@ -1,366 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - 
resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - transportservers - - policies - verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: 
nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - nodePort: null - port: 80 - protocol: TCP - targetPort: 80 - - name: https - nodePort: null - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: NodePort ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - -nginx-plus=false - - -nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -enable-leader-election=true - - 
-leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - - -enable-oidc=false - - -enable-external-dns=false - - -default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: nginx/nginx-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - dnsPolicy: ClusterFirst - hostNetwork: false - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/oidc/deploy.yaml b/deploy/oidc/deploy.yaml deleted file mode 
100644 index f3038348ca..0000000000 --- a/deploy/oidc/deploy.yaml +++ /dev/null @@ -1,367 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -imagePullSecrets: -- name: nginx-registry-credentials -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch 
-- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - transportservers - - policies - verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - 
app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - -nginx-plus=true - - -nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -external-service=nginx-ingress-controller - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - 
- -enable-oidc=true - - -enable-external-dns=false - - -default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - dnsPolicy: ClusterFirst - hostNetwork: false - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/openservicemesh/deploy.yaml b/deploy/openservicemesh/deploy.yaml deleted file mode 100644 index ab2036ff22..0000000000 --- a/deploy/openservicemesh/deploy.yaml +++ /dev/null @@ -1,366 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress 
---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - transportservers - - policies - verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - 
virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - 
app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - openservicemesh.io/inbound-port-exclusion-list: 80, 443 - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - -nginx-plus=false - - -nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -external-service=nginx-ingress-controller - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - - -enable-oidc=false - - -enable-external-dns=false - - -default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: 
nginx/nginx-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - dnsPolicy: ClusterFirst - hostNetwork: false - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/read-only-fs/deploy.yaml b/deploy/read-only-fs/deploy.yaml deleted file mode 100644 index 81a55228af..0000000000 --- a/deploy/read-only-fs/deploy.yaml +++ /dev/null @@ -1,375 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - 
namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: 
rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - -nginx-plus=false - - -nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - 
-ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -external-service=nginx-ingress-controller - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=false - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: nginx/nginx-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 101 - volumeMounts: - - mountPath: /etc/nginx - name: nginx-etc - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /var/lib/nginx - name: nginx-lib - - mountPath: /var/log/nginx - name: nginx-log - dnsPolicy: ClusterFirst - hostNetwork: false - initContainers: - - command: - - cp - - -vdR - - /etc/nginx/. 
- - /mnt/etc - image: nginx/nginx-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: init-nginx-ingress - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 101 - volumeMounts: - - mountPath: /mnt/etc - name: nginx-etc - securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: nginx-etc - - emptyDir: {} - name: nginx-cache - - emptyDir: {} - name: nginx-lib - - emptyDir: {} - name: nginx-log ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/service-insight/deploy.yaml b/deploy/service-insight/deploy.yaml deleted file mode 100644 index 64aa25a8ec..0000000000 --- a/deploy/service-insight/deploy.yaml +++ /dev/null @@ -1,365 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nginx-ingress ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - pods - verbs: - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - 
coordination.k8s.io - resourceNames: - - nginx-ingress-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -rules: -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers - - virtualserverroutes - - globalconfigurations - - transportservers - - policies - verbs: - - list - - watch - - get -- apiGroups: - - k8s.nginx.org - resources: - - virtualservers/status - - virtualserverroutes/status - - policies/status - - transportservers/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - 
app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress -subjects: -- kind: ServiceAccount - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -data: null -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress - namespace: nginx-ingress ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-leader - namespace: nginx-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - externalTrafficPolicy: Local - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 80 - - name: https - port: 443 - protocol: TCP - targetPort: 443 - selector: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - type: LoadBalancer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx-ingress-controller - namespace: nginx-ingress -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - template: - metadata: - annotations: - prometheus.io/port: "9113" - prometheus.io/scheme: http - prometheus.io/scrape: "true" - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - spec: - automountServiceAccountToken: true - containers: - - args: - - -nginx-plus=true - - 
-nginx-reload-timeout=60000 - - -enable-app-protect=false - - -enable-app-protect-dos=false - - -nginx-configmaps=$(POD_NAMESPACE)/nginx-ingress - - -ingress-class=nginx - - -health-status=false - - -health-status-uri=/nginx-health - - -nginx-debug=false - - -v=1 - - -nginx-status=true - - -nginx-status-port=8080 - - -nginx-status-allow-cidrs=127.0.0.1 - - -report-ingress-status - - -external-service=nginx-ingress-controller - - -enable-leader-election=true - - -leader-election-lock-name=nginx-ingress-leader - - -enable-prometheus-metrics=true - - -prometheus-metrics-listen-port=9113 - - -prometheus-tls-secret= - - -enable-service-insight=false - - -service-insight-listen-port=9114 - - -service-insight-tls-secret= - - -enable-custom-resources=true - - -enable-snippets=false - - -include-year=false - - -disable-ipv6=false - - -enable-tls-passthrough=false - - -enable-preview-policies= - - -enable-cert-manager=false - - -enable-oidc=false - - -enable-external-dns=false - - -default-http-listener-port=80 - - -default-https-listener-port=443 - - -ready-status=true - - -ready-status-port=8081 - - -enable-latency-metrics=false - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: nginx/nginx-ingress:3.3.2 - imagePullPolicy: IfNotPresent - name: nginx-ingress - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - name: https - protocol: TCP - - containerPort: 9113 - name: prometheus - - containerPort: 8081 - name: readiness-port - readinessProbe: - httpGet: - path: /nginx-ready - port: readiness-port - initialDelaySeconds: 0 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 128Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - dnsPolicy: ClusterFirst - hostNetwork: false - 
securityContext: - seccompProfile: - type: RuntimeDefault - serviceAccountName: nginx-ingress - terminationGracePeriodSeconds: 30 ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/instance: nginx-ingress - app.kubernetes.io/name: nginx-ingress - app.kubernetes.io/version: 3.3.2 - name: nginx -spec: - controller: nginx.org/ingress-controller diff --git a/deploy/README.md b/deployments/README.md similarity index 100% rename from deploy/README.md rename to deployments/README.md diff --git a/tests/data/common/ingress-class.yaml b/deployments/common/ingress-class.yaml similarity index 100% rename from tests/data/common/ingress-class.yaml rename to deployments/common/ingress-class.yaml diff --git a/tests/data/common/nginx-config.yaml b/deployments/common/nginx-config.yaml similarity index 100% rename from tests/data/common/nginx-config.yaml rename to deployments/common/nginx-config.yaml diff --git a/tests/data/common/ns-and-sa.yaml b/deployments/common/ns-and-sa.yaml similarity index 100% rename from tests/data/common/ns-and-sa.yaml rename to deployments/common/ns-and-sa.yaml diff --git a/tests/data/daemon-set/nginx-ingress.yaml b/deployments/daemon-set/nginx-ingress.yaml similarity index 100% rename from tests/data/daemon-set/nginx-ingress.yaml rename to deployments/daemon-set/nginx-ingress.yaml diff --git a/tests/data/daemon-set/nginx-plus-ingress.yaml b/deployments/daemon-set/nginx-plus-ingress.yaml similarity index 100% rename from tests/data/daemon-set/nginx-plus-ingress.yaml rename to deployments/daemon-set/nginx-plus-ingress.yaml diff --git a/tests/data/deployment/appprotect-dos-arb.yaml b/deployments/deployment/appprotect-dos-arb.yaml similarity index 100% rename from tests/data/deployment/appprotect-dos-arb.yaml rename to deployments/deployment/appprotect-dos-arb.yaml diff --git a/tests/data/deployment/nginx-ingress.yaml b/deployments/deployment/nginx-ingress.yaml similarity index 100% rename from 
tests/data/deployment/nginx-ingress.yaml rename to deployments/deployment/nginx-ingress.yaml diff --git a/tests/data/deployment/nginx-plus-ingress.yaml b/deployments/deployment/nginx-plus-ingress.yaml similarity index 100% rename from tests/data/deployment/nginx-plus-ingress.yaml rename to deployments/deployment/nginx-plus-ingress.yaml diff --git a/tests/data/rbac/ap-rbac.yaml b/deployments/rbac/ap-rbac.yaml similarity index 100% rename from tests/data/rbac/ap-rbac.yaml rename to deployments/rbac/ap-rbac.yaml diff --git a/tests/data/rbac/apdos-rbac.yaml b/deployments/rbac/apdos-rbac.yaml similarity index 100% rename from tests/data/rbac/apdos-rbac.yaml rename to deployments/rbac/apdos-rbac.yaml diff --git a/tests/data/rbac/rbac.yaml b/deployments/rbac/rbac.yaml similarity index 100% rename from tests/data/rbac/rbac.yaml rename to deployments/rbac/rbac.yaml diff --git a/tests/data/service/appprotect-dos-arb-svc.yaml b/deployments/service/appprotect-dos-arb-svc.yaml similarity index 100% rename from tests/data/service/appprotect-dos-arb-svc.yaml rename to deployments/service/appprotect-dos-arb-svc.yaml diff --git a/deployments/service/loadbalancer-aws-elb.yaml b/deployments/service/loadbalancer-aws-elb.yaml new file mode 100644 index 0000000000..ee66f46244 --- /dev/null +++ b/deployments/service/loadbalancer-aws-elb.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx-ingress + namespace: nginx-ingress + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" +spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + - port: 443 + targetPort: 443 + protocol: TCP + name: https + selector: + app: nginx-ingress diff --git a/deployments/service/loadbalancer.yaml b/deployments/service/loadbalancer.yaml new file mode 100644 index 0000000000..d27ca5bc6a --- /dev/null +++ b/deployments/service/loadbalancer.yaml @@ -0,0 +1,19 @@ 
+apiVersion: v1 +kind: Service +metadata: + name: nginx-ingress + namespace: nginx-ingress +spec: + externalTrafficPolicy: Local + type: LoadBalancer + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + - port: 443 + targetPort: 443 + protocol: TCP + name: https + selector: + app: nginx-ingress diff --git a/deployments/service/nodeport.yaml b/deployments/service/nodeport.yaml new file mode 100644 index 0000000000..f263b66388 --- /dev/null +++ b/deployments/service/nodeport.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx-ingress + namespace: nginx-ingress +spec: + type: NodePort + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + - port: 443 + targetPort: 443 + protocol: TCP + name: https + selector: + app: nginx-ingress diff --git a/docs/content/app-protect-dos/installation.md b/docs/content/app-protect-dos/installation.md index 2bc31daa1e..2b77b0dd34 100644 --- a/docs/content/app-protect-dos/installation.md +++ b/docs/content/app-protect-dos/installation.md @@ -13,11 +13,16 @@ This document provides an overview of the steps required to use NGINX App Protec ## Prerequisites -Make sure you have access to the Ingress Controller image: +1. Make sure you have access to the Ingress Controller image: + - For NGINX Plus Ingress Controller, see [here](/nginx-ingress-controller/installation/pulling-ingress-controller-image) for details on how to pull the image from the F5 Docker registry. + - To pull from the F5 Container registry in your Kubernetes cluster, configure a docker registry secret using your JWT token from the MyF5 portal by following the instructions from [here](/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret). + - It is also possible to build your own image and push it to your private Docker registry by following the instructions from [here](/nginx-ingress-controller/installation/building-ingress-controller-image). +2. 
Clone the Ingress Controller repo: -- For NGINX Plus Ingress Controller, see [here](/nginx-ingress-controller/installation/pulling-ingress-controller-image) for details on how to pull the image from the F5 Docker registry. -- To pull from the F5 Container registry in your Kubernetes cluster, configure a docker registry secret using your JWT token from the MyF5 portal by following the instructions from [here](/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret). -- It is also possible to build your own image and push it to your private Docker registry by following the instructions from [here](/nginx-ingress-controller/installation/building-ingress-controller-image). + ``` + git clone https://github.com/nginxinc/kubernetes-ingress.git --branch v3.3.2 + cd kubernetes-ingress/deployments + ``` ## Install the App Protect DoS Arbitrator diff --git a/docs/content/configuration/security.md b/docs/content/configuration/security.md index ea87930681..472369b450 100644 --- a/docs/content/configuration/security.md +++ b/docs/content/configuration/security.md @@ -22,10 +22,12 @@ In addition, the following relating more specifically to Ingress Controller. The Ingress Controller is deployed within a Kubernetes environment, this environment must be secured. Kubernetes uses [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) to control the resources and operations available to different types of users. The Ingress Controller requires a service account which is configured using RBAC. -We strongly recommend using the RBAC configuration provided in our deployment configurations. +We strongly recommend using the [RBAC configuration](https://github.com/nginxinc/kubernetes-ingress/blob/main/deployments/rbac/rbac.yaml) provided in our standard deployment configuration. It is configured with the least amount of privilege required for the Ingress Controller to work. 
-We strongly recommend inspecting the RBAC configuration in the deployment file or Helm chart to understand what access the Ingress Controller service account has and to which resources. +We strongly recommend inspecting the RBAC configuration (for [manifests installation](https://github.com/nginxinc/kubernetes-ingress/blob/main/deployments/rbac/rbac.yaml) +or for [helm](https://github.com/nginxinc/kubernetes-ingress/blob/main/charts/nginx-ingress/templates/rbac.yaml)) +to understand what access the Ingress Controller service account has and to which resources. For example, by default the service account has access to all Secret resources in the cluster. ### Certificates and Privacy Keys @@ -56,11 +58,58 @@ Snippets are disabled by default. To use snippets, set the [`enable-snippets`](/ The F5 Nginx Ingress Controller (NIC) has various protections against attacks, such as running the service as non-root to avoid changes to files. An additional industry best practice is having root filesystems set as read-only so that the attack surface is further reduced by limiting changes to binaries and libraries. -Currently, we do not set read-only root filesystem as default. Instead, this is an opt-in feature available on the [Helm Chart](/nginx-ingress-controller/installation/installation-with-helm/#configuration) -via `controller.readOnlyRootFilesystem`. +Currently, we do not set read-only root filesystem as default. Instead, this is an opt-in feature available on the [helm-chart](/nginx-ingress-controller/installation/installation-with-helm/#configuration) via `controller.readOnlyRootFilesystem`. 
+When using manifests instead of Helm, uncomment the following sections of the deployment: -If you prefer to use manifests instead of Helm, you can use the following manifest to enable this feature: +- `readOnlyRootFilesystem: true`, +- The entire `volumeMounts` section, +- The entire `initContainers` section, +- For `initContainers:image:`, use the exact same image used for regular NIC installation. +Refer to the below code-block for guidance: -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/read-only-fs/deploy.yaml +``` +# volumes: +# - name: nginx-etc +# emptyDir: {} +# - name: nginx-cache +# emptyDir: {} +# - name: nginx-lib +# emptyDir: {} +# - name: nginx-log +# emptyDir: {} +. +. +. +# readOnlyRootFilesystem: true +. +. +. +# volumeMounts: +# - mountPath: /etc/nginx +# name: nginx-etc +# - mountPath: /var/cache/nginx +# name: nginx-cache +# - mountPath: /var/lib/nginx +# name: nginx-lib +# - mountPath: /var/log/nginx +# name: nginx-log +. +. +. +# initContainers: +# - image: : +# imagePullPolicy: IfNotPresent +# name: init-nginx-ingress +# command: ['cp', '-vdR', '/etc/nginx/.', '/mnt/etc'] +# securityContext: +# allowPrivilegeEscalation: false +# readOnlyRootFilesystem: true +# runAsUser: 101 #nginx +# runAsNonRoot: true +# capabilities: +# drop: +# - ALL +# volumeMounts: +# - mountPath: /mnt/etc +# name: nginx-etc ``` diff --git a/docs/content/installation/installation-with-helm.md b/docs/content/installation/installation-with-helm.md index 7477e9c628..6407301529 100644 --- a/docs/content/installation/installation-with-helm.md +++ b/docs/content/installation/installation-with-helm.md @@ -1,7 +1,7 @@ --- title: Installation with Helm description: This document describes how to install the NGINX Ingress Controller in your Kubernetes cluster using Helm. 
-weight: 1800 +weight: 1900 doctypes: [""] toc: true docs: "DOCS-602" diff --git a/docs/content/installation/installation-with-manifests.md b/docs/content/installation/installation-with-manifests.md index 76fde3d037..c9554de479 100644 --- a/docs/content/installation/installation-with-manifests.md +++ b/docs/content/installation/installation-with-manifests.md @@ -1,7 +1,7 @@ --- title: Installation with Manifests description: "This document describes how to install the NGINX Ingress Controller in your Kubernetes cluster using Kubernetes manifests." -weight: 1900 +weight: 1800 doctypes: [""] aliases: - /installation/ @@ -11,218 +11,196 @@ docs: "DOCS-603" ## Prerequisites +{{}} All documentation should only be used with the latest stable release, indicated on [the releases page](https://github.com/nginxinc/kubernetes-ingress/releases) of the GitHub repository. {{}} + 1. Make sure you have access to an NGINX Ingress Controller image: - - For NGINX Ingress Controller, use the images from [DockerHub](https://hub.docker.com/r/nginx/nginx-ingress), - [GitHub Container Registry](https://github.com/nginxinc/kubernetes-ingress/pkgs/container/kubernetes-ingress), - [Amazon ECR Public Gallery](https://gallery.ecr.aws/nginx/nginx-ingress) or - [Quay.io](https://quay.io/repository/nginx/nginx-ingress). - - For NGINX Plus Ingress Controller, see - [here](/nginx-ingress-controller/installation/pulling-ingress-controller-image) for details on pulling the image - from the F5 Docker registry. - - To pull from the F5 Container registry in your Kubernetes cluster, configure a docker registry secret using your - JWT token from the MyF5 portal by following the instructions from - [here](/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret). + - For NGINX Ingress Controller, use the image `nginx/nginx-ingress` from [DockerHub](https://hub.docker.com/r/nginx/nginx-ingress). 
+ - For NGINX Plus Ingress Controller, see [here](/nginx-ingress-controller/installation/pulling-ingress-controller-image) for details on pulling the image from the F5 Docker registry. + - To pull from the F5 Container registry in your Kubernetes cluster, configure a docker registry secret using your JWT token from the MyF5 portal by following the instructions from [here](/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret). - You can also build your own image and push it to your private Docker registry by following the instructions from [here](/nginx-ingress-controller/installation/building-ingress-controller-image). -2. All the commands in this document directly apply the YAML files from the repository. If you prefer, you can download - the files and modify them according to your requirements. +2. Clone the NGINX Ingress Controller repository and change into the deployments folder: -{{}} To perform some of the following steps you must be a cluster admin. Follow the documentation of your -Kubernetes platform to configure the admin access. For Google Kubernetes Engine, see their [Role-Based Access -Control](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control) documentation.{{}} + ```shell + git clone https://github.com/nginxinc/kubernetes-ingress.git --branch v3.3.2 + cd kubernetes-ingress/deployments + ``` ---- + {{}}The above command will clone the branch of the latest NGINX Ingress Controller release, and all documentation assumes you are using it.{{}} -## 1. Create Custom Resources +--- -{{}} -By default, it is required to create custom resource definitions for VirtualServer, VirtualServerRoute, TransportServer -and Policy. Otherwise, NGINX Ingress Controller pods will not become `Ready`. 
If you'd like to disable that requirement, -configure -[`-enable-custom-resources`](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments#cmdoption-global-configuration) -command-line argument to `false` and skip this section. -{{}} +## 1. Configure RBAC -1. Create custom resource definitions for [VirtualServer and VirtualServerRoute](/nginx-ingress-controller/configuration/virtualserver-and-virtualserverroute-resources), - [TransportServer](/nginx-ingress-controller/configuration/transportserver-resource), - [Policy](/nginx-ingress-controller/configuration/policy-resource) and - [GlobalConfiguration](/nginx-ingress-controller/configuration/global-configuration/globalconfiguration-resource) - resources: +1. Create a namespace and a service account for NGINX Ingress Controller: ```shell - kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/crds.yaml + kubectl apply -f common/ns-and-sa.yaml ``` -2. If you would like to use the NGINX App Protect WAF module, you will need to create custom resource definitions for - `APPolicy`, `APLogConf` and `APUserSig`: +2. Create a cluster role and cluster role binding for the service account: ```shell - kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/crds-nap-waf.yaml + kubectl apply -f rbac/rbac.yaml ``` -3. If you would like to use the NGINX App Protect DoS module, you will need to create custom resource definitions for - `APDosPolicy`, `APDosLogConf` and `DosProtectedResource`: +3. (App Protect only) Create the App Protect role and role binding: ```shell - kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/crds-nap-dos.yaml + kubectl apply -f rbac/ap-rbac.yaml ``` ---- +4. (App Protect DoS only) Create the App Protect DoS role and role binding: -## 2. 
Deploying NGINX Ingress Controller + + ```shell + kubectl apply -f rbac/apdos-rbac.yaml + ``` -The NGINX Ingress Controller repository contains deployment files with all the resources needed in a single file (except -for the CRDs above). You can run the commands as is or or customize them according to your requirements, for example to -update the [command line arguments](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments) -documentation for more details. +{{}} To perform this step you must be a cluster admin. Follow the documentation of your Kubernetes platform to configure the admin access. For Google Kubernetes Engine, see their [Role-Based Access Control](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control) documentation.{{}} --- -### 2.1 Running NGINX Ingress Controller +## 2. Create Common Resources -There are multiple sample deployment files available in the repository. Choose the one that best suits your needs. +In this section, we create resources common for most of NGINX Ingress Controller installations: +{{}} +Installing the `default-server-secret.yaml` is optional and is required only if you are using the [default server TLS secret](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments#cmdoption-default-server-tls-secret) command line argument. It is recommended that users provide their own certificate. +Otherwise, step 1 can be ignored. +{{}} -{{}} +1. Create a secret with a TLS certificate and a key for the default server in NGINX (below assumes you are in the `kubernetes-ingress/deployments` directory): -{{%tab name="Deployment"%}} + ```console + kubectl apply -f ../examples/shared-examples/default-server-secret/default-server-secret.yaml + ``` -This is a default deployment file. It deploys the NGINX Ingress Controller as a Deployment. 
+ {{}} The default server returns the Not Found page with the 404 status code for all requests for domains for which there are no Ingress rules defined. For testing purposes we include a self-signed certificate and key that we generated. However, we recommend that you use your own certificate and key. {{}} -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/default/deploy.yaml -``` +1. Create a config map for customizing NGINX configuration: -{{%/tab%}} + ```console + kubectl apply -f common/nginx-config.yaml + ``` -{{%tab name="DaemonSet"%}} +1. Create an IngressClass resource: -This is a default daemonset file. It deploys the NGINX Ingress Controller as a DaemonSet. + ```console + kubectl apply -f common/ingress-class.yaml + ``` -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/default/daemonset.yaml -``` + If you would like to set this NGINX Ingress Controller instance as the default, uncomment the annotation `ingressclass.kubernetes.io/is-default-class`. With this annotation set to true all the new Ingresses without an ingressClassName field specified will be assigned this IngressClass. -{{%/tab%}} + {{}} NGINX Ingress Controller will fail to start without an IngressClass resource. {{}} -{{%tab name="Azure"%}} +--- -Deploys NGINX Ingress Controller using a nodeSelector to deploy the controller on Azure nodes. +## 3. Create Custom Resources -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/azure/deploy.yaml -``` +{{}} +By default, it is required to create custom resource definitions for VirtualServer, VirtualServerRoute, TransportServer and Policy. Otherwise, NGINX Ingress Controller pods will not become `Ready`. 
If you'd like to disable that requirement, configure [`-enable-custom-resources`](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments#cmdoption-global-configuration) command-line argument to `false` and skip this section. +{{}} -{{%/tab%}} +1. Create custom resource definitions for [VirtualServer and VirtualServerRoute](/nginx-ingress-controller/configuration/virtualserver-and-virtualserverroute-resources), [TransportServer](/nginx-ingress-controller/configuration/transportserver-resource) and [Policy](/nginx-ingress-controller/configuration/policy-resource) resources: -{{%tab name="AWS NLB"%}} + ```console + kubectl apply -f common/crds/k8s.nginx.org_virtualservers.yaml + kubectl apply -f common/crds/k8s.nginx.org_virtualserverroutes.yaml + kubectl apply -f common/crds/k8s.nginx.org_transportservers.yaml + kubectl apply -f common/crds/k8s.nginx.org_policies.yaml + ``` - Deploys NGINX Ingress Controller using a Service type of `LoadBalancer` to allocate an AWS - Network Load Balancer (NLB). +2. If you would like to use the TCP and UDP load balancing features, create a custom resource definition for the [GlobalConfiguration](/nginx-ingress-controller/configuration/global-configuration/globalconfiguration-resource) resource: -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/aws-nlb/deploy.yaml -``` + ```console + kubectl apply -f common/crds/k8s.nginx.org_globalconfigurations.yaml + ``` -{{%/tab%}} +3. If you would like to use the App Protect WAF module, you will need to create custom resource definitions for `APPolicy`, `APLogConf` and `APUserSig`: -{{%tab name="OIDC"%}} + ```console + kubectl apply -f common/crds/appprotect.f5.com_aplogconfs.yaml + kubectl apply -f common/crds/appprotect.f5.com_appolicies.yaml + kubectl apply -f common/crds/appprotect.f5.com_apusersigs.yaml + ``` -Deploys NGINX Ingress Controller with OpenID Connect (OIDC) authentication enabled. +4. 
If you would like to use the App Protect DoS module, you will need to create custom resource definitions for `APDosPolicy`, `APDosLogConf` and `DosProtectedResource`: -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/oidc/deploy.yaml -``` + ```console + kubectl apply -f common/crds/appprotectdos.f5.com_apdoslogconfs.yaml + kubectl apply -f common/crds/appprotectdos.f5.com_apdospolicy.yaml + kubectl apply -f common/crds/appprotectdos.f5.com_dosprotectedresources.yaml + ``` -{{%/tab%}} +--- -{{%tab name="NGINX Plus"%}} +## 4. Deploying NGINX Ingress Controller -Deploys NGINX Ingress Controller with the NGINX Plus. The image is pulled from the -NGINX Plus Docker registry, and the `imagePullSecretName` is the name of the secret to use to pull the image. -The secret must be created in the same namespace as the NGINX Ingress Controller. +There are two options for deploying NGINX Ingress Controller: -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/nginx-plus/deploy.yaml -``` +- *Deployment*. Use a Deployment if you plan to dynamically change the number of Ingress Controller replicas. +- *DaemonSet*. Use a DaemonSet for deploying the Ingress Controller on every node or a subset of nodes. -{{%/tab%}} +Additionally, if you would like to use the NGINX App Protect DoS module, you'll need to deploy the Arbitrator. -{{%tab name="NGINX App Protect WAF"%}} +{{}} Before creating a Deployment or Daemonset resource, make sure to update the [command-line arguments](/nginx-ingress-controller/configuration/global-configuration/command-line-arguments) of NGINX Ingress Controller container in the corresponding manifest file according to your requirements. {{}} -Deploys NGINX Ingress Controller with the NGINX App Protect WAF module enabled. The image is pulled from the NGINX Plus -Docker registry, and the `imagePullSecretName` is the name of the secret to use to pull the image. 
The secret must be -created in the same namespace as the NGINX Ingress Controller. +--- -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/appprotect-waf/deploy.yaml -``` +### Deploying Arbitrator for NGINX App Protect DoS -{{%/tab%}} +There are two steps for deploying NGINX Ingress Controller with the NGINX App Protect DoS module: -{{%tab name="NGINX App Protect DoS"%}} +1. Build your own image and push it to your private Docker registry by following the instructions from [here](/nginx-ingress-controller/app-protect-dos/installation#Build-the-app-protect-dos-arb-Docker-Image). -Deploys NGINX Ingress Controller with the NGINX App Protect DoS module enabled. The image is pulled from the NGINX Plus -Docker registry, and the `imagePullSecretName` is the name of the secret to use to pull the image. The secret must be -created in the same namespace as the NGINX Ingress Controller. +1. Run the Arbitrator by using a Deployment and Service -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/appprotect-dos/deploy.yaml -``` + ```console + kubectl apply -f deployment/appprotect-dos-arb.yaml + kubectl apply -f service/appprotect-dos-arb-svc.yaml + ``` -{{%/tab%}} +--- -{{%tab name="Read-only filesystem"%}} +### 4.1 Running NGINX Ingress Controller -Deploys NGINX Ingress Controller with a read-only filesystem. +#### Using a Deployment -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/read-only-fs/deploy.yaml -``` +When you run NGINX Ingress Controller by using a Deployment, by default, Kubernetes will create one NGINX Ingress Controller pod. -{{%/tab%}} +For NGINX, run: -{{%tab name="NodePort"%}} +```console +kubectl apply -f deployment/nginx-ingress.yaml +``` -Deploys NGINX Ingress Controller using a Service type of `NodePort`. 
+For NGINX Plus, run: -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/nodeport/deploy.yaml +```console +kubectl apply -f deployment/nginx-plus-ingress.yaml ``` -{{%/tab%}} +{{}} Update the `nginx-plus-ingress.yaml` with the chosen image from the F5 Container registry; or the container image that you have built. {{}} -{{%tab name="Edge"%}} - -Deploys NGINX Ingress Controller using the `edge` tag from Docker Hub. See the -[README](https://github.com/nginxinc/kubernetes-ingress/blob/main/README.md#nginx-ingress-controller-releases) -for more information on the different tags. +--- -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/edge/deploy.yaml -``` +#### Using a DaemonSet -{{%/tab%}} +When you run the Ingress Controller by using a DaemonSet, Kubernetes will create an Ingress Controller pod on every node of the cluster. -{{%tab name="Service Insight"%}} +{{}} Read the Kubernetes [DaemonSet docs](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) to learn how to run NGINX Ingress Controller on a subset of nodes instead of on every node of the cluster.{{}} -Deploys NGINX Ingress Controller with Service Insight enabled. +For NGINX, run: -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/service-insight/deploy.yaml +```console +kubectl apply -f daemon-set/nginx-ingress.yaml ``` -{{%/tab%}} - -{{%tab name="External DNS"%}} +For NGINX Plus, run: -Deploys NGINX Ingress Controller with External DNS enabled. 
- -```shell -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/external-dns/deploy.yaml +```console +kubectl apply -f daemon-set/nginx-plus-ingress.yaml ``` -{{%/tab%}} - -{{}} +{{}}Update `nginx-plus-ingress.yaml` with the chosen image from the F5 Container registry; or the container image that you have built.{{}} --- @@ -230,55 +208,81 @@ kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v Run the following command to make sure that the NGINX Ingress Controller pods are running: -```shell +```console kubectl get pods --namespace=nginx-ingress ``` ## 5. Getting Access to NGINX Ingress Controller -If you deployed a DaemonSet, ports 80 and 443 of NGINX Ingress Controller container are mapped to the same ports of the -node where the container is running. To access NGINX Ingress Controller, use those ports and an IP address of any node -of the cluster where the Ingress Controller is running. +**If you created a daemonset**, ports 80 and 443 of NGINX Ingress Controller container are mapped to the same ports of the node where the container is running. To access NGINX Ingress Controller, use those ports and an IP address of any node of the cluster where the Ingress Controller is running. + +**If you created a deployment**, there are two options for accessing NGINX Ingress Controller pods: + +### 5.1 Create a Service for the NGINX Ingress Controller Pods -If you deployed a Deployment, there are two options for accessing NGINX Ingress Controller pods: +#### Using a NodePort Service + +Create a service with the type *NodePort*: + +```console +kubectl create -f service/nodeport.yaml +``` -- If the LoadBalancer type is `NodePort`, Kubernetes will randomly allocate two ports on every node of the cluster. -To access the Ingress Controller, use an IP address of any node of the cluster along with the two allocated ports. +Kubernetes will randomly allocate two ports on every node of the cluster. 
To access the Ingress Controller, use an IP address of any node of the cluster along with the two allocated ports. {{}} Read more about the type NodePort in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport). {{}} -- If the LoadBalancer type is `LoadBalancer`: - - For GCP or Azure, Kubernetes will allocate a cloud load balancer for load balancing the Ingress Controller pods. - Use the public IP of the load balancer to access NGINX Ingress Controller. - - For AWS, Kubernetes will allocate a Network Load Balancer (NLB) in TCP mode with the PROXY protocol enabled to pass - the client's information (the IP address and the port). +#### Using a LoadBalancer Service - {{}} For AWS, additional options regarding an allocated load balancer are available, such as its type and SSL - termination. Read the [Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer) to learn more. - {{}} +1. Create a service using a manifest for your cloud provider: + - For GCP or Azure, run: - Kubernetes will allocate and configure a cloud load balancer for load balancing the Ingress Controller pods. + ```shell + kubectl apply -f service/loadbalancer.yaml + ``` - Use the public IP of the load balancer to access NGINX Ingress Controller. To get the public IP: - - For GCP or Azure, run: + - For AWS, run: - ```shell - kubectl get svc nginx-ingress --namespace=nginx-ingress - ``` + ```shell + kubectl apply -f service/loadbalancer-aws-elb.yaml + ``` - - In case of AWS ELB, the public IP is not reported by `kubectl`, because the ELB IP addresses are not static. In - general, you should rely on the ELB DNS name instead of the ELB IP addresses. 
However, for testing purposes, you - can get the DNS name of the ELB using `kubectl describe` and then run `nslookup` to find the associated IP address: + Kubernetes will allocate a Classic Load Balancer (ELB) in TCP mode with the PROXY protocol enabled to pass the client's information (the IP address and the port). NGINX must be configured to use the PROXY protocol: + - Add the following keys to the config map file `nginx-config.yaml` from the Step 2: - ```shell - kubectl describe svc nginx-ingress --namespace=nginx-ingress - ``` + ```yaml + proxy-protocol: "True" + real-ip-header: "proxy_protocol" + set-real-ip-from: "0.0.0.0/0" + ``` - You can resolve the DNS name into an IP address using `nslookup`: + - Update the config map: - ```shell - nslookup - ``` + ```shell + kubectl apply -f common/nginx-config.yaml + ``` + + {{}} For AWS, additional options regarding an allocated load balancer are available, such as its type and SSL termination. Read the [Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer) to learn more. {{}} + + Kubernetes will allocate and configure a cloud load balancer for load balancing the Ingress Controller pods. +2. Use the public IP of the load balancer to access NGINX Ingress Controller. To get the public IP: + - For GCP or Azure, run: + + ```shell + kubectl get svc nginx-ingress --namespace=nginx-ingress + ``` + + - In case of AWS ELB, the public IP is not reported by `kubectl`, because the ELB IP addresses are not static. In general, you should rely on the ELB DNS name instead of the ELB IP addresses. 
However, for testing purposes, you can get the DNS name of the ELB using `kubectl describe` and then run `nslookup` to find the associated IP address: + + ```shell + kubectl describe svc nginx-ingress --namespace=nginx-ingress + ``` + + You can resolve the DNS name into an IP address using `nslookup`: + + ```shell + nslookup + ``` The public IP can be reported in the status of an ingress resource. See the [Reporting Resources Status doc](/nginx-ingress-controller/configuration/global-configuration/reporting-resources-status) for more details. @@ -286,8 +290,7 @@ To access the Ingress Controller, use an IP address of any node of the cluster a ## Uninstall NGINX Ingress Controller -1. Delete the `nginx-ingress` namespace to uninstall NGINX Ingress Controller along with all the auxiliary resources - that were created: +1. Delete the `nginx-ingress` namespace to uninstall NGINX Ingress Controller along with all the auxiliary resources that were created: ```shell kubectl delete namespace nginx-ingress @@ -305,5 +308,5 @@ To access the Ingress Controller, use an IP address of any node of the cluster a {{}} This step will also remove all associated Custom Resources. {{}} ```shell - kubectl delete -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/crds.yaml + kubectl delete -f common/crds/ ``` diff --git a/docs/content/installation/using-the-jwt-token-docker-secret.md b/docs/content/installation/using-the-jwt-token-docker-secret.md index e975475ea8..8ccd0bec63 100644 --- a/docs/content/installation/using-the-jwt-token-docker-secret.md +++ b/docs/content/installation/using-the-jwt-token-docker-secret.md @@ -53,7 +53,7 @@ You will need the following information from [MyF5](https://my.f5.com) for these ```shell kubectl get secret regcred --output=yaml - ``` + ``` 1. You can now use the newly created Kubernetes secret in `helm` and `manifest` deployments. 
@@ -71,7 +71,7 @@ spec: seccompProfile: type: RuntimeDefault containers: - - image: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:3.3.2 + - image: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:3.3.0 imagePullPolicy: IfNotPresent name: nginx-plus-ingress ``` @@ -87,7 +87,7 @@ If you are using `helm` for deployment, there are two main methods: using *sourc The [Helm installation page for NGINX Ingress Controller](https://docs.nginx.com/nginx-ingress-controller/installation/installation-with-helm/#managing-the-chart-via-sources) has a section describing how to use sources: these are the unique steps for Docker secrets using JWT tokens. 1. Clone the NGINX [`kubernetes-ingress` repository](https://github.com/nginxinc/kubernetes-ingress). -1. Navigate to the `charts/nginx-ingress` folder of your local clone. +1. Navigate to the `deployments/helm-chart` folder of your local clone. 1. Open the `values.yaml` file in an editor. You must change a few lines NGINX Ingress Controller with NGINX Plus to be deployed. @@ -109,7 +109,7 @@ image: repository: private-registry.nginx.com/nginx-ic/nginx-plus-ingress ## The version tag - tag: 3.3.2 + tag: 3.3.0 serviceAccount: ## The annotations of the service account of the Ingress Controller pods. @@ -139,7 +139,7 @@ If the namespace does not exist, `--create-namespace` will create it. Using `-f If you want to install NGINX Ingress Controller using the charts method, the following is an example of using the command line to pass the required arguments using the `set` parameter. 
```shell -helm install my-release -n nginx-ingress oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.2 --set controller.image.repository=private-registry.nginx.com/nginx-ic/nginx-plus-ingress --set controller.image.tag=3.3.2 --set controller.nginxplus=true --set controller.serviceAccount.imagePullSecretName=regcred +helm install my-release -n nginx-ingress oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.0 --set controller.image.repository=private-registry.nginx.com/nginx-ic/nginx-plus-ingress --set controller.image.tag=3.3.0 --set controller.nginxplus=true --set controller.serviceAccount.imagePullSecretName=regcred ``` Checking the validation that the .crts/key and .jwt are able to successfully authenticate to the repo to pull NGINX Ingress controller images: diff --git a/docs/content/troubleshooting/troubleshoot-common.md b/docs/content/troubleshooting/troubleshoot-common.md index c29e0f02eb..63ff8f31e8 100644 --- a/docs/content/troubleshooting/troubleshoot-common.md +++ b/docs/content/troubleshooting/troubleshoot-common.md @@ -145,7 +145,7 @@ controller: nginxplus: plus image: repository: nginx/nginx-ingress - tag: 3.3.2 + tag: 3.3.0 # NGINX Configmap config: entries: diff --git a/docs/content/tutorials/custom-listen-ports.md b/docs/content/tutorials/custom-listen-ports.md index 2a38a1efaf..b7fcac13ca 100644 --- a/docs/content/tutorials/custom-listen-ports.md +++ b/docs/content/tutorials/custom-listen-ports.md @@ -88,7 +88,7 @@ spec: spec: serviceAccountName: nginx-ingress containers: - - image: nginx/nginx-ingress:3.3.2 + - image: nginx/nginx-ingress:3.3.0 imagePullPolicy: IfNotPresent name: nginx-ingress ports: diff --git a/docs/content/tutorials/nginx-ingress-osm.md b/docs/content/tutorials/nginx-ingress-osm.md index 9781bba64b..440a65f5a4 100644 --- a/docs/content/tutorials/nginx-ingress-osm.md +++ b/docs/content/tutorials/nginx-ingress-osm.md @@ -15,14 +15,14 @@ Open Service Mesh will work with both versions of [F5 NGINX Ingress 
controller]( Below is a link to the official F5 NGINX Ingress Controller documentation. [F5 NGINX Ingress controller](https://docs.nginx.com/nginx-ingress-controller/) -## Integrating NGINX Ingress Controller with Open Service Mesh +# Integrating NGINX Ingress Controller with Open Service Mesh There are two ways to integrate the NGINX Ingress Controller with Open Service Mesh (OSM): 1. Injecting an envoy sidecar directly with NGINX Ingress Controller. 2. Using the Open Service Mesh `ingressBackend` "proxy" feature. -## NGINX Ingress controller and OSM with sidecar proxy injected +# NGINX Ingress controller and OSM with sidecar proxy injected Install OSM in the cluster @@ -49,7 +49,7 @@ osm namespace add nginx-ingress --mesh-name osm-nginx The above command will use the mark the `nginx-ingress` namespace, where OSM will be installed (sidecar) -## Install F5 NGINX Ingress controller +# Install F5 NGINX Ingress controller Links to the complete install guides: @@ -58,7 +58,7 @@ Links to the complete install guides: When using the sidecar method, ensure that you add the correct annotations listed below. This ensures proper integration of NGINX Ingress Controller with the envoy sidecar proxy. -### Helm installs +## Helm installs If using `helm`, add the following `annotation` to your `values.yaml` file: @@ -79,7 +79,7 @@ helm install nic01 nginx-stable/nginx-ingress -n nginx-ingress --create-namespac Change your `release` accordingly to match your environment. -### Manifest installs +## Manifest installs For your `manifest` deployments, add the following `annotation`. @@ -88,12 +88,6 @@ annotations: openservicemesh.io/inbound-port-exclusion-list: "80,443" ``` -or you can use the provided `yaml` file in the `deploy` directory. 
- -```console -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deploy/openservicemesh/deploy.yaml -``` - ### Sample deployment file with required annotation ```yaml @@ -226,7 +220,7 @@ Once OSM has been installed, this next command will mark the NGINX Ingress Contr osm namespace add nginx-ingress --mesh-name osm-nginx --disable-sidecar-injection ``` -## Install F5 NGINX Ingress controller +# Install F5 NGINX Ingress controller Links to the complete install guides: diff --git a/docs/content/usage-reporting.md b/docs/content/usage-reporting.md index 6f900d812b..5453fd56a7 100644 --- a/docs/content/usage-reporting.md +++ b/docs/content/usage-reporting.md @@ -87,7 +87,7 @@ To make the credential available to Usage Reporting, we need to create a Kuberne If you need to update the basic-auth credentials for NGINX Management Suite in the future, update the `username` and `password` fields, and apply the changes by running the command again. Usage Reporting will automatically detect the changes, using the new username and password without redeployment. -5. Download and save the deployment file [cluster-connector.yaml](https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/examples/shared-examples/usage-reporting/cluster-connector.yaml). Edit the following under the `args` section and then save the file: +5. Download and save the deployment file [cluster-connector.yaml](https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.0/examples/shared-examples/usage-reporting/cluster-connector.yaml). 
Edit the following under the `args` section and then save the file: ```yaml args: diff --git a/examples/custom-resources/service-insight/README.md b/examples/custom-resources/service-insight/README.md index 765c333243..09c4e8b40a 100644 --- a/examples/custom-resources/service-insight/README.md +++ b/examples/custom-resources/service-insight/README.md @@ -3,11 +3,12 @@ > The Service Insight feature is available only for F5 NGINX Plus. To use the [Service Insight](https://docs.nginx.com/nginx-ingress-controller/logging-and-monitoring/service-insight/) -feature provided by F5 NGINX Ingress Controller you must enable it by setting `serviceInsight.create` to `true` when -using the Helm Chart or by using the [manifest](../../../deploy/service-insight/deploy.yaml) depending on your +feature provided by F5 NGINX Ingress Controller you must enable it by setting `serviceInsight.create=true` in your `helm +install/upgrade...` command OR [manifest](../../../deployments/deployment/nginx-plus-ingress.yaml) depending on your preferred installation method. -The following example is an extract of the Service Insight Deployment for NGINX Ingress Controller using the manifest above: +The following example demonstrates how to enable the Service Insight for NGINX Ingress Controller using [manifests +(Deployment)](../../../deployments/deployment/nginx-plus-ingress.yaml): ```yaml apiVersion: apps/v1 @@ -31,7 +32,7 @@ spec: securityContext: ... 
containers: - - image: nginx-plus-ingress:3.3.2 + - image: nginx-plus-ingress:3.3.0 imagePullPolicy: IfNotPresent name: nginx-plus-ingress ports: @@ -66,11 +67,9 @@ spec: ## Deployment -Install NGINX Ingress Controller with: - -```console -kubectl apply -f https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/main/deploy/service-insight/deploy.yaml -``` +[Install NGINX Ingress +Controller](https://docs.nginx.com/nginx-ingress-controller/installation/installation-with-manifests/), and uncomment +the `-enable-service-insight` option: this will allow Service Insight to interact with it. The examples below use the `nodeport` service. @@ -298,7 +297,7 @@ Response: ## Service Insight with TLS The following example demonstrates how to enable the Service Insight for NGINX Ingress Controller with **TLS** using -manifests (Deployment): +[manifests (Deployment)](../../../deployments/deployment/nginx-plus-ingress.yaml): ```yaml apiVersion: apps/v1 @@ -322,7 +321,7 @@ spec: securityContext: ... containers: - - image: nginx-plus-ingress:3.3.2 + - image: nginx-plus-ingress:3.3.0 imagePullPolicy: IfNotPresent name: nginx-plus-ingress ports: diff --git a/examples/helm-chart/README.md b/examples/helm-chart/README.md deleted file mode 100644 index fa968a2ca7..0000000000 --- a/examples/helm-chart/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# Helm Chart Examples - -This directory contains examples of Helm charts that can be used to deploy -NGINX Ingress Controller in a Kubernetes cluster. - -## Prerequisites - -- Helm 3.0+ - -## Examples - -- [Default](./default) - deploys the NGINX Ingress Controller with default parameters. -- [NGINX App Protect DoS](./app-protect-dos) - deploys the NGINX Ingress Controller with the NGINX App Protect DoS - module enabled. The image is pulled from the NGINX Plus Docker registry, and the `imagePullSecretName` is the name of - the secret to use to pull the image. The secret must be created in the same namespace as the NGINX Ingress Controller. 
-- [NGINX App Protect WAF](./app-protect-waf) - deploys the NGINX Ingress Controller with the NGINX App Protect WAF - module enabled. The image is pulled from the NGINX Plus Docker registry, and the `imagePullSecretName` is the name of - the secret to use to pull the image. The secret must be created in the same namespace as the NGINX Ingress Controller. -- [AWS NLB](./aws-nlb) - deploys the NGINX Ingress Controller using a Service type of `LoadBalancer` to allocate an AWS - Network Load Balancer (NLB). -- [Azure](./azure) - deploys the NGINX Ingress Controller using a nodeSelector to deploy the controller on Azure nodes. -- [DaemonSet](./daemonset) - deploys the NGINX Ingress Controller as a DaemonSet. -- [Edge](./edge) - deploys the NGINX Ingress Controller using the `edge` tag from Docker Hub. - See the [README](../../README.md#nginx-ingress-controller-releases) for more information on the different tags. -- [NGINX Plus](./nginx-plus) - deploys the NGINX Ingress Controller with the NGINX Plus. The image is pulled from the - NGINX Plus Docker registry, and the `imagePullSecretName` is the name of the secret to use to pull the image. - The secret must be created in the same namespace as the NGINX Ingress Controller. -- [OIDC](./oidc) - deploys the NGINX Ingress Controller with OpenID Connect (OIDC) authentication enabled. -- [Read-only filesystem](./read-only-filesystem) - deploys the NGINX Ingress Controller with a read-only filesystem. -- [NodePort](./nodeport) - deploys the NGINX Ingress Controller using a Service type of `NodePort`. -- [Service Insight](./service-insight) - deploys the NGINX Ingress Controller with Service Insight enabled. -- [External DNS](./external-dns) - deploys the NGINX Ingress Controller with External DNS enabled. - -## Manifests generation - -These examples are used to generate manifests for the NGINX Ingress Controller located in the manifest folder -[here](../../deploy). 
- -If you want to generate manifests for a specific example, or need to customize one of the examples, run the following -command from the root of the project: - -```shell -helm template nginx-ingress --namespace nginx-ingress --values examples/helm-chart//values.yaml charts/nginx-ingress -``` diff --git a/examples/helm-chart/app-protect-dos/values.yaml b/examples/helm-chart/app-protect-dos/values.yaml deleted file mode 100644 index d71530b7c4..0000000000 --- a/examples/helm-chart/app-protect-dos/values.yaml +++ /dev/null @@ -1,10 +0,0 @@ -controller: - name: controller - kind: deployment - nginxplus: true - appprotectdos: - enable: true - image: - repository: private-registry.nginx.com/nginx-ic-dos/nginx-plus-ingress - serviceAccount: - imagePullSecretName: nginx-registry-credentials diff --git a/examples/helm-chart/app-protect-waf/values.yaml b/examples/helm-chart/app-protect-waf/values.yaml deleted file mode 100644 index 060f6656c7..0000000000 --- a/examples/helm-chart/app-protect-waf/values.yaml +++ /dev/null @@ -1,11 +0,0 @@ -controller: - name: controller - kind: deployment - nginxplus: true - appprotect: - enable: true - logLevel: error - image: - repository: private-registry.nginx.com/nginx-ic-nap/nginx-plus-ingress - serviceAccount: - imagePullSecretName: nginx-registry-credentials diff --git a/examples/helm-chart/aws-nlb/values.yaml b/examples/helm-chart/aws-nlb/values.yaml deleted file mode 100644 index e5c8250ff2..0000000000 --- a/examples/helm-chart/aws-nlb/values.yaml +++ /dev/null @@ -1,13 +0,0 @@ -controller: - name: controller - kind: deployment - service: - type: LoadBalancer - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: "nlb" - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" - config: - entries: - proxy-protocol: "True" - real-ip-header: "proxy_protocol" - set-real-ip-from: "0.0.0.0/0" diff --git a/examples/helm-chart/azure/values.yaml b/examples/helm-chart/azure/values.yaml deleted file mode 100644 index 
03547143be..0000000000 --- a/examples/helm-chart/azure/values.yaml +++ /dev/null @@ -1,5 +0,0 @@ -controller: - name: controller - kind: deployment - nodeSelector: - kubernetes.io/os: linux diff --git a/examples/helm-chart/daemon-set/values.yaml b/examples/helm-chart/daemon-set/values.yaml deleted file mode 100644 index 7cf8214e66..0000000000 --- a/examples/helm-chart/daemon-set/values.yaml +++ /dev/null @@ -1,3 +0,0 @@ -controller: - name: controller - kind: daemonset diff --git a/examples/helm-chart/default/values.yaml b/examples/helm-chart/default/values.yaml deleted file mode 100644 index 3552bbf27c..0000000000 --- a/examples/helm-chart/default/values.yaml +++ /dev/null @@ -1,2 +0,0 @@ -controller: - name: controller diff --git a/examples/helm-chart/edge/values.yaml b/examples/helm-chart/edge/values.yaml deleted file mode 100644 index 0e85649cfb..0000000000 --- a/examples/helm-chart/edge/values.yaml +++ /dev/null @@ -1,7 +0,0 @@ -controller: - name: controller - kind: deployment - image: - tag: edge - service: - type: NodePort diff --git a/examples/helm-chart/external-dns/values.yaml b/examples/helm-chart/external-dns/values.yaml deleted file mode 100644 index 47166a9e17..0000000000 --- a/examples/helm-chart/external-dns/values.yaml +++ /dev/null @@ -1,3 +0,0 @@ -controller: - name: controller - enableExternalDNS: true diff --git a/examples/helm-chart/nginx-plus/values.yaml b/examples/helm-chart/nginx-plus/values.yaml deleted file mode 100644 index 8ab0626e49..0000000000 --- a/examples/helm-chart/nginx-plus/values.yaml +++ /dev/null @@ -1,8 +0,0 @@ -controller: - name: controller - kind: deployment - nginxplus: true - image: - repository: private-registry.nginx.com/nginx-ic/nginx-plus-ingress - serviceAccount: - imagePullSecretName: nginx-registry-credentials diff --git a/examples/helm-chart/nodeport/values.yaml b/examples/helm-chart/nodeport/values.yaml deleted file mode 100644 index 18922539ef..0000000000 --- a/examples/helm-chart/nodeport/values.yaml +++ 
/dev/null @@ -1,4 +0,0 @@ -controller: - name: controller - service: - type: NodePort diff --git a/examples/helm-chart/oidc/values.yaml b/examples/helm-chart/oidc/values.yaml deleted file mode 100644 index 3586cecf83..0000000000 --- a/examples/helm-chart/oidc/values.yaml +++ /dev/null @@ -1,9 +0,0 @@ -controller: - name: controller - kind: deployment - nginxplus: true - enableOIDC: true - image: - repository: private-registry.nginx.com/nginx-ic/nginx-plus-ingress - serviceAccount: - imagePullSecretName: nginx-registry-credentials diff --git a/examples/helm-chart/openservicemesh/values.yaml b/examples/helm-chart/openservicemesh/values.yaml deleted file mode 100644 index 3ce77eec48..0000000000 --- a/examples/helm-chart/openservicemesh/values.yaml +++ /dev/null @@ -1,5 +0,0 @@ -controller: - name: controller - pod: - annotations: - openservicemesh.io/inbound-port-exclusion-list: "80, 443" diff --git a/examples/helm-chart/read-only-fs/values.yaml b/examples/helm-chart/read-only-fs/values.yaml deleted file mode 100644 index b9ffdd47b7..0000000000 --- a/examples/helm-chart/read-only-fs/values.yaml +++ /dev/null @@ -1,5 +0,0 @@ -controller: - name: controller - kind: deployment - enableCustomResources: false - readOnlyRootFilesystem: true diff --git a/examples/helm-chart/service-insight/values.yaml b/examples/helm-chart/service-insight/values.yaml deleted file mode 100644 index c69fe1babc..0000000000 --- a/examples/helm-chart/service-insight/values.yaml +++ /dev/null @@ -1,5 +0,0 @@ -controller: - name: controller - nginxplus: true - serviceInsight: - create: true diff --git a/hack/generate-manifests.sh b/hack/generate-manifests.sh deleted file mode 100755 index 2844c5d1cf..0000000000 --- a/hack/generate-manifests.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash - -# Generate deployment files using Helm. 
This script uses the Helm chart examples in examples/helm-chart - -charts=$(find examples/helm-chart -maxdepth 1 -mindepth 1 -type d -exec basename {} \;) - -for chart in $charts; do - manifest=deploy/$chart/deploy.yaml - helm template nginx-ingress --namespace nginx-ingress --values examples/helm-chart/$chart/values.yaml --skip-crds charts/nginx-ingress >$manifest 2>/dev/null - sed -i.bak '/app.kubernetes.io\/managed-by: Helm/d' $manifest - sed -i.bak '/helm.sh/d' $manifest - cp $manifest config/base - if [ "$chart" == "app-protect-dos" ]; then - kustomize build config/overlays/app-protect-dos >$manifest - else - kustomize build config/base >$manifest - fi - rm -f config/base/deploy.yaml - rm -f $manifest.bak -done diff --git a/perf-tests/suite/test_ap_reload_perf.py b/perf-tests/suite/test_ap_reload_perf.py index bacf168f0a..83d2d76a9b 100644 --- a/perf-tests/suite/test_ap_reload_perf.py +++ b/perf-tests/suite/test_ap_reload_perf.py @@ -9,7 +9,7 @@ import requests import yaml from kubernetes.client import V1ContainerPort -from settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA from suite.utils.ap_resources_utils import ( create_ap_logconf_from_yaml, create_ap_policy_from_yaml, diff --git a/tests/Dockerfile b/tests/Dockerfile index b5c56976d0..05de516111 100644 --- a/tests/Dockerfile +++ b/tests/Dockerfile @@ -1,4 +1,4 @@ -# syntax=docker/dockerfile:1.6 +# syntax=docker/dockerfile:1.5 # this is here so we can grab the latest version of kind and have dependabot keep it up to date FROM kindest/node:v1.28.0 @@ -13,8 +13,8 @@ WORKDIR /workspace/tests COPY --link tests/requirements.txt /workspace/tests/ RUN pip install --require-hashes -r requirements.txt --no-deps +COPY --link deployments /workspace/deployments COPY --link config /workspace/config -COPY --link deploy /workspace/deploy RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s 
https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl \ && install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl \ diff --git a/tests/Makefile b/tests/Makefile index cc28c176ae..23d88310ba 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -12,7 +12,7 @@ KUBE_CONFIG_FOLDER = $${HOME}/.kube KIND_KUBE_CONFIG_FOLDER = $${HOME}/.kube/kind SHOW_IC_LOGS = no PYTEST_ARGS = -DOCKERFILEPATH = Dockerfile +DOCKERFILEPATH = docker/Dockerfile IP_FAMILY=dual diff --git a/tests/settings.py b/tests/settings.py index bf4ea4ae36..ed76c72c7f 100644 --- a/tests/settings.py +++ b/tests/settings.py @@ -2,6 +2,7 @@ import os BASEDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +DEPLOYMENTS = f"{BASEDIR}/deployments" CRDS = f"{BASEDIR}/config/crd/bases" PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__)) TEST_DATA = f"{PROJECT_ROOT}/data" diff --git a/tests/suite/fixtures/fixtures.py b/tests/suite/fixtures/fixtures.py index 4b37fd826d..de19a7a254 100644 --- a/tests/suite/fixtures/fixtures.py +++ b/tests/suite/fixtures/fixtures.py @@ -16,7 +16,7 @@ RbacAuthorizationV1Api, ) from kubernetes.client.rest import ApiException -from settings import ALLOWED_DEPLOYMENT_TYPES, ALLOWED_IC_TYPES, ALLOWED_SERVICE_TYPES, BASEDIR, CRDS, TEST_DATA +from settings import ALLOWED_DEPLOYMENT_TYPES, ALLOWED_IC_TYPES, ALLOWED_SERVICE_TYPES, CRDS, DEPLOYMENTS, TEST_DATA from suite.utils.custom_resources_utils import create_crd_from_yaml, delete_crd from suite.utils.kube_config_utils import ensure_context_in_config, get_current_context_name from suite.utils.resources_utils import ( @@ -228,9 +228,9 @@ def ingress_controller_prerequisites(cli_arguments, kube_apis, request) -> Ingre """ print("------------------------- Create IC Prerequisites -----------------------------------") rbac = configure_rbac(kube_apis.rbac_v1) - namespace = create_ns_and_sa_from_yaml(kube_apis.v1, f"{TEST_DATA}/common/ns-and-sa.yaml") + namespace = 
create_ns_and_sa_from_yaml(kube_apis.v1, f"{DEPLOYMENTS}/common/ns-and-sa.yaml") print("Create IngressClass resources:") - subprocess.run(["kubectl", "apply", "-f", f"{TEST_DATA}/common/ingress-class.yaml"]) + subprocess.run(["kubectl", "apply", "-f", f"{DEPLOYMENTS}/common/ingress-class.yaml"]) subprocess.run( [ "kubectl", @@ -239,7 +239,7 @@ def ingress_controller_prerequisites(cli_arguments, kube_apis, request) -> Ingre f"{TEST_DATA}/ingress-class/resource/custom-ingress-class-res.yaml", ] ) - config_map_yaml = f"{TEST_DATA}/common/nginx-config.yaml" + config_map_yaml = f"{DEPLOYMENTS}/common/nginx-config.yaml" create_configmap_from_yaml(kube_apis.v1, namespace, config_map_yaml) with open(config_map_yaml) as f: config_map = yaml.safe_load(f) @@ -250,7 +250,7 @@ def fin(): print("Clean up prerequisites") delete_namespace(kube_apis.v1, namespace) print("Delete IngressClass resources:") - subprocess.run(["kubectl", "delete", "-f", f"{TEST_DATA}/common/ingress-class.yaml"]) + subprocess.run(["kubectl", "delete", "-f", f"{DEPLOYMENTS}/common/ingress-class.yaml"]) subprocess.run( [ "kubectl", @@ -427,7 +427,7 @@ def fin(): kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) request.addfinalizer(fin) diff --git a/tests/suite/fixtures/ic_fixtures.py b/tests/suite/fixtures/ic_fixtures.py index 8c0e1d0915..a29724ea71 100644 --- a/tests/suite/fixtures/ic_fixtures.py +++ b/tests/suite/fixtures/ic_fixtures.py @@ -4,7 +4,7 @@ import pytest from kubernetes.client.rest import ApiException -from settings import CRDS, TEST_DATA +from settings import CRDS, DEPLOYMENTS, TEST_DATA from suite.utils.custom_resources_utils import create_crd_from_yaml, delete_crd from suite.utils.resources_utils import ( cleanup_rbac, @@ -103,7 +103,7 @@ def crd_ingress_controller( except ApiException as ex: # Finalizer method doesn't start if 
fixture creation was incomplete, ensure clean up here print("Restore the ClusterRole:") - patch_rbac(kube_apis.rbac_v1, f"{TEST_DATA}/rbac/rbac.yaml") + patch_rbac(kube_apis.rbac_v1, f"{DEPLOYMENTS}/rbac/rbac.yaml") print("Remove the IC:") delete_ingress_controller(kube_apis.apps_v1_api, name, cli_arguments["deployment-type"], namespace) pytest.fail("IC setup failed") @@ -111,7 +111,7 @@ def crd_ingress_controller( def fin(): if request.config.getoption("--skip-fixture-teardown") == "no": print("Restore the ClusterRole:") - patch_rbac(kube_apis.rbac_v1, f"{TEST_DATA}/rbac/rbac.yaml") + patch_rbac(kube_apis.rbac_v1, f"{DEPLOYMENTS}/rbac/rbac.yaml") print("Remove the IC:") delete_ingress_controller(kube_apis.apps_v1_api, name, cli_arguments["deployment-type"], namespace) @@ -276,8 +276,8 @@ def crd_ingress_controller_with_dos( kube_apis.v1, kube_apis.apps_v1_api, namespace, - f"{TEST_DATA}/deployment/appprotect-dos-arb.yaml", - f"{TEST_DATA}/service/appprotect-dos-arb-svc.yaml", + f"{DEPLOYMENTS}/deployment/appprotect-dos-arb.yaml", + f"{DEPLOYMENTS}/service/appprotect-dos-arb-svc.yaml", ) print("------------------------- Create IC -----------------------------------") @@ -396,7 +396,7 @@ def crd_ingress_controller_with_ed( except ApiException as ex: # Finalizer method doesn't start if fixture creation was incomplete, ensure clean up here print("Restore the ClusterRole:") - patch_rbac(kube_apis.rbac_v1, f"{TEST_DATA}/rbac/rbac.yaml") + patch_rbac(kube_apis.rbac_v1, f"{DEPLOYMENTS}/rbac/rbac.yaml") print("Remove the DNSEndpoint CRD:") delete_crd( kube_apis.api_extensions_v1, @@ -408,14 +408,14 @@ def crd_ingress_controller_with_ed( kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) pytest.fail("IC setup failed") def fin(): if request.config.getoption("--skip-fixture-teardown") == "no": print("Restore the 
ClusterRole:") - patch_rbac(kube_apis.rbac_v1, f"{TEST_DATA}/rbac/rbac.yaml") + patch_rbac(kube_apis.rbac_v1, f"{DEPLOYMENTS}/rbac/rbac.yaml") print("Remove the DNSEndpoint CRD:") delete_crd( kube_apis.api_extensions_v1, @@ -427,7 +427,7 @@ def fin(): kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) request.addfinalizer(fin) diff --git a/tests/suite/test_ac_policies.py b/tests/suite/test_ac_policies.py index a95bb44f93..0fb0771b21 100644 --- a/tests/suite/test_ac_policies.py +++ b/tests/suite/test_ac_policies.py @@ -1,6 +1,6 @@ import pytest import requests -from settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA from suite.utils.custom_resources_utils import read_custom_resource from suite.utils.policy_resources_utils import create_policy_from_yaml, delete_policy from suite.utils.resources_utils import ( @@ -17,7 +17,7 @@ patch_virtual_server_from_yaml, ) -std_cm_src = f"{TEST_DATA}/common/nginx-config.yaml" +std_cm_src = f"{DEPLOYMENTS}/common/nginx-config.yaml" test_cm_src = f"{TEST_DATA}/access-control/configmap/nginx-config.yaml" std_vs_src = f"{TEST_DATA}/access-control/standard/virtual-server.yaml" deny_pol_src = f"{TEST_DATA}/access-control/policies/access-control-policy-deny.yaml" diff --git a/tests/suite/test_ac_policies_vsr.py b/tests/suite/test_ac_policies_vsr.py index ff43ea212a..3d052fc4ff 100644 --- a/tests/suite/test_ac_policies_vsr.py +++ b/tests/suite/test_ac_policies_vsr.py @@ -1,12 +1,12 @@ import pytest import requests -from settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA from suite.utils.custom_resources_utils import read_custom_resource from suite.utils.policy_resources_utils import create_policy_from_yaml, delete_policy from suite.utils.resources_utils import replace_configmap_from_yaml, wait_before_test from 
suite.utils.vs_vsr_resources_utils import patch_v_s_route_from_yaml, patch_virtual_server_from_yaml -std_cm_src = f"{TEST_DATA}/common/nginx-config.yaml" +std_cm_src = f"{DEPLOYMENTS}/common/nginx-config.yaml" test_cm_src = f"{TEST_DATA}/access-control/configmap/nginx-config.yaml" std_vs_src = f"{TEST_DATA}/virtual-server-route/standard/virtual-server.yaml" deny_pol_src = f"{TEST_DATA}/access-control/policies/access-control-policy-deny.yaml" diff --git a/tests/suite/test_annotations.py b/tests/suite/test_annotations.py index 1afced1acc..e4079b8d8d 100644 --- a/tests/suite/test_annotations.py +++ b/tests/suite/test_annotations.py @@ -1,7 +1,7 @@ import pytest import yaml from kubernetes.client import NetworkingV1Api -from settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA from suite.fixtures.fixtures import PublicEndpoint from suite.utils.custom_assertions import assert_event_count_increased from suite.utils.resources_utils import ( @@ -145,7 +145,7 @@ def fin(): kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) delete_common_app(kube_apis, "simple", test_namespace) delete_items_from_yaml( diff --git a/tests/suite/test_app_protect_integration.py b/tests/suite/test_app_protect_integration.py index f9e96b6007..0866bc075a 100644 --- a/tests/suite/test_app_protect_integration.py +++ b/tests/suite/test_app_protect_integration.py @@ -1,7 +1,7 @@ import pytest import requests import yaml -from settings import CRDS, TEST_DATA +from settings import CRDS, DEPLOYMENTS, TEST_DATA from suite.utils.ap_resources_utils import ( create_ap_logconf_from_yaml, create_ap_policy_from_yaml, diff --git a/tests/suite/test_app_protect_waf_policies_grpc.py b/tests/suite/test_app_protect_waf_policies_grpc.py index aa7f9e7f13..4cb88493ea 100644 --- a/tests/suite/test_app_protect_waf_policies_grpc.py +++ 
b/tests/suite/test_app_protect_waf_policies_grpc.py @@ -1,6 +1,6 @@ import grpc import pytest -from settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA from suite.fixtures.custom_resource_fixtures import VirtualServerRoute, VirtualServerRouteSetup, VirtualServerSetup from suite.grpc.helloworld_pb2 import HelloRequest from suite.grpc.helloworld_pb2_grpc import GreeterStub @@ -155,7 +155,7 @@ def cleanup(kube_apis, ingress_controller_prerequisites, src_pol_name, test_name kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) delete_ap_logconf(kube_apis.custom_objects, log_name, test_namespace) delete_ap_policy(kube_apis.custom_objects, ap_pol_name, test_namespace) diff --git a/tests/suite/test_custom_annotations.py b/tests/suite/test_custom_annotations.py index bcd4a0fc68..aa0491f4a3 100644 --- a/tests/suite/test_custom_annotations.py +++ b/tests/suite/test_custom_annotations.py @@ -1,5 +1,5 @@ import pytest -from settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA from suite.fixtures.fixtures import PublicEndpoint from suite.utils.resources_utils import ( create_items_from_yaml, @@ -60,7 +60,7 @@ def fin(): kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) delete_items_from_yaml(kube_apis, ing_src, test_namespace) diff --git a/tests/suite/test_default_server.py b/tests/suite/test_default_server.py index 1a420fc604..31e5da5814 100644 --- a/tests/suite/test_default_server.py +++ b/tests/suite/test_default_server.py @@ -3,7 +3,7 @@ import pytest import requests from requests.exceptions import ConnectionError -from settings import BASEDIR, TEST_DATA +from settings import BASEDIR, DEPLOYMENTS, TEST_DATA from 
suite.utils.resources_utils import ( create_secret_from_yaml, delete_secret, diff --git a/tests/suite/test_transport_server_external_name.py b/tests/suite/test_transport_server_external_name.py index fbd20a355a..f0f77a526b 100644 --- a/tests/suite/test_transport_server_external_name.py +++ b/tests/suite/test_transport_server_external_name.py @@ -1,5 +1,5 @@ import pytest -from settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA from suite.utils.custom_assertions import assert_event from suite.utils.resources_utils import ( create_items_from_yaml, @@ -64,7 +64,7 @@ def fin(): kube_apis.v1, config_map_name, ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) request.addfinalizer(fin) @@ -143,7 +143,7 @@ def test_event_warning( kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) wait_before_test(5) events = get_events(kube_apis.v1, transport_server_setup.namespace) diff --git a/tests/suite/test_transport_server_service_insight.py b/tests/suite/test_transport_server_service_insight.py index b4b7ce4d17..9bda5959f3 100644 --- a/tests/suite/test_transport_server_service_insight.py +++ b/tests/suite/test_transport_server_service_insight.py @@ -3,7 +3,7 @@ import pytest import requests -from settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA from suite.fixtures.fixtures import PublicEndpoint from suite.utils.custom_resources_utils import create_ts_from_yaml, delete_ts, read_ts from suite.utils.resources_utils import ( diff --git a/tests/suite/test_ts_tls_passthrough.py b/tests/suite/test_ts_tls_passthrough.py index 8ce1ead4d2..14dd36fc67 100644 --- a/tests/suite/test_ts_tls_passthrough.py +++ b/tests/suite/test_ts_tls_passthrough.py @@ -1,7 +1,7 @@ from pprint import pprint import pytest -from 
settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA from suite.fixtures.fixtures import PublicEndpoint from suite.utils.custom_resources_utils import create_ts_from_yaml, delete_ts, read_ts from suite.utils.resources_utils import ( @@ -174,7 +174,7 @@ def test_tls_passthrough_proxy_protocol_config( config = get_nginx_template_conf(kube_apis.v1, ingress_controller_prerequisites.namespace) assert f"listen {transport_server_tls_passthrough_setup.tls_passthrough_port} proxy_protocol;" in config assert f"listen [::]:{transport_server_tls_passthrough_setup.tls_passthrough_port} proxy_protocol;" in config - std_cm_src = f"{TEST_DATA}/common/nginx-config.yaml" + std_cm_src = f"{DEPLOYMENTS}/common/nginx-config.yaml" replace_configmap_from_yaml( kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], diff --git a/tests/suite/test_v_s_route_grpc.py b/tests/suite/test_v_s_route_grpc.py index eb369b016f..f30f26d3ae 100644 --- a/tests/suite/test_v_s_route_grpc.py +++ b/tests/suite/test_v_s_route_grpc.py @@ -1,5 +1,5 @@ import pytest -from settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA from suite.utils.custom_assertions import ( assert_event_starts_with_text_and_contains_errors, assert_grpc_entries_exist, @@ -48,7 +48,7 @@ def backend_setup(request, kube_apis, ingress_controller_prerequisites, test_nam kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) delete_common_app(kube_apis, app_name, test_namespace) pytest.fail(f"VSR GRPC setup failed") @@ -60,7 +60,7 @@ def fin(): kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) delete_common_app(kube_apis, app_name, test_namespace) diff --git 
a/tests/suite/test_virtual_server.py b/tests/suite/test_virtual_server.py index daa379972a..4e96c764cf 100644 --- a/tests/suite/test_virtual_server.py +++ b/tests/suite/test_virtual_server.py @@ -1,5 +1,5 @@ import pytest -from settings import CRDS, TEST_DATA +from settings import CRDS, DEPLOYMENTS, TEST_DATA from suite.utils.custom_assertions import wait_and_assert_status_code from suite.utils.custom_resources_utils import create_crd_from_yaml, delete_crd from suite.utils.resources_utils import ( @@ -143,7 +143,7 @@ def test_responses_after_rbac_misconfiguration_on_the_fly( wait_and_assert_status_code(200, virtual_server_setup.backend_2_url, virtual_server_setup.vs_host) print("Step 11: restore ClusterRole and check") - patch_rbac(kube_apis.rbac_v1, f"{TEST_DATA}/rbac/rbac.yaml") + patch_rbac(kube_apis.rbac_v1, f"{DEPLOYMENTS}/rbac/rbac.yaml") wait_before_test(1) wait_and_assert_status_code(200, virtual_server_setup.backend_1_url, virtual_server_setup.vs_host) wait_and_assert_status_code(200, virtual_server_setup.backend_2_url, virtual_server_setup.vs_host) @@ -225,6 +225,6 @@ def test_responses_after_rbac_misconfiguration(self, kube_apis, crd_ingress_cont wait_and_assert_status_code(404, virtual_server_setup.backend_2_url, virtual_server_setup.vs_host) print("Step 2: configure RBAC and check") - patch_rbac(kube_apis.rbac_v1, f"{TEST_DATA}/rbac/rbac.yaml") + patch_rbac(kube_apis.rbac_v1, f"{DEPLOYMENTS}/rbac/rbac.yaml") wait_and_assert_status_code(200, virtual_server_setup.backend_1_url, virtual_server_setup.vs_host) wait_and_assert_status_code(200, virtual_server_setup.backend_2_url, virtual_server_setup.vs_host) diff --git a/tests/suite/test_virtual_server_configmap_keys.py b/tests/suite/test_virtual_server_configmap_keys.py index 26644dbcd2..1fe9ceb276 100644 --- a/tests/suite/test_virtual_server_configmap_keys.py +++ b/tests/suite/test_virtual_server_configmap_keys.py @@ -1,5 +1,5 @@ import pytest -from settings import TEST_DATA +from settings import 
DEPLOYMENTS, TEST_DATA from suite.utils.resources_utils import ( get_events, get_file_contents, @@ -142,7 +142,7 @@ def fin(): kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) request.addfinalizer(fin) diff --git a/tests/suite/test_virtual_server_grpc.py b/tests/suite/test_virtual_server_grpc.py index f15106c606..36760127a1 100644 --- a/tests/suite/test_virtual_server_grpc.py +++ b/tests/suite/test_virtual_server_grpc.py @@ -1,6 +1,6 @@ import grpc import pytest -from settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA from suite.grpc.helloworld_pb2 import HelloRequest from suite.grpc.helloworld_pb2_grpc import GreeterStub from suite.utils.custom_assertions import ( @@ -59,7 +59,7 @@ def backend_setup(request, kube_apis, ingress_controller_prerequisites, test_nam kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) delete_common_app(kube_apis, app_name, test_namespace) pytest.fail(f"VS GRPC setup failed") @@ -72,7 +72,7 @@ def fin(): kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) delete_common_app(kube_apis, app_name, test_namespace) diff --git a/tests/suite/test_virtual_server_mixed_grpc.py b/tests/suite/test_virtual_server_mixed_grpc.py index d664f402b5..267a9fb78f 100644 --- a/tests/suite/test_virtual_server_mixed_grpc.py +++ b/tests/suite/test_virtual_server_mixed_grpc.py @@ -1,6 +1,6 @@ import grpc import pytest -from settings import TEST_DATA +from settings import DEPLOYMENTS, TEST_DATA from suite.grpc.helloworld_pb2 import HelloRequest from 
suite.grpc.helloworld_pb2_grpc import GreeterStub from suite.utils.custom_assertions import ( @@ -54,7 +54,7 @@ def backend_setup(request, kube_apis, ingress_controller_prerequisites, test_nam kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) delete_common_app(kube_apis, app_name, test_namespace) pytest.fail(f"VS GRPC setup failed") @@ -67,7 +67,7 @@ def fin(): kube_apis.v1, ingress_controller_prerequisites.config_map["metadata"]["name"], ingress_controller_prerequisites.namespace, - f"{TEST_DATA}/common/nginx-config.yaml", + f"{DEPLOYMENTS}/common/nginx-config.yaml", ) delete_common_app(kube_apis, app_name, test_namespace) diff --git a/tests/suite/utils/resources_utils.py b/tests/suite/utils/resources_utils.py index 0bc229b02b..21c9a19297 100644 --- a/tests/suite/utils/resources_utils.py +++ b/tests/suite/utils/resources_utils.py @@ -12,7 +12,7 @@ from kubernetes.client.rest import ApiException from kubernetes.stream import stream from more_itertools import first -from settings import PROJECT_ROOT, RECONFIGURATION_DELAY, TEST_DATA +from settings import DEPLOYMENTS, PROJECT_ROOT, RECONFIGURATION_DELAY, TEST_DATA from suite.utils.ssl_utils import create_sni_session @@ -37,7 +37,7 @@ def configure_rbac(rbac_v1: RbacAuthorizationV1Api) -> RBACAuthorization: :param rbac_v1: RbacAuthorizationV1Api :return: RBACAuthorization """ - with open(f"{TEST_DATA}/rbac/rbac.yaml") as f: + with open(f"{DEPLOYMENTS}/rbac/rbac.yaml") as f: docs = yaml.safe_load_all(f) role_name = "" binding_name = "" @@ -61,7 +61,7 @@ def configure_rbac_with_ap(rbac_v1: RbacAuthorizationV1Api) -> RBACAuthorization :param rbac_v1: RbacAuthorizationV1Api :return: RBACAuthorization """ - with open(f"{TEST_DATA}/rbac/ap-rbac.yaml") as f: + with open(f"{DEPLOYMENTS}/rbac/ap-rbac.yaml") as f: docs = yaml.safe_load_all(f) role_name = "" 
binding_name = "" @@ -85,7 +85,7 @@ def configure_rbac_with_dos(rbac_v1: RbacAuthorizationV1Api) -> RBACAuthorizatio :param rbac_v1: RbacAuthorizationV1Api :return: RBACAuthorization """ - with open(f"{TEST_DATA}/rbac/apdos-rbac.yaml") as f: + with open(f"{DEPLOYMENTS}/rbac/apdos-rbac.yaml") as f: docs = yaml.safe_load_all(f) role_name = "" binding_name = "" @@ -1138,7 +1138,7 @@ def create_ingress_controller(v1: CoreV1Api, apps_v1_api: AppsV1Api, cli_argumen :return: str """ print(f"Create an Ingress Controller as {cli_arguments['ic-type']}") - yaml_manifest = f"{TEST_DATA}/{cli_arguments['deployment-type']}/{cli_arguments['ic-type']}.yaml" + yaml_manifest = f"{DEPLOYMENTS}/{cli_arguments['deployment-type']}/{cli_arguments['ic-type']}.yaml" with open(yaml_manifest) as f: dep = yaml.safe_load(f) dep["spec"]["replicas"] = int(cli_arguments["replicas"]) From 14673e9ed08705b2c703f7988c33e77617cf8038 Mon Sep 17 00:00:00 2001 From: Shaun Date: Tue, 7 Nov 2023 11:46:16 +0000 Subject: [PATCH 4/4] Graduate TransportServer and GlobalConfiguration to v1 (#4574) --- .../k8s.nginx.org_globalconfigurations.yaml | 41 +- .../bases/k8s.nginx.org_transportservers.yaml | 165 +++++++- .../bases/k8s.nginx.org_virtualservers.yaml | 4 +- deploy/crds.yaml | 210 +++++++++- .../globalconfiguration-resource.md | 4 +- .../handling-host-and-listener-collisions.md | 4 +- .../configuration/transportserver-resource.md | 12 +- ...rtual-server-with-custom-listener-ports.md | 2 +- .../basic-tcp-udp/global-configuration.yaml | 2 +- .../basic-tcp-udp/transport-server-tcp.yaml | 2 +- .../basic-tcp-udp/transport-server-udp.yaml | 2 +- .../transport-server-passthrough.yaml | 2 +- internal/configs/config_params.go | 6 +- internal/configs/configurator.go | 7 +- internal/configs/configurator_test.go | 25 +- internal/configs/transportserver.go | 20 +- internal/configs/transportserver_test.go | 194 ++++----- internal/configs/virtualserver_test.go | 6 +- internal/k8s/configuration.go | 29 +- 
internal/k8s/configuration_test.go | 73 ++-- internal/k8s/controller.go | 23 +- internal/k8s/controller_test.go | 5 +- internal/k8s/handlers.go | 18 +- internal/k8s/reference_checkers.go | 41 +- internal/k8s/reference_checkers_test.go | 21 +- internal/k8s/status.go | 17 +- internal/k8s/status_test.go | 41 +- internal/k8s/task_queue.go | 5 +- pkg/apis/configuration/v1/register.go | 4 + pkg/apis/configuration/v1/types.go | 181 ++++++++- .../configuration/v1/zz_generated.deepcopy.go | 378 +++++++++++++++++- pkg/apis/configuration/v1alpha1/types.go | 84 ++-- .../v1alpha1/zz_generated.deepcopy.go | 154 +++---- .../validation/globalconfiguration.go | 12 +- .../validation/globalconfiguration_test.go | 42 +- .../validation/transportserver.go | 38 +- .../validation/transportserver_test.go | 118 +++--- .../configuration/v1/configuration_client.go | 10 + .../v1/fake/fake_configuration_client.go | 8 + .../v1/fake/fake_globalconfiguration.go | 113 ++++++ .../v1/fake/fake_transportserver.go | 125 ++++++ .../configuration/v1/generated_expansion.go | 4 + .../configuration/v1/globalconfiguration.go | 162 ++++++++ .../typed/configuration/v1/transportserver.go | 179 +++++++++ .../configuration/v1/globalconfiguration.go | 74 ++++ .../configuration/v1/interface.go | 14 + .../configuration/v1/transportserver.go | 74 ++++ .../informers/externalversions/generic.go | 4 + .../configuration/v1/expansion_generated.go | 16 + .../configuration/v1/globalconfiguration.go | 83 ++++ .../configuration/v1/transportserver.go | 83 ++++ .../global-configuration.yaml | 2 +- .../transport-server/passthrough.yaml | 2 +- .../data/prometheus/transport-server/tcp.yaml | 2 +- .../data/prometheus/transport-server/udp.yaml | 2 +- .../standard/global-configuration.yaml | 2 +- .../standard/transport-server.yaml | 2 +- .../rejected-invalid.yaml | 2 +- .../rejected-warning.yaml | 2 +- .../standard/global-configuration.yaml | 2 +- .../standard/transport-server.yaml | 2 +- .../failing-hc-transport-server.yaml | 2 +- 
.../max-connections-transport-server.yaml | 2 +- .../method-transport-server.yaml | 2 +- .../missing-service-transport-server.yaml | 2 +- .../passing-hc-transport-server.yaml | 2 +- .../second-transport-server.yaml | 2 +- .../standard/global-configuration.yaml | 2 +- .../standard/transport-server.yaml | 2 +- .../transport-server-tls.yaml | 2 +- .../wrong-port-transport-server.yaml | 2 +- .../standard/transport-server.yaml | 2 +- .../transport-server-same-host.yaml | 2 +- .../failing-hc-transport-server.yaml | 2 +- .../missing-service-transport-server.yaml | 2 +- .../passing-hc-transport-server.yaml | 2 +- .../second-transport-server.yaml | 2 +- .../standard/global-configuration.yaml | 2 +- .../standard/transport-server.yaml | 2 +- .../wrong-port-transport-server.yaml | 2 +- ...ransport-server-configurable-timeouts.yaml | 2 +- .../transport-server-snippets.yaml | 2 +- ...-configuration-http-listener-with-ssl.yaml | 2 +- ...figuration-https-listener-without-ssl.yaml | 2 +- ...obal-configuration-missing-http-https.yaml | 2 +- .../global-configuration-missing-http.yaml | 2 +- .../global-configuration-missing-https.yaml | 2 +- .../global-configuration.yaml | 2 +- tests/suite/utils/custom_resources_utils.py | 18 +- 89 files changed, 2438 insertions(+), 595 deletions(-) create mode 100644 pkg/client/clientset/versioned/typed/configuration/v1/fake/fake_globalconfiguration.go create mode 100644 pkg/client/clientset/versioned/typed/configuration/v1/fake/fake_transportserver.go create mode 100644 pkg/client/clientset/versioned/typed/configuration/v1/globalconfiguration.go create mode 100644 pkg/client/clientset/versioned/typed/configuration/v1/transportserver.go create mode 100644 pkg/client/informers/externalversions/configuration/v1/globalconfiguration.go create mode 100644 pkg/client/informers/externalversions/configuration/v1/transportserver.go create mode 100644 pkg/client/listers/configuration/v1/globalconfiguration.go create mode 100644 
pkg/client/listers/configuration/v1/transportserver.go diff --git a/config/crd/bases/k8s.nginx.org_globalconfigurations.yaml b/config/crd/bases/k8s.nginx.org_globalconfigurations.yaml index 29c5697421..e5695ddd8f 100644 --- a/config/crd/bases/k8s.nginx.org_globalconfigurations.yaml +++ b/config/crd/bases/k8s.nginx.org_globalconfigurations.yaml @@ -16,7 +16,7 @@ spec: singular: globalconfiguration scope: Namespaced versions: - - name: v1alpha1 + - name: v1 schema: openAPIV3Schema: description: GlobalConfiguration defines the GlobalConfiguration resource. @@ -55,3 +55,42 @@ spec: type: object served: true storage: true + - name: v1alpha1 + schema: + openAPIV3Schema: + description: GlobalConfiguration defines the GlobalConfiguration resource. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalConfigurationSpec is the spec of the GlobalConfiguration + resource. + properties: + listeners: + items: + description: Listener defines a listener. 
+ properties: + name: + type: string + port: + type: integer + protocol: + type: string + ssl: + type: boolean + type: object + type: array + type: object + type: object + served: true + storage: false diff --git a/config/crd/bases/k8s.nginx.org_transportservers.yaml b/config/crd/bases/k8s.nginx.org_transportservers.yaml index 1a8740cc0d..81a11524a7 100644 --- a/config/crd/bases/k8s.nginx.org_transportservers.yaml +++ b/config/crd/bases/k8s.nginx.org_transportservers.yaml @@ -28,7 +28,7 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1alpha1 + name: v1 schema: openAPIV3Schema: description: TransportServer defines the TransportServer resource. @@ -49,7 +49,7 @@ spec: description: TransportServerSpec is the spec of the TransportServer resource. properties: action: - description: Action defines an action. + description: TransportServerAction defines an action. properties: pass: type: string @@ -77,7 +77,8 @@ spec: streamSnippets: type: string tls: - description: TLS defines TLS configuration for a TransportServer. + description: TransportServerTLS defines TransportServerTLS configuration + for a TransportServer. properties: secret: type: string @@ -100,13 +101,13 @@ spec: type: object upstreams: items: - description: Upstream defines an upstream. + description: TransportServerUpstream defines an upstream. properties: failTimeout: type: string healthCheck: - description: HealthCheck defines the parameters for active Upstream - HealthChecks. + description: TransportServerHealthCheck defines the parameters + for active Upstream HealthChecks. properties: enable: type: boolean @@ -117,8 +118,8 @@ spec: jitter: type: string match: - description: Match defines the parameters of a custom health - check. + description: TransportServerMatch defines the parameters + of a custom health check. 
properties: expect: type: string @@ -163,3 +164,151 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - description: Current state of the TransportServer. If the resource has a valid + status, it means it has been validated and accepted by the Ingress Controller. + jsonPath: .status.state + name: State + type: string + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TransportServer defines the TransportServer resource. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TransportServerSpec is the spec of the TransportServer resource. + properties: + action: + description: TransportServerAction defines an action. + properties: + pass: + type: string + type: object + host: + type: string + ingressClassName: + type: string + listener: + description: TransportServerListener defines a listener for a TransportServer. + properties: + name: + type: string + protocol: + type: string + type: object + serverSnippets: + type: string + sessionParameters: + description: SessionParameters defines session parameters. 
+ properties: + timeout: + type: string + type: object + streamSnippets: + type: string + tls: + description: TransportServerTLS defines TransportServerTLS configuration + for a TransportServer. + properties: + secret: + type: string + type: object + upstreamParameters: + description: UpstreamParameters defines parameters for an upstream. + properties: + connectTimeout: + type: string + nextUpstream: + type: boolean + nextUpstreamTimeout: + type: string + nextUpstreamTries: + type: integer + udpRequests: + type: integer + udpResponses: + type: integer + type: object + upstreams: + items: + description: TransportServerUpstream defines an upstream. + properties: + failTimeout: + type: string + healthCheck: + description: TransportServerHealthCheck defines the parameters + for active Upstream HealthChecks. + properties: + enable: + type: boolean + fails: + type: integer + interval: + type: string + jitter: + type: string + match: + description: TransportServerMatch defines the parameters + of a custom health check. + properties: + expect: + type: string + send: + type: string + type: object + passes: + type: integer + port: + type: integer + timeout: + type: string + type: object + loadBalancingMethod: + type: string + maxConns: + type: integer + maxFails: + type: integer + name: + type: string + port: + type: integer + service: + type: string + type: object + type: array + type: object + status: + description: TransportServerStatus defines the status for the TransportServer + resource. + properties: + message: + type: string + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} diff --git a/config/crd/bases/k8s.nginx.org_virtualservers.yaml b/config/crd/bases/k8s.nginx.org_virtualservers.yaml index 2c2acef41e..a0a676be0a 100644 --- a/config/crd/bases/k8s.nginx.org_virtualservers.yaml +++ b/config/crd/bases/k8s.nginx.org_virtualservers.yaml @@ -105,8 +105,8 @@ spec: routing. 
type: boolean listener: - description: Listener references a custom http and/or https listener - defined in GlobalConfiguration. + description: VirtualServerListener references a custom http and/or + https listener defined in GlobalConfiguration. properties: http: type: string diff --git a/deploy/crds.yaml b/deploy/crds.yaml index 5edfadffa7..d1d588283f 100644 --- a/deploy/crds.yaml +++ b/deploy/crds.yaml @@ -107,7 +107,7 @@ spec: singular: globalconfiguration scope: Namespaced versions: - - name: v1alpha1 + - name: v1 schema: openAPIV3Schema: description: GlobalConfiguration defines the GlobalConfiguration resource. @@ -146,6 +146,45 @@ spec: type: object served: true storage: true + - name: v1alpha1 + schema: + openAPIV3Schema: + description: GlobalConfiguration defines the GlobalConfiguration resource. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalConfigurationSpec is the spec of the GlobalConfiguration + resource. + properties: + listeners: + items: + description: Listener defines a listener. 
+ properties: + name: + type: string + port: + type: integer + protocol: + type: string + ssl: + type: boolean + type: object + type: array + type: object + type: object + served: true + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -498,7 +537,7 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1alpha1 + name: v1 schema: openAPIV3Schema: description: TransportServer defines the TransportServer resource. @@ -519,7 +558,7 @@ spec: description: TransportServerSpec is the spec of the TransportServer resource. properties: action: - description: Action defines an action. + description: TransportServerAction defines an action. properties: pass: type: string @@ -547,7 +586,8 @@ spec: streamSnippets: type: string tls: - description: TLS defines TLS configuration for a TransportServer. + description: TransportServerTLS defines TransportServerTLS configuration + for a TransportServer. properties: secret: type: string @@ -570,13 +610,13 @@ spec: type: object upstreams: items: - description: Upstream defines an upstream. + description: TransportServerUpstream defines an upstream. properties: failTimeout: type: string healthCheck: - description: HealthCheck defines the parameters for active Upstream - HealthChecks. + description: TransportServerHealthCheck defines the parameters + for active Upstream HealthChecks. properties: enable: type: boolean @@ -587,8 +627,8 @@ spec: jitter: type: string match: - description: Match defines the parameters of a custom health - check. + description: TransportServerMatch defines the parameters + of a custom health check. properties: expect: type: string @@ -633,6 +673,154 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - description: Current state of the TransportServer. If the resource has a valid + status, it means it has been validated and accepted by the Ingress Controller. 
+ jsonPath: .status.state + name: State + type: string + - jsonPath: .status.reason + name: Reason + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TransportServer defines the TransportServer resource. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TransportServerSpec is the spec of the TransportServer resource. + properties: + action: + description: TransportServerAction defines an action. + properties: + pass: + type: string + type: object + host: + type: string + ingressClassName: + type: string + listener: + description: TransportServerListener defines a listener for a TransportServer. + properties: + name: + type: string + protocol: + type: string + type: object + serverSnippets: + type: string + sessionParameters: + description: SessionParameters defines session parameters. + properties: + timeout: + type: string + type: object + streamSnippets: + type: string + tls: + description: TransportServerTLS defines TransportServerTLS configuration + for a TransportServer. + properties: + secret: + type: string + type: object + upstreamParameters: + description: UpstreamParameters defines parameters for an upstream. 
+ properties: + connectTimeout: + type: string + nextUpstream: + type: boolean + nextUpstreamTimeout: + type: string + nextUpstreamTries: + type: integer + udpRequests: + type: integer + udpResponses: + type: integer + type: object + upstreams: + items: + description: TransportServerUpstream defines an upstream. + properties: + failTimeout: + type: string + healthCheck: + description: TransportServerHealthCheck defines the parameters + for active Upstream HealthChecks. + properties: + enable: + type: boolean + fails: + type: integer + interval: + type: string + jitter: + type: string + match: + description: TransportServerMatch defines the parameters + of a custom health check. + properties: + expect: + type: string + send: + type: string + type: object + passes: + type: integer + port: + type: integer + timeout: + type: string + type: object + loadBalancingMethod: + type: string + maxConns: + type: integer + maxFails: + type: integer + name: + type: string + port: + type: integer + service: + type: string + type: object + type: array + type: object + status: + description: TransportServerStatus defines the status for the TransportServer + resource. + properties: + message: + type: string + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -1420,8 +1608,8 @@ spec: routing. type: boolean listener: - description: Listener references a custom http and/or https listener - defined in GlobalConfiguration. + description: VirtualServerListener references a custom http and/or + https listener defined in GlobalConfiguration. 
properties: http: type: string diff --git a/docs/content/configuration/global-configuration/globalconfiguration-resource.md b/docs/content/configuration/global-configuration/globalconfiguration-resource.md index baeb9fd838..bfa29d6077 100644 --- a/docs/content/configuration/global-configuration/globalconfiguration-resource.md +++ b/docs/content/configuration/global-configuration/globalconfiguration-resource.md @@ -23,7 +23,7 @@ When [installing](/nginx-ingress-controller/installation/installation-with-manif The GlobalConfiguration resource defines the global configuration parameters of the Ingress Controller. Below is an example: ```yaml -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration @@ -111,7 +111,7 @@ If you try to create (or update) a resource that violates the structural schema ``` $ kubectl apply -f global-configuration.yaml - error: error validating "global-configuration.yaml": error validating data: ValidationError(GlobalConfiguration.spec.listeners[0].port): invalid type for org.nginx.k8s.v1alpha1.GlobalConfiguration.spec.listeners.port: got "string", expected "integer"; if you choose to ignore these errors, turn validation off with --validate=false + error: error validating "global-configuration.yaml": error validating data: ValidationError(GlobalConfiguration.spec.listeners[0].port): invalid type for org.nginx.k8s.v1.GlobalConfiguration.spec.listeners.port: got "string", expected "integer"; if you choose to ignore these errors, turn validation off with --validate=false ``` - Example of Kubernetes API server validation: diff --git a/docs/content/configuration/handling-host-and-listener-collisions.md b/docs/content/configuration/handling-host-and-listener-collisions.md index 0452489190..8d17616913 100644 --- a/docs/content/configuration/handling-host-and-listener-collisions.md +++ b/docs/content/configuration/handling-host-and-listener-collisions.md @@ -102,7 +102,7 @@ Consider 
the following two resources: - `tcp-1` TransportServer: ```yaml - apiVersion: k8s.nginx.org/v1alpha1 + apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: tcp-1 @@ -116,7 +116,7 @@ Consider the following two resources: - `tcp-2` TransportServer: ```yaml - apiVersion: k8s.nginx.org/v1alpha1 + apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: tcp-2 diff --git a/docs/content/configuration/transportserver-resource.md b/docs/content/configuration/transportserver-resource.md index e3967f0bb9..bfed15b477 100644 --- a/docs/content/configuration/transportserver-resource.md +++ b/docs/content/configuration/transportserver-resource.md @@ -24,7 +24,7 @@ The TransportServer resource defines load balancing configuration for TCP, UDP, - TCP load balancing: ```yaml - apiVersion: k8s.nginx.org/v1alpha1 + apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: dns-tcp @@ -45,7 +45,7 @@ The TransportServer resource defines load balancing configuration for TCP, UDP, - UDP load balancing: ```yaml - apiVersion: k8s.nginx.org/v1alpha1 + apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: dns-udp @@ -67,7 +67,7 @@ The TransportServer resource defines load balancing configuration for TCP, UDP, - TLS passthrough load balancing: ```yaml - apiVersion: k8s.nginx.org/v1alpha1 + apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: secure-app @@ -299,7 +299,7 @@ In the kubectl get and similar commands, you can also use the short name `ts` in Snippets allow you to insert raw NGINX config into different contexts of NGINX configuration. In the example below, we use snippets to configure [access control](http://nginx.org/en/docs/stream/ngx_stream_access_module.html) in a TransportServer: ```yaml -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: cafe @@ -317,7 +317,7 @@ spec: Snippets can also be specified for a stream. 
In the example below, we use snippets to [limit the number of connections](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html): ```yaml -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: cafe @@ -365,7 +365,7 @@ If you try to create (or update) a resource that violates the structural schema ```console kubectl apply -f transport-server-passthrough.yaml - error: error validating "transport-server-passthrough.yaml": error validating data: ValidationError(TransportServer.spec.upstreams[0].port): invalid type for org.nginx.k8s.v1alpha1.TransportServer.spec.upstreams.port: got "string", expected "integer"; if you choose to ignore these errors, turn validation off with --validate=false + error: error validating "transport-server-passthrough.yaml": error validating data: ValidationError(TransportServer.spec.upstreams[0].port): invalid type for org.nginx.k8s.v1.TransportServer.spec.upstreams.port: got "string", expected "integer"; if you choose to ignore these errors, turn validation off with --validate=false ``` - Example of Kubernetes API server validation: diff --git a/docs/content/tutorials/virtual-server-with-custom-listener-ports.md b/docs/content/tutorials/virtual-server-with-custom-listener-ports.md index 92dc50c9dd..a0badbe88f 100644 --- a/docs/content/tutorials/virtual-server-with-custom-listener-ports.md +++ b/docs/content/tutorials/virtual-server-with-custom-listener-ports.md @@ -15,7 +15,7 @@ Each field must reference a valid listener defined by in a [GlobalConfiguration] 1. 
Create a yaml file called `nginx-configuration.yaml` with the below content: ```yaml -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/examples/custom-resources/basic-tcp-udp/global-configuration.yaml b/examples/custom-resources/basic-tcp-udp/global-configuration.yaml index 66bbd0632d..56353d8586 100644 --- a/examples/custom-resources/basic-tcp-udp/global-configuration.yaml +++ b/examples/custom-resources/basic-tcp-udp/global-configuration.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/examples/custom-resources/basic-tcp-udp/transport-server-tcp.yaml b/examples/custom-resources/basic-tcp-udp/transport-server-tcp.yaml index c8f389eedc..1bb3a0c4c1 100644 --- a/examples/custom-resources/basic-tcp-udp/transport-server-tcp.yaml +++ b/examples/custom-resources/basic-tcp-udp/transport-server-tcp.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: dns-tcp diff --git a/examples/custom-resources/basic-tcp-udp/transport-server-udp.yaml b/examples/custom-resources/basic-tcp-udp/transport-server-udp.yaml index 4b273a721e..c88daf137f 100644 --- a/examples/custom-resources/basic-tcp-udp/transport-server-udp.yaml +++ b/examples/custom-resources/basic-tcp-udp/transport-server-udp.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: dns-udp diff --git a/examples/custom-resources/tls-passthrough/transport-server-passthrough.yaml b/examples/custom-resources/tls-passthrough/transport-server-passthrough.yaml index 82b0d84f0d..20778b94f6 100644 --- a/examples/custom-resources/tls-passthrough/transport-server-passthrough.yaml +++ b/examples/custom-resources/tls-passthrough/transport-server-passthrough.yaml @@ -1,4 +1,4 @@ -apiVersion: 
k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: secure-app diff --git a/internal/configs/config_params.go b/internal/configs/config_params.go index 9e8b4a9479..3d6ee225a5 100644 --- a/internal/configs/config_params.go +++ b/internal/configs/config_params.go @@ -1,6 +1,6 @@ package configs -import conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" +import conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" // ConfigParams holds NGINX configuration parameters that affect the main NGINX config // as well as configs for Ingress resources. @@ -196,8 +196,8 @@ func NewDefaultGlobalConfigParams() *GlobalConfigParams { func NewGlobalConfigParamsWithTLSPassthrough() *GlobalConfigParams { return &GlobalConfigParams{ Listeners: map[string]Listener{ - conf_v1alpha1.TLSPassthroughListenerName: { - Protocol: conf_v1alpha1.TLSPassthroughListenerProtocol, + conf_v1.TLSPassthroughListenerName: { + Protocol: conf_v1.TLSPassthroughListenerProtocol, }, }, } diff --git a/internal/configs/configurator.go b/internal/configs/configurator.go index 139404a505..d1b397ee92 100644 --- a/internal/configs/configurator.go +++ b/internal/configs/configurator.go @@ -14,7 +14,6 @@ import ( "github.com/spiffe/go-spiffe/v2/workloadapi" "github.com/nginxinc/kubernetes-ingress/internal/configs/version2" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" "github.com/golang/glog" api_v1 "k8s.io/api/core/v1" @@ -323,7 +322,7 @@ func (cnf *Configurator) StreamUpstreamsForName(name string) []string { // transportServerForActionName takes an action name and returns // Transport Server obj associated with that name. 
-func (cnf *Configurator) transportServerForActionName(name string) *conf_v1alpha1.TransportServer { +func (cnf *Configurator) transportServerForActionName(name string) *conf_v1.TransportServer { for _, tsEx := range cnf.transportServers { glog.V(3).Infof("Check ts action '%s' for requested name: '%s'", tsEx.TransportServer.Spec.Action.Pass, name) if tsEx.TransportServer.Spec.Action.Pass == name { @@ -335,7 +334,7 @@ func (cnf *Configurator) transportServerForActionName(name string) *conf_v1alpha // streamUpstreamsForTransportServer takes TransportServer obj and returns // a list of stream upstreams associated with this TransportServer. -func (cnf *Configurator) streamUpstreamsForTransportServer(ts *conf_v1alpha1.TransportServer) []string { +func (cnf *Configurator) streamUpstreamsForTransportServer(ts *conf_v1.TransportServer) []string { upstreamNames := make([]string, 0, len(ts.Spec.Upstreams)) n := newUpstreamNamerForTransportServer(ts) for _, u := range ts.Spec.Upstreams { @@ -1350,7 +1349,7 @@ func getFileNameForVirtualServer(virtualServer *conf_v1.VirtualServer) string { return fmt.Sprintf("vs_%s_%s", virtualServer.Namespace, virtualServer.Name) } -func getFileNameForTransportServer(transportServer *conf_v1alpha1.TransportServer) string { +func getFileNameForTransportServer(transportServer *conf_v1.TransportServer) string { return fmt.Sprintf("ts_%s_%s", transportServer.Namespace, transportServer.Name) } diff --git a/internal/configs/configurator_test.go b/internal/configs/configurator_test.go index b6ad8ec1bc..c8f9d6c912 100644 --- a/internal/configs/configurator_test.go +++ b/internal/configs/configurator_test.go @@ -15,7 +15,6 @@ import ( "github.com/nginxinc/kubernetes-ingress/internal/configs/version2" "github.com/nginxinc/kubernetes-ingress/internal/nginx" conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" ) func 
createTestStaticConfigParams() *StaticConfigParams { @@ -234,7 +233,7 @@ func TestGetFileNameForVirtualServerFromKey(t *testing.T) { func TestGetFileNameForTransportServer(t *testing.T) { t.Parallel() - transportServer := &conf_v1alpha1.TransportServer{ + transportServer := &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Namespace: "default", Name: "test-server", @@ -863,13 +862,13 @@ func TestUpdateTransportServerMetricsLabels(t *testing.T) { cnf.labelUpdater = newFakeLabelUpdater() tsEx := &TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "test-transportserver", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "dns-tcp", Protocol: "TCP", }, @@ -994,13 +993,13 @@ func TestUpdateTransportServerMetricsLabels(t *testing.T) { } tsExTLS := &TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "test-transportserver-tls", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tls-passthrough", Protocol: "TLS_PASSTHROUGH", }, @@ -1374,25 +1373,25 @@ var ( }, } validTransportServerExWithUpstreams = &TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "secure-app", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tls-passthrough", Protocol: "TLS_PASSTHROUGH", }, Host: "example.com", - Upstreams: 
[]conf_v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "secure-app", Service: "secure-app", Port: 8443, }, }, - Action: &conf_v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "secure-app", }, }, diff --git a/internal/configs/transportserver.go b/internal/configs/transportserver.go index 36a848f1c0..ff2976c77e 100644 --- a/internal/configs/transportserver.go +++ b/internal/configs/transportserver.go @@ -8,7 +8,7 @@ import ( "github.com/nginxinc/kubernetes-ingress/internal/configs/version2" "github.com/nginxinc/kubernetes-ingress/internal/k8s/secrets" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" + conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" ) const nginxNonExistingUnixSocket = "unix:/var/lib/nginx/non-existing-unix-socket.sock" @@ -16,7 +16,7 @@ const nginxNonExistingUnixSocket = "unix:/var/lib/nginx/non-existing-unix-socket // TransportServerEx holds a TransportServer along with the resources referenced by it. 
type TransportServerEx struct { ListenerPort int - TransportServer *conf_v1alpha1.TransportServer + TransportServer *conf_v1.TransportServer Endpoints map[string][]string PodsByIP map[string]string ExternalNameSvcs map[string]bool @@ -34,7 +34,7 @@ func (tsEx *TransportServerEx) String() string { return fmt.Sprintf("%s/%s", tsEx.TransportServer.Namespace, tsEx.TransportServer.Name) } -func newUpstreamNamerForTransportServer(transportServer *conf_v1alpha1.TransportServer) *upstreamNamer { +func newUpstreamNamerForTransportServer(transportServer *conf_v1.TransportServer) *upstreamNamer { return &upstreamNamer{ prefix: fmt.Sprintf("ts_%s_%s", transportServer.Namespace, transportServer.Name), } @@ -83,13 +83,13 @@ func generateTransportServerConfig(transportServerEx *TransportServerEx, listene streamSnippets := generateSnippets(true, transportServerEx.TransportServer.Spec.StreamSnippets, []string{}) statusZone := transportServerEx.TransportServer.Spec.Listener.Name - if transportServerEx.TransportServer.Spec.Listener.Name == conf_v1alpha1.TLSPassthroughListenerName { + if transportServerEx.TransportServer.Spec.Listener.Name == conf_v1.TLSPassthroughListenerName { statusZone = transportServerEx.TransportServer.Spec.Host } tsConfig := &version2.TransportServerConfig{ Server: version2.StreamServer{ - TLSPassthrough: transportServerEx.TransportServer.Spec.Listener.Name == conf_v1alpha1.TLSPassthroughListenerName, + TLSPassthrough: transportServerEx.TransportServer.Spec.Listener.Name == conf_v1.TLSPassthroughListenerName, UnixSocket: generateUnixSocket(transportServerEx), Port: listenerPort, UDP: transportServerEx.TransportServer.Spec.Listener.Protocol == "UDP", @@ -117,13 +117,13 @@ func generateTransportServerConfig(transportServerEx *TransportServerEx, listene } func generateUnixSocket(transportServerEx *TransportServerEx) string { - if transportServerEx.TransportServer.Spec.Listener.Name == conf_v1alpha1.TLSPassthroughListenerName { + if 
transportServerEx.TransportServer.Spec.Listener.Name == conf_v1.TLSPassthroughListenerName { return fmt.Sprintf("unix:/var/lib/nginx/passthrough-%s_%s.sock", transportServerEx.TransportServer.Namespace, transportServerEx.TransportServer.Name) } return "" } -func generateSSLConfig(ts *conf_v1alpha1.TransportServer, tls *conf_v1alpha1.TLS, namespace string, secretRefs map[string]*secrets.SecretReference) (*version2.StreamSSL, Warnings) { +func generateSSLConfig(ts *conf_v1.TransportServer, tls *conf_v1.TransportServerTLS, namespace string, secretRefs map[string]*secrets.SecretReference) (*version2.StreamSSL, Warnings) { if tls == nil { return &version2.StreamSSL{Enabled: false}, nil } @@ -185,7 +185,7 @@ func generateStreamUpstreams(transportServerEx *TransportServerEx, upstreamNamer return upstreams, warnings } -func generateTransportServerHealthCheck(upstreamName string, generatedUpstreamName string, upstreams []conf_v1alpha1.Upstream) (*version2.StreamHealthCheck, *version2.Match) { +func generateTransportServerHealthCheck(upstreamName string, generatedUpstreamName string, upstreams []conf_v1.TransportServerUpstream) (*version2.StreamHealthCheck, *version2.Match) { var hc *version2.StreamHealthCheck var match *version2.Match @@ -234,7 +234,7 @@ func generateTransportServerHealthCheckWithDefaults() *version2.StreamHealthChec } } -func generateHealthCheckMatch(match *conf_v1alpha1.Match, name string) *version2.Match { +func generateHealthCheckMatch(match *conf_v1.TransportServerMatch, name string) *version2.Match { var modifier string var expect string @@ -256,7 +256,7 @@ func generateHealthCheckMatch(match *conf_v1alpha1.Match, name string) *version2 } } -func generateStreamUpstream(upstream conf_v1alpha1.Upstream, upstreamNamer *upstreamNamer, endpoints []string, isPlus bool) version2.StreamUpstream { +func generateStreamUpstream(upstream conf_v1.TransportServerUpstream, upstreamNamer *upstreamNamer, endpoints []string, isPlus bool) version2.StreamUpstream { var 
upsServers []version2.StreamUpstreamServer name := upstreamNamer.GetNameForUpstream(upstream.Name) diff --git a/internal/configs/transportserver_test.go b/internal/configs/transportserver_test.go index 7c5c92ee52..8845732814 100644 --- a/internal/configs/transportserver_test.go +++ b/internal/configs/transportserver_test.go @@ -8,14 +8,14 @@ import ( "github.com/google/go-cmp/cmp" "github.com/nginxinc/kubernetes-ingress/internal/configs/version2" "github.com/nginxinc/kubernetes-ingress/internal/k8s/secrets" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" + conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" api_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestUpstreamNamerForTransportServer(t *testing.T) { t.Parallel() - transportServer := conf_v1alpha1.TransportServer{ + transportServer := conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "tcp-app", Namespace: "default", @@ -40,7 +40,7 @@ func TestTransportServerExString(t *testing.T) { }{ { input: &TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "test-server", Namespace: "default", @@ -70,24 +70,24 @@ func TestTransportServerExString(t *testing.T) { func TestGenerateTransportServerConfigForTCPSnippets(t *testing.T) { t.Parallel() transportServerEx := TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "tcp-server", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tcp-listener", Protocol: "TCP", }, - Upstreams: []conf_v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "tcp-app", Service: "tcp-app-svc", Port: 5001, }, }, - 
Action: &conf_v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "tcp-app", }, ServerSnippets: "deny 192.168.1.1;\nallow 192.168.1.0/24;", @@ -156,24 +156,24 @@ func TestGenerateTransportServerConfigForTCPSnippets(t *testing.T) { func TestGenerateTransportServerConfigForIPV6Disabled(t *testing.T) { t.Parallel() transportServerEx := TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "tcp-server", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tcp-listener", Protocol: "TCP", }, - Upstreams: []conf_v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "tcp-app", Service: "tcp-app-svc", Port: 5001, }, }, - Action: &conf_v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "tcp-app", }, }, @@ -240,17 +240,17 @@ func TestGenerateTransportServerConfigForIPV6Disabled(t *testing.T) { func TestGenerateTransportServerConfigForTCP(t *testing.T) { t.Parallel() transportServerEx := TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "tcp-server", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tcp-listener", Protocol: "TCP", }, - Upstreams: []conf_v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "tcp-app", Service: "tcp-app-svc", @@ -259,14 +259,14 @@ func TestGenerateTransportServerConfigForTCP(t *testing.T) { FailTimeout: "40s", }, }, - UpstreamParameters: &conf_v1alpha1.UpstreamParameters{ + UpstreamParameters: &conf_v1.UpstreamParameters{ ConnectTimeout: "30s", NextUpstream: false, }, - 
SessionParameters: &conf_v1alpha1.SessionParameters{ + SessionParameters: &conf_v1.SessionParameters{ Timeout: "50s", }, - Action: &conf_v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "tcp-app", }, }, @@ -332,17 +332,17 @@ func TestGenerateTransportServerConfigForTCP(t *testing.T) { func TestGenerateTransportServerConfigForTCPMaxConnections(t *testing.T) { t.Parallel() transportServerEx := TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "tcp-server", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tcp-listener", Protocol: "TCP", }, - Upstreams: []conf_v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "tcp-app", Service: "tcp-app-svc", @@ -352,14 +352,14 @@ func TestGenerateTransportServerConfigForTCPMaxConnections(t *testing.T) { FailTimeout: "40s", }, }, - UpstreamParameters: &conf_v1alpha1.UpstreamParameters{ + UpstreamParameters: &conf_v1.UpstreamParameters{ ConnectTimeout: "30s", NextUpstream: false, }, - SessionParameters: &conf_v1alpha1.SessionParameters{ + SessionParameters: &conf_v1.SessionParameters{ Timeout: "50s", }, - Action: &conf_v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "tcp-app", }, }, @@ -427,31 +427,31 @@ func TestGenerateTransportServerConfigForTCPMaxConnections(t *testing.T) { func TestGenerateTransportServerConfigForTLSPassthrough(t *testing.T) { t.Parallel() transportServerEx := TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "tcp-server", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: 
conf_v1.TransportServerListener{ Name: "tls-passthrough", Protocol: "TLS_PASSTHROUGH", }, Host: "example.com", - Upstreams: []conf_v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "tcp-app", Service: "tcp-app-svc", Port: 5001, }, }, - UpstreamParameters: &conf_v1alpha1.UpstreamParameters{ + UpstreamParameters: &conf_v1.UpstreamParameters{ ConnectTimeout: "30s", NextUpstream: false, NextUpstreamTries: 0, NextUpstreamTimeout: "", }, - Action: &conf_v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "tcp-app", }, }, @@ -523,25 +523,25 @@ func TestGenerateTransportServerConfigForUDP(t *testing.T) { udpResponses := 5 transportServerEx := TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "udp-server", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "udp-listener", Protocol: "UDP", }, - Upstreams: []conf_v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "udp-app", Service: "udp-app-svc", Port: 5001, - HealthCheck: &conf_v1alpha1.HealthCheck{}, + HealthCheck: &conf_v1.TransportServerHealthCheck{}, }, }, - UpstreamParameters: &conf_v1alpha1.UpstreamParameters{ + UpstreamParameters: &conf_v1.UpstreamParameters{ UDPRequests: &udpRequests, UDPResponses: &udpResponses, ConnectTimeout: "30s", @@ -549,7 +549,7 @@ func TestGenerateTransportServerConfigForUDP(t *testing.T) { NextUpstreamTimeout: "", NextUpstreamTries: 0, }, - Action: &conf_v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "udp-app", }, }, @@ -618,17 +618,17 @@ func TestGenerateTransportServerConfigForUDP(t *testing.T) { func TestGenerateTransportServerConfig_ProducesValidConfigOnValidInputForExternalNameServiceAndConfiguredResolver(t *testing.T) { t.Parallel() transportServerEx 
:= TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "tcp-server", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tcp-listener", Protocol: "TCP", }, - Upstreams: []conf_v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "tcp-app", Service: "tcp-app-svc", @@ -637,14 +637,14 @@ func TestGenerateTransportServerConfig_ProducesValidConfigOnValidInputForExterna FailTimeout: "40s", }, }, - UpstreamParameters: &conf_v1alpha1.UpstreamParameters{ + UpstreamParameters: &conf_v1.UpstreamParameters{ ConnectTimeout: "30s", NextUpstream: false, }, - SessionParameters: &conf_v1alpha1.SessionParameters{ + SessionParameters: &conf_v1.SessionParameters{ Timeout: "50s", }, - Action: &conf_v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "tcp-app", }, }, @@ -708,17 +708,17 @@ func TestGenerateTransportServerConfig_ProducesValidConfigOnValidInputForExterna func TestGenerateTransportServerConfig_GeneratesWarningOnNotConfiguredResolver(t *testing.T) { t.Parallel() transportServerEx := TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "tcp-server", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tcp-listener", Protocol: "TCP", }, - Upstreams: []conf_v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "tcp-app", Service: "tcp-app-svc", @@ -727,14 +727,14 @@ func TestGenerateTransportServerConfig_GeneratesWarningOnNotConfiguredResolver(t FailTimeout: "40s", }, }, - UpstreamParameters: 
&conf_v1alpha1.UpstreamParameters{ + UpstreamParameters: &conf_v1.UpstreamParameters{ ConnectTimeout: "30s", NextUpstream: false, }, - SessionParameters: &conf_v1alpha1.SessionParameters{ + SessionParameters: &conf_v1.SessionParameters{ Timeout: "50s", }, - Action: &conf_v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "tcp-app", }, }, @@ -792,17 +792,17 @@ func TestGenerateTransportServerConfig_GeneratesWarningOnNotConfiguredResolver(t func TestGenerateTransportServerConfig_UsesNotExistignSocketOnNotPlusAndNoEndpoints(t *testing.T) { t.Parallel() transportServerEx := TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "tcp-server", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tcp-listener", Protocol: "TCP", }, - Upstreams: []conf_v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "tcp-app", Service: "tcp-app-svc", @@ -811,14 +811,14 @@ func TestGenerateTransportServerConfig_UsesNotExistignSocketOnNotPlusAndNoEndpoi FailTimeout: "40s", }, }, - UpstreamParameters: &conf_v1alpha1.UpstreamParameters{ + UpstreamParameters: &conf_v1.UpstreamParameters{ ConnectTimeout: "30s", NextUpstream: false, }, - SessionParameters: &conf_v1alpha1.SessionParameters{ + SessionParameters: &conf_v1.SessionParameters{ Timeout: "50s", }, - Action: &conf_v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "tcp-app", }, }, @@ -878,20 +878,20 @@ func TestGenerateTransportServerConfig_UsesNotExistignSocketOnNotPlusAndNoEndpoi func TestGenerateTransportServerConfigForTCPWithTLS(t *testing.T) { t.Parallel() transportServerEx := TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: 
"tcp-server", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tcp-listener", Protocol: "TCP", }, - TLS: &conf_v1alpha1.TLS{ + TLS: &conf_v1.TransportServerTLS{ Secret: "my-secret", }, - Upstreams: []conf_v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "tcp-app", Service: "tcp-app-svc", @@ -900,14 +900,14 @@ func TestGenerateTransportServerConfigForTCPWithTLS(t *testing.T) { FailTimeout: "40s", }, }, - UpstreamParameters: &conf_v1alpha1.UpstreamParameters{ + UpstreamParameters: &conf_v1.UpstreamParameters{ ConnectTimeout: "30s", NextUpstream: false, }, - SessionParameters: &conf_v1alpha1.SessionParameters{ + SessionParameters: &conf_v1.SessionParameters{ Timeout: "50s", }, - Action: &conf_v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "tcp-app", }, }, @@ -985,13 +985,13 @@ func TestGenerateTransportServerConfigForTCPWithTLS(t *testing.T) { func TestGenerateUnixSocket(t *testing.T) { t.Parallel() transportServerEx := &TransportServerEx{ - TransportServer: &conf_v1alpha1.TransportServer{ + TransportServer: &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "tcp-server", Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tls-passthrough", }, }, @@ -1020,16 +1020,16 @@ func TestGenerateTransportServerHealthChecks(t *testing.T) { generatedUpsteamName := "ts_namespace_name_dns-tcp" tests := []struct { - upstreams []conf_v1alpha1.Upstream + upstreams []conf_v1.TransportServerUpstream expectedHC *version2.StreamHealthCheck expectedMatch *version2.Match msg string }{ { - upstreams: []conf_v1alpha1.Upstream{ + upstreams: []conf_v1.TransportServerUpstream{ { Name: "dns-tcp", - HealthCheck: 
&conf_v1alpha1.HealthCheck{ + HealthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: false, Timeout: "30s", Jitter: "30s", @@ -1045,10 +1045,10 @@ func TestGenerateTransportServerHealthChecks(t *testing.T) { msg: "health checks disabled", }, { - upstreams: []conf_v1alpha1.Upstream{ + upstreams: []conf_v1.TransportServerUpstream{ { Name: "dns-tcp", - HealthCheck: &conf_v1alpha1.HealthCheck{}, + HealthCheck: &conf_v1.TransportServerHealthCheck{}, }, }, expectedHC: nil, @@ -1056,10 +1056,10 @@ func TestGenerateTransportServerHealthChecks(t *testing.T) { msg: "empty health check", }, { - upstreams: []conf_v1alpha1.Upstream{ + upstreams: []conf_v1.TransportServerUpstream{ { Name: "dns-tcp", - HealthCheck: &conf_v1alpha1.HealthCheck{ + HealthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: true, Timeout: "40s", Jitter: "30s", @@ -1083,10 +1083,10 @@ func TestGenerateTransportServerHealthChecks(t *testing.T) { msg: "valid health checks", }, { - upstreams: []conf_v1alpha1.Upstream{ + upstreams: []conf_v1.TransportServerUpstream{ { Name: "dns-tcp", - HealthCheck: &conf_v1alpha1.HealthCheck{ + HealthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: true, Timeout: "40s", Jitter: "30s", @@ -1098,7 +1098,7 @@ func TestGenerateTransportServerHealthChecks(t *testing.T) { }, { Name: "dns-tcp-2", - HealthCheck: &conf_v1alpha1.HealthCheck{ + HealthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: false, Timeout: "50s", Jitter: "60s", @@ -1122,11 +1122,11 @@ func TestGenerateTransportServerHealthChecks(t *testing.T) { msg: "valid 2 health checks", }, { - upstreams: []conf_v1alpha1.Upstream{ + upstreams: []conf_v1.TransportServerUpstream{ { Name: "dns-tcp", Port: 90, - HealthCheck: &conf_v1alpha1.HealthCheck{ + HealthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: true, }, }, @@ -1143,13 +1143,13 @@ func TestGenerateTransportServerHealthChecks(t *testing.T) { msg: "return default values for health check", }, { - upstreams: []conf_v1alpha1.Upstream{ + upstreams: 
[]conf_v1.TransportServerUpstream{ { Name: "dns-tcp", Port: 90, - HealthCheck: &conf_v1alpha1.HealthCheck{ + HealthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: true, - Match: &conf_v1alpha1.Match{ + Match: &conf_v1.TransportServerMatch{ Send: `GET / HTTP/1.0\r\nHost: localhost\r\n\r\n`, Expect: "~*200 OK", }, @@ -1189,12 +1189,12 @@ func TestGenerateTransportServerHealthChecks(t *testing.T) { func TestGenerateHealthCheckMatch(t *testing.T) { t.Parallel() tests := []struct { - match *conf_v1alpha1.Match + match *conf_v1.TransportServerMatch expected *version2.Match msg string }{ { - match: &conf_v1alpha1.Match{ + match: &conf_v1.TransportServerMatch{ Send: "", Expect: "", }, @@ -1207,7 +1207,7 @@ func TestGenerateHealthCheckMatch(t *testing.T) { msg: "match with empty fields", }, { - match: &conf_v1alpha1.Match{ + match: &conf_v1.TransportServerMatch{ Send: "xxx", Expect: "yyy", }, @@ -1220,7 +1220,7 @@ func TestGenerateHealthCheckMatch(t *testing.T) { msg: "match with all fields and no regexp", }, { - match: &conf_v1alpha1.Match{ + match: &conf_v1.TransportServerMatch{ Send: "xxx", Expect: "~yyy", }, @@ -1233,7 +1233,7 @@ func TestGenerateHealthCheckMatch(t *testing.T) { msg: "match with all fields and case sensitive regexp", }, { - match: &conf_v1alpha1.Match{ + match: &conf_v1.TransportServerMatch{ Send: "xxx", Expect: "~*yyy", }, @@ -1263,7 +1263,7 @@ func intPointer(value int) *int { func TestGenerateTsSSLConfig(t *testing.T) { t.Parallel() validTests := []struct { - inputTLS *conf_v1alpha1.TLS + inputTLS *conf_v1.TransportServerTLS inputSecretRefs map[string]*secrets.SecretReference expectedSSL *version2.StreamSSL msg string @@ -1275,7 +1275,7 @@ func TestGenerateTsSSLConfig(t *testing.T) { msg: "no TLS field", }, { - inputTLS: &conf_v1alpha1.TLS{ + inputTLS: &conf_v1.TransportServerTLS{ Secret: "secret", }, inputSecretRefs: map[string]*secrets.SecretReference{ @@ -1296,14 +1296,14 @@ func TestGenerateTsSSLConfig(t *testing.T) { } invalidTests := 
[]struct { - inputTLS *conf_v1alpha1.TLS + inputTLS *conf_v1.TransportServerTLS inputSecretRefs map[string]*secrets.SecretReference expectedSSL *version2.StreamSSL expectedWarnings Warnings msg string }{ { - inputTLS: &conf_v1alpha1.TLS{ + inputTLS: &conf_v1.TransportServerTLS{ Secret: "missing", }, inputSecretRefs: map[string]*secrets.SecretReference{ @@ -1319,7 +1319,7 @@ func TestGenerateTsSSLConfig(t *testing.T) { msg: "missing doesn't exist in the cluster with HTTPS", }, { - inputTLS: &conf_v1alpha1.TLS{ + inputTLS: &conf_v1.TransportServerTLS{ Secret: "mistyped", }, inputSecretRefs: map[string]*secrets.SecretReference{ diff --git a/internal/configs/virtualserver_test.go b/internal/configs/virtualserver_test.go index 54832aa875..7879ebf9e1 100644 --- a/internal/configs/virtualserver_test.go +++ b/internal/configs/virtualserver_test.go @@ -11212,7 +11212,7 @@ var ( }, Spec: conf_v1.VirtualServerSpec{ Host: "cafe.example.com", - Listener: &conf_v1.Listener{ + Listener: &conf_v1.VirtualServerListener{ HTTP: "http-8083", HTTPS: "https-8443", }, @@ -11229,7 +11229,7 @@ var ( }, Spec: conf_v1.VirtualServerSpec{ Host: "cafe.example.com", - Listener: &conf_v1.Listener{ + Listener: &conf_v1.VirtualServerListener{ HTTP: "http-8083", }, }, @@ -11245,7 +11245,7 @@ var ( }, Spec: conf_v1.VirtualServerSpec{ Host: "cafe.example.com", - Listener: &conf_v1.Listener{ + Listener: &conf_v1.VirtualServerListener{ HTTPS: "https-8443", }, }, diff --git a/internal/k8s/configuration.go b/internal/k8s/configuration.go index e23eed1f06..04eae9d1c0 100644 --- a/internal/k8s/configuration.go +++ b/internal/k8s/configuration.go @@ -9,7 +9,6 @@ import ( "github.com/nginxinc/kubernetes-ingress/internal/configs" conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/validation" networking "k8s.io/api/networking/v1" 
"k8s.io/apimachinery/pkg/runtime" @@ -261,12 +260,12 @@ func (vsc *VirtualServerConfiguration) IsEqual(resource Resource) bool { // TransportServerConfiguration holds a TransportServer resource. type TransportServerConfiguration struct { ListenerPort int - TransportServer *conf_v1alpha1.TransportServer + TransportServer *conf_v1.TransportServer Warnings []string } // NewTransportServerConfiguration creates a new TransportServerConfiguration. -func NewTransportServerConfiguration(ts *conf_v1alpha1.TransportServer) *TransportServerConfiguration { +func NewTransportServerConfiguration(ts *conf_v1.TransportServer) *TransportServerConfiguration { return &TransportServerConfiguration{ TransportServer: ts, } @@ -329,15 +328,15 @@ type TransportServerMetrics struct { type Configuration struct { hosts map[string]Resource listeners map[string]*TransportServerConfiguration - listenerMap map[string]conf_v1alpha1.Listener + listenerMap map[string]conf_v1.Listener // only valid resources with the matching IngressClass are stored ingresses map[string]*networking.Ingress virtualServers map[string]*conf_v1.VirtualServer virtualServerRoutes map[string]*conf_v1.VirtualServerRoute - transportServers map[string]*conf_v1alpha1.TransportServer + transportServers map[string]*conf_v1.TransportServer - globalConfiguration *conf_v1alpha1.GlobalConfiguration + globalConfiguration *conf_v1.GlobalConfiguration hostProblems map[string]ConfigurationProblem listenerProblems map[string]ConfigurationProblem @@ -388,7 +387,7 @@ func NewConfiguration( ingresses: make(map[string]*networking.Ingress), virtualServers: make(map[string]*conf_v1.VirtualServer), virtualServerRoutes: make(map[string]*conf_v1.VirtualServerRoute), - transportServers: make(map[string]*conf_v1alpha1.TransportServer), + transportServers: make(map[string]*conf_v1.TransportServer), hostProblems: make(map[string]ConfigurationProblem), hasCorrectIngressClass: hasCorrectIngressClass, virtualServerValidator: virtualServerValidator, @@ 
-594,7 +593,7 @@ func (c *Configuration) DeleteVirtualServerRoute(key string) ([]ResourceChange, } // AddOrUpdateGlobalConfiguration adds or updates the GlobalConfiguration. -func (c *Configuration) AddOrUpdateGlobalConfiguration(gc *conf_v1alpha1.GlobalConfiguration) ([]ResourceChange, []ConfigurationProblem, error) { +func (c *Configuration) AddOrUpdateGlobalConfiguration(gc *conf_v1.GlobalConfiguration) ([]ResourceChange, []ConfigurationProblem, error) { c.lock.Lock() defer c.lock.Unlock() @@ -643,7 +642,7 @@ func (c *Configuration) DeleteGlobalConfiguration() ([]ResourceChange, []Configu } // GetGlobalConfiguration returns the current GlobalConfiguration. -func (c *Configuration) GetGlobalConfiguration() *conf_v1alpha1.GlobalConfiguration { +func (c *Configuration) GetGlobalConfiguration() *conf_v1.GlobalConfiguration { c.lock.RLock() defer c.lock.RUnlock() @@ -651,7 +650,7 @@ func (c *Configuration) GetGlobalConfiguration() *conf_v1alpha1.GlobalConfigurat } // AddOrUpdateTransportServer adds or updates the TransportServer. 
-func (c *Configuration) AddOrUpdateTransportServer(ts *conf_v1alpha1.TransportServer) ([]ResourceChange, []ConfigurationProblem) { +func (c *Configuration) AddOrUpdateTransportServer(ts *conf_v1.TransportServer) ([]ResourceChange, []ConfigurationProblem) { c.lock.Lock() defer c.lock.Unlock() @@ -769,7 +768,7 @@ func (c *Configuration) buildListenersAndTSConfigurations() (newListeners map[st newTSConfigs = make(map[string]*TransportServerConfiguration) for key, ts := range c.transportServers { - if ts.Spec.Listener.Protocol == conf_v1alpha1.TLSPassthroughListenerProtocol { + if ts.Spec.Listener.Protocol == conf_v1.TLSPassthroughListenerProtocol { continue } @@ -781,7 +780,7 @@ func (c *Configuration) buildListenersAndTSConfigurations() (newListeners map[st } found := false - var listener conf_v1alpha1.Listener + var listener conf_v1.Listener for _, l := range c.globalConfiguration.Spec.Listeners { if ts.Spec.Listener.Name == l.Name && ts.Spec.Listener.Protocol == l.Protocol { listener = l @@ -1468,7 +1467,7 @@ func (c *Configuration) buildHostsAndResources() (newHosts map[string]Resource, for _, key := range getSortedTransportServerKeys(c.transportServers) { ts := c.transportServers[key] - if ts.Spec.Listener.Name != conf_v1alpha1.TLSPassthroughListenerName && ts.Spec.Listener.Protocol != conf_v1alpha1.TLSPassthroughListenerProtocol { + if ts.Spec.Listener.Name != conf_v1.TLSPassthroughListenerName && ts.Spec.Listener.Protocol != conf_v1.TLSPassthroughListenerProtocol { continue } @@ -1655,7 +1654,7 @@ func (c *Configuration) GetTransportServerMetrics() *TransportServerMetrics { } func (c *Configuration) setGlobalConfigListenerMap() { - c.listenerMap = make(map[string]conf_v1alpha1.Listener) + c.listenerMap = make(map[string]conf_v1.Listener) if c.globalConfiguration != nil { for _, listener := range c.globalConfiguration.Spec.Listeners { @@ -1724,7 +1723,7 @@ func getSortedResourceKeys(m map[string]Resource) []string { return keys } -func 
getSortedTransportServerKeys(m map[string]*conf_v1alpha1.TransportServer) []string { +func getSortedTransportServerKeys(m map[string]*conf_v1.TransportServer) []string { var keys []string for k := range m { diff --git a/internal/k8s/configuration_test.go b/internal/k8s/configuration_test.go index e7b1b81398..60290ad2e7 100644 --- a/internal/k8s/configuration_test.go +++ b/internal/k8s/configuration_test.go @@ -6,7 +6,6 @@ import ( "github.com/google/go-cmp/cmp" conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/validation" networking "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -1879,7 +1878,7 @@ func TestHostCollisions(t *testing.T) { func TestAddTransportServer(t *testing.T) { configuration := createTestConfiguration() - listeners := []conf_v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "tcp-7777", Port: 7777, @@ -2056,7 +2055,7 @@ func TestAddTransportServerForTLSPassthrough(t *testing.T) { func TestListenerFlip(t *testing.T) { configuration := createTestConfiguration() - listeners := []conf_v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "tcp-7777", Port: 7777, @@ -2245,7 +2244,7 @@ func TestAddTransportServerWithIncorrectClass(t *testing.T) { func TestAddTransportServerWithNonExistingListener(t *testing.T) { configuration := createTestConfiguration() - addOrUpdateGlobalConfiguration(t, configuration, []conf_v1alpha1.Listener{}, noChanges, noProblems) + addOrUpdateGlobalConfiguration(t, configuration, []conf_v1.Listener{}, noChanges, noProblems) ts := createTestTransportServer("transportserver", "tcp-7777", "TCP") @@ -2286,7 +2285,7 @@ func TestDeleteNonExistingTransportServer(t *testing.T) { func TestAddOrUpdateGlobalConfiguration(t *testing.T) { configuration := createTestConfiguration() - listeners := []conf_v1alpha1.Listener{ 
+ listeners := []conf_v1.Listener{ { Name: "tcp-7777", Port: 7777, @@ -2319,7 +2318,7 @@ func TestAddOrUpdateGlobalConfiguration(t *testing.T) { func TestAddOrUpdateGlobalConfigurationThenAddTransportServer(t *testing.T) { configuration := createTestConfiguration() - listeners := []conf_v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "tcp-7777", Port: 7777, @@ -2333,7 +2332,7 @@ func TestAddOrUpdateGlobalConfigurationThenAddTransportServer(t *testing.T) { } gc := createTestGlobalConfiguration(listeners) - var nilGC *conf_v1alpha1.GlobalConfiguration + var nilGC *conf_v1.GlobalConfiguration var expectedChanges []ResourceChange var expectedProblems []ConfigurationProblem @@ -3254,7 +3253,7 @@ func TestDeleteGlobalConfigurationWithVirtualServerDeployedWithNoCustomListeners func TestPortCollisions(t *testing.T) { configuration := createTestConfiguration() - listeners := []conf_v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "tcp-7777", Port: 7777, @@ -3472,7 +3471,7 @@ func TestChallengeIngressNoVSR(t *testing.T) { } } -func addOrUpdateGlobalConfiguration(t *testing.T, c *Configuration, listeners []conf_v1alpha1.Listener, expectedChanges []ResourceChange, expectedProblems []ConfigurationProblem) { +func addOrUpdateGlobalConfiguration(t *testing.T, c *Configuration, listeners []conf_v1.Listener, expectedChanges []ResourceChange, expectedProblems []ConfigurationProblem) { t.Helper() gc := createTestGlobalConfiguration(listeners) changes, problems, err := c.AddOrUpdateGlobalConfiguration(gc) @@ -3633,7 +3632,7 @@ func createTestVirtualServerWithListeners(name string, host string, httpListener CreationTimestamp: metav1.Now(), }, Spec: conf_v1.VirtualServerSpec{ - Listener: &conf_v1.Listener{ + Listener: &conf_v1.VirtualServerListener{ HTTP: httpListener, HTTPS: httpsListener, }, @@ -3699,47 +3698,47 @@ func createTestChallengeVirtualServerRoute(name string, host string, path string } } -func createTestTransportServer(name string, listenerName 
string, listenerProtocol string) *conf_v1alpha1.TransportServer { - return &conf_v1alpha1.TransportServer{ +func createTestTransportServer(name string, listenerName string, listenerProtocol string) *conf_v1.TransportServer { + return &conf_v1.TransportServer{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "default", CreationTimestamp: metav1.Now(), Generation: 1, }, - Spec: conf_v1alpha1.TransportServerSpec{ - Listener: conf_v1alpha1.TransportServerListener{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: listenerName, Protocol: listenerProtocol, }, - Upstreams: []conf_v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "myapp", Service: "myapp-svc", Port: 1234, }, }, - Action: &conf_v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "myapp", }, }, } } -func createTestTLSPassthroughTransportServer(name string, host string) *conf_v1alpha1.TransportServer { - ts := createTestTransportServer(name, conf_v1alpha1.TLSPassthroughListenerName, conf_v1alpha1.TLSPassthroughListenerProtocol) +func createTestTLSPassthroughTransportServer(name string, host string) *conf_v1.TransportServer { + ts := createTestTransportServer(name, conf_v1.TLSPassthroughListenerName, conf_v1.TLSPassthroughListenerProtocol) ts.Spec.Host = host return ts } -func createTestGlobalConfiguration(listeners []conf_v1alpha1.Listener) *conf_v1alpha1.GlobalConfiguration { - return &conf_v1alpha1.GlobalConfiguration{ +func createTestGlobalConfiguration(listeners []conf_v1.Listener) *conf_v1.GlobalConfiguration { + return &conf_v1.GlobalConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: "globalconfiguration", Namespace: "nginx-ingress", }, - Spec: conf_v1alpha1.GlobalConfigurationSpec{ + Spec: conf_v1.GlobalConfigurationSpec{ Listeners: listeners, }, } @@ -3982,7 +3981,7 @@ func (rc *testReferenceChecker) IsReferencedByVirtualServerRoute(namespace strin return rc.onlyVirtualServerRoutes && namespace == rc.resourceNamespace && 
name == rc.resourceName } -func (rc *testReferenceChecker) IsReferencedByTransportServer(namespace string, name string, _ *conf_v1alpha1.TransportServer) bool { +func (rc *testReferenceChecker) IsReferencedByTransportServer(namespace string, name string, _ *conf_v1.TransportServer) bool { return rc.onlyTransportServers && namespace == rc.resourceNamespace && name == rc.resourceName } @@ -4003,7 +4002,7 @@ func TestFindResourcesForResourceReference(t *testing.T) { }) vsr := createTestVirtualServerRoute("virtualserverroute", "asd.example.com", "/") tsPassthrough := createTestTLSPassthroughTransportServer("transportserver-passthrough", "ts.example.com") - listeners := []conf_v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "tcp-7777", Port: 7777, @@ -4117,7 +4116,7 @@ func TestGetResources(t *testing.T) { passTS := createTestTLSPassthroughTransportServer("transportserver", "abc.example.com") ts := createTestTransportServer("transportserver-tcp", "tcp-7777", "TCP") - listeners := []conf_v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "tcp-7777", Port: 7777, @@ -4185,7 +4184,7 @@ func TestGetTransportServerMetrics(t *testing.T) { tsUDP := createTestTransportServer("transportserver-udp", "udp-7777", "UDP") tests := []struct { - tses []*conf_v1alpha1.TransportServer + tses []*conf_v1.TransportServer expected *TransportServerMetrics msg string }{ @@ -4199,7 +4198,7 @@ func TestGetTransportServerMetrics(t *testing.T) { msg: "no TransportServers", }, { - tses: []*conf_v1alpha1.TransportServer{ + tses: []*conf_v1.TransportServer{ tsPass, }, expected: &TransportServerMetrics{ @@ -4210,7 +4209,7 @@ func TestGetTransportServerMetrics(t *testing.T) { msg: "one TLSPassthrough TransportServer", }, { - tses: []*conf_v1alpha1.TransportServer{ + tses: []*conf_v1.TransportServer{ tsTCP, }, expected: &TransportServerMetrics{ @@ -4221,7 +4220,7 @@ func TestGetTransportServerMetrics(t *testing.T) { msg: "one TCP TransportServer", }, { - tses: 
[]*conf_v1alpha1.TransportServer{ + tses: []*conf_v1.TransportServer{ tsUDP, }, expected: &TransportServerMetrics{ @@ -4232,7 +4231,7 @@ func TestGetTransportServerMetrics(t *testing.T) { msg: "one UDP TransportServer", }, { - tses: []*conf_v1alpha1.TransportServer{ + tses: []*conf_v1.TransportServer{ tsPass, tsTCP, tsUDP, }, expected: &TransportServerMetrics{ @@ -4244,7 +4243,7 @@ func TestGetTransportServerMetrics(t *testing.T) { }, } - listeners := []conf_v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "tcp-7777", Port: 7777, @@ -4537,7 +4536,7 @@ var ( noProblems []ConfigurationProblem // customHTTPAndHTTPSListeners defines a custom HTTP and HTTPS listener on port 8082 and 8442 - customHTTPAndHTTPSListeners = []conf_v1alpha1.Listener{ + customHTTPAndHTTPSListeners = []conf_v1.Listener{ { Name: "http-8082", Port: 8082, @@ -4552,7 +4551,7 @@ var ( } // customHTTPSListener defines a customHTTPS listener on port 8442 - customHTTPSListener = []conf_v1alpha1.Listener{ + customHTTPSListener = []conf_v1.Listener{ { Name: "https-8442", Port: 8442, @@ -4562,7 +4561,7 @@ var ( } // customHTTPListener defines a custom HTTP listener on port 8082 - customHTTPListener = []conf_v1alpha1.Listener{ + customHTTPListener = []conf_v1.Listener{ { Name: "http-8082", Port: 8082, @@ -4571,7 +4570,7 @@ var ( } // customHTTPListenerSSLTrue defines a custom HTTP listener on port 8082 with SSL set to true - customHTTPListenerSSLTrue = []conf_v1alpha1.Listener{ + customHTTPListenerSSLTrue = []conf_v1.Listener{ { Name: "http-8082", Port: 8082, @@ -4587,7 +4586,7 @@ var ( } // customHTTPSListenerSSLFalse defines a custom HTTPS listener on port 8442 with SSL set to false - customHTTPSListenerSSLFalse = []conf_v1alpha1.Listener{ + customHTTPSListenerSSLFalse = []conf_v1.Listener{ { Name: "http-8082", Port: 8082, @@ -4603,7 +4602,7 @@ var ( } // bogusHTTPListener defines a HTTP listener with an invalid name - bogusHTTPListener = []conf_v1alpha1.Listener{ + bogusHTTPListener = 
[]conf_v1.Listener{ { Name: "http-bogus", Port: 8082, @@ -4618,7 +4617,7 @@ var ( } // bogusHTTPsListener defines a HTTPs listener with an invalid name - bogusHTTPSListener = []conf_v1alpha1.Listener{ + bogusHTTPSListener = []conf_v1.Listener{ { Name: "http-8082", Port: 8082, diff --git a/internal/k8s/controller.go b/internal/k8s/controller.go index e38efa699b..7cd64a0b31 100644 --- a/internal/k8s/controller.go +++ b/internal/k8s/controller.go @@ -60,7 +60,6 @@ import ( meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/validation" k8s_nginx "github.com/nginxinc/kubernetes-ingress/pkg/client/clientset/versioned" k8s_nginx_informers "github.com/nginxinc/kubernetes-ingress/pkg/client/informers/externalversions" @@ -594,11 +593,11 @@ func (nsi *namespacedInformer) addPolicyHandler(handlers cache.ResourceEventHand func (lbc *LoadBalancerController) addGlobalConfigurationHandler(handlers cache.ResourceEventHandlerFuncs, namespace string, name string) { lbc.globalConfigurationLister, lbc.globalConfigurationController = cache.NewInformer( cache.NewListWatchFromClient( - lbc.confClient.K8sV1alpha1().RESTClient(), + lbc.confClient.K8sV1().RESTClient(), "globalconfigurations", namespace, fields.Set{"metadata.name": name}.AsSelector()), - &conf_v1alpha1.GlobalConfiguration{}, + &conf_v1.GlobalConfiguration{}, lbc.resync, handlers, ) @@ -606,7 +605,7 @@ func (lbc *LoadBalancerController) addGlobalConfigurationHandler(handlers cache. 
} func (nsi *namespacedInformer) addTransportServerHandler(handlers cache.ResourceEventHandlerFuncs) { - informer := nsi.confSharedInformerFactory.K8s().V1alpha1().TransportServers().Informer() + informer := nsi.confSharedInformerFactory.K8s().V1().TransportServers().Informer() informer.AddEventHandler(handlers) nsi.transportServerLister = informer.GetStore() @@ -1161,7 +1160,7 @@ func (lbc *LoadBalancerController) cleanupUnwatchedNamespacedResources(nsi *name var delTsList []string for _, obj := range nsi.transportServerLister.List() { - ts := obj.(*conf_v1alpha1.TransportServer) + ts := obj.(*conf_v1.TransportServer) key := getResourceKey(&ts.ObjectMeta) delTsList = append(delTsList, key) lbc.configuration.DeleteTransportServer(key) @@ -1403,7 +1402,7 @@ func (lbc *LoadBalancerController) syncTransportServer(task task) { changes, problems = lbc.configuration.DeleteTransportServer(key) } else { glog.V(2).Infof("Adding or Updating TransportServer: %v\n", key) - ts := obj.(*conf_v1alpha1.TransportServer) + ts := obj.(*conf_v1.TransportServer) changes, problems = lbc.configuration.AddOrUpdateTransportServer(ts) } @@ -1430,7 +1429,7 @@ func (lbc *LoadBalancerController) syncGlobalConfiguration(task task) { } else { glog.V(2).Infof("Adding or Updating GlobalConfiguration: %v\n", key) - gc := obj.(*conf_v1alpha1.GlobalConfiguration) + gc := obj.(*conf_v1.GlobalConfiguration) changes, problems, validationErr = lbc.configuration.AddOrUpdateGlobalConfiguration(gc) } @@ -1453,7 +1452,7 @@ func (lbc *LoadBalancerController) syncGlobalConfiguration(task task) { eventMessage = fmt.Sprintf("%s; with reload error: %v", eventMessage, updateErr) } - gc := obj.(*conf_v1alpha1.GlobalConfiguration) + gc := obj.(*conf_v1.GlobalConfiguration) lbc.recorder.Eventf(gc, eventType, eventTitle, eventMessage) } @@ -1516,7 +1515,7 @@ func (lbc *LoadBalancerController) processProblems(problems []ConfigurationProbl if err != nil { glog.Errorf("Error when updating the status for VirtualServer 
%v/%v: %v", obj.Namespace, obj.Name, err) } - case *conf_v1alpha1.TransportServer: + case *conf_v1.TransportServer: err := lbc.statusUpdater.UpdateTransportServerStatus(obj, state, p.Reason, p.Message) if err != nil { glog.Errorf("Error when updating the status for TransportServer %v/%v: %v", obj.Namespace, obj.Name, err) @@ -2690,7 +2689,7 @@ func (lbc *LoadBalancerController) updateTransportServersStatusFromEvents() erro var allErrs []error for _, nsi := range lbc.namespacedInformers { for _, obj := range nsi.transportServerLister.List() { - ts := obj.(*conf_v1alpha1.TransportServer) + ts := obj.(*conf_v1.TransportServer) events, err := lbc.client.CoreV1().Events(ts.Namespace).List(context.TODO(), meta_v1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%v,involvedObject.uid=%v", ts.Name, ts.UID)}) @@ -3528,7 +3527,7 @@ func isMatchingResourceRef(ownerNs, resRef, key string) bool { return resRef == key } -func (lbc *LoadBalancerController) createTransportServerEx(transportServer *conf_v1alpha1.TransportServer, listenerPort int) *configs.TransportServerEx { +func (lbc *LoadBalancerController) createTransportServerEx(transportServer *conf_v1.TransportServer, listenerPort int) *configs.TransportServerEx { endpoints := make(map[string][]string) externalNameSvcs := make(map[string]bool) podsByIP := make(map[string]string) @@ -3972,7 +3971,7 @@ func (lbc *LoadBalancerController) HasCorrectIngressClass(obj interface{}) bool class = obj.Spec.IngressClass case *conf_v1.VirtualServerRoute: class = obj.Spec.IngressClass - case *conf_v1alpha1.TransportServer: + case *conf_v1.TransportServer: class = obj.Spec.IngressClass case *conf_v1.Policy: class = obj.Spec.IngressClass diff --git a/internal/k8s/controller_test.go b/internal/k8s/controller_test.go index 4a30de2aa7..7a7331f428 100644 --- a/internal/k8s/controller_test.go +++ b/internal/k8s/controller_test.go @@ -19,7 +19,6 @@ import ( "github.com/nginxinc/kubernetes-ingress/internal/metrics/collectors" 
"github.com/nginxinc/kubernetes-ingress/internal/nginx" conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" api_v1 "k8s.io/api/core/v1" networking "k8s.io/api/networking/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -173,7 +172,7 @@ func deepCopyWithIngressClass(obj interface{}, class string) interface{} { objCopy := obj.DeepCopy() objCopy.Spec.IngressClass = class return objCopy - case *conf_v1alpha1.TransportServer: + case *conf_v1.TransportServer: objCopy := obj.DeepCopy() objCopy.Spec.IngressClass = class return objCopy @@ -217,7 +216,7 @@ func TestIngressClassForCustomResources(t *testing.T) { resources := []interface{}{ &conf_v1.VirtualServer{}, &conf_v1.VirtualServerRoute{}, - &conf_v1alpha1.TransportServer{}, + &conf_v1.TransportServer{}, } for _, r := range resources { diff --git a/internal/k8s/handlers.go b/internal/k8s/handlers.go index 7b07b7f69b..88f06a7766 100644 --- a/internal/k8s/handlers.go +++ b/internal/k8s/handlers.go @@ -16,8 +16,6 @@ import ( "k8s.io/client-go/tools/cache" conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) @@ -357,19 +355,19 @@ func createVirtualServerRouteHandlers(lbc *LoadBalancerController) cache.Resourc func createGlobalConfigurationHandlers(lbc *LoadBalancerController) cache.ResourceEventHandlerFuncs { return cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - gc := obj.(*conf_v1alpha1.GlobalConfiguration) + gc := obj.(*conf_v1.GlobalConfiguration) glog.V(3).Infof("Adding GlobalConfiguration: %v", gc.Name) lbc.AddSyncQueue(gc) }, DeleteFunc: func(obj interface{}) { - gc, isGc := obj.(*conf_v1alpha1.GlobalConfiguration) + gc, isGc := obj.(*conf_v1.GlobalConfiguration) if !isGc { deletedState, ok := 
obj.(cache.DeletedFinalStateUnknown) if !ok { glog.V(3).Infof("Error received unexpected object: %v", obj) return } - gc, ok = deletedState.Obj.(*conf_v1alpha1.GlobalConfiguration) + gc, ok = deletedState.Obj.(*conf_v1.GlobalConfiguration) if !ok { glog.V(3).Infof("Error DeletedFinalStateUnknown contained non-GlobalConfiguration object: %v", deletedState.Obj) return @@ -379,7 +377,7 @@ func createGlobalConfigurationHandlers(lbc *LoadBalancerController) cache.Resour lbc.AddSyncQueue(gc) }, UpdateFunc: func(old, cur interface{}) { - curGc := cur.(*conf_v1alpha1.GlobalConfiguration) + curGc := cur.(*conf_v1.GlobalConfiguration) if !reflect.DeepEqual(old, cur) { glog.V(3).Infof("GlobalConfiguration %v changed, syncing", curGc.Name) lbc.AddSyncQueue(curGc) @@ -391,19 +389,19 @@ func createGlobalConfigurationHandlers(lbc *LoadBalancerController) cache.Resour func createTransportServerHandlers(lbc *LoadBalancerController) cache.ResourceEventHandlerFuncs { return cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - ts := obj.(*conf_v1alpha1.TransportServer) + ts := obj.(*conf_v1.TransportServer) glog.V(3).Infof("Adding TransportServer: %v", ts.Name) lbc.AddSyncQueue(ts) }, DeleteFunc: func(obj interface{}) { - ts, isTs := obj.(*conf_v1alpha1.TransportServer) + ts, isTs := obj.(*conf_v1.TransportServer) if !isTs { deletedState, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { glog.V(3).Infof("Error received unexpected object: %v", obj) return } - ts, ok = deletedState.Obj.(*conf_v1alpha1.TransportServer) + ts, ok = deletedState.Obj.(*conf_v1.TransportServer) if !ok { glog.V(3).Infof("Error DeletedFinalStateUnknown contained non-TransportServer object: %v", deletedState.Obj) return @@ -413,7 +411,7 @@ func createTransportServerHandlers(lbc *LoadBalancerController) cache.ResourceEv lbc.AddSyncQueue(ts) }, UpdateFunc: func(old, cur interface{}) { - curTs := cur.(*conf_v1alpha1.TransportServer) + curTs := cur.(*conf_v1.TransportServer) if 
!reflect.DeepEqual(old, cur) { glog.V(3).Infof("TransportServer %v changed, syncing", curTs.Name) lbc.AddSyncQueue(curTs) diff --git a/internal/k8s/reference_checkers.go b/internal/k8s/reference_checkers.go index f8fe80467a..237e06c4f4 100644 --- a/internal/k8s/reference_checkers.go +++ b/internal/k8s/reference_checkers.go @@ -4,17 +4,16 @@ import ( "strings" "github.com/nginxinc/kubernetes-ingress/internal/configs" - v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" + conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" networking "k8s.io/api/networking/v1" ) type resourceReferenceChecker interface { IsReferencedByIngress(namespace string, name string, ing *networking.Ingress) bool IsReferencedByMinion(namespace string, name string, ing *networking.Ingress) bool - IsReferencedByVirtualServer(namespace string, name string, vs *v1.VirtualServer) bool - IsReferencedByVirtualServerRoute(namespace string, name string, vsr *v1.VirtualServerRoute) bool - IsReferencedByTransportServer(namespace string, name string, ts *conf_v1alpha1.TransportServer) bool + IsReferencedByVirtualServer(namespace string, name string, vs *conf_v1.VirtualServer) bool + IsReferencedByVirtualServerRoute(namespace string, name string, vsr *conf_v1.VirtualServerRoute) bool + IsReferencedByTransportServer(namespace string, name string, ts *conf_v1.TransportServer) bool } type secretReferenceChecker struct { @@ -75,7 +74,7 @@ func (rc *secretReferenceChecker) IsReferencedByMinion(secretNamespace string, s return false } -func (rc *secretReferenceChecker) IsReferencedByVirtualServer(secretNamespace string, secretName string, vs *v1.VirtualServer) bool { +func (rc *secretReferenceChecker) IsReferencedByVirtualServer(secretNamespace string, secretName string, vs *conf_v1.VirtualServer) bool { if vs.Namespace != secretNamespace { return false } @@ -87,11 +86,11 @@ func (rc 
*secretReferenceChecker) IsReferencedByVirtualServer(secretNamespace st return false } -func (rc *secretReferenceChecker) IsReferencedByVirtualServerRoute(_ string, _ string, _ *v1.VirtualServerRoute) bool { +func (rc *secretReferenceChecker) IsReferencedByVirtualServerRoute(_ string, _ string, _ *conf_v1.VirtualServerRoute) bool { return false } -func (rc *secretReferenceChecker) IsReferencedByTransportServer(_ string, _ string, _ *conf_v1alpha1.TransportServer) bool { +func (rc *secretReferenceChecker) IsReferencedByTransportServer(_ string, _ string, _ *conf_v1.TransportServer) bool { return false } @@ -131,7 +130,7 @@ func (rc *serviceReferenceChecker) IsReferencedByMinion(svcNamespace string, svc return rc.IsReferencedByIngress(svcNamespace, svcName, ing) } -func (rc *serviceReferenceChecker) IsReferencedByVirtualServer(svcNamespace string, svcName string, vs *v1.VirtualServer) bool { +func (rc *serviceReferenceChecker) IsReferencedByVirtualServer(svcNamespace string, svcName string, vs *conf_v1.VirtualServer) bool { if vs.Namespace != svcNamespace { return false } @@ -148,7 +147,7 @@ func (rc *serviceReferenceChecker) IsReferencedByVirtualServer(svcNamespace stri return false } -func (rc *serviceReferenceChecker) IsReferencedByVirtualServerRoute(svcNamespace string, svcName string, vsr *v1.VirtualServerRoute) bool { +func (rc *serviceReferenceChecker) IsReferencedByVirtualServerRoute(svcNamespace string, svcName string, vsr *conf_v1.VirtualServerRoute) bool { if vsr.Namespace != svcNamespace { return false } @@ -165,7 +164,7 @@ func (rc *serviceReferenceChecker) IsReferencedByVirtualServerRoute(svcNamespace return false } -func (rc *serviceReferenceChecker) IsReferencedByTransportServer(svcNamespace string, svcName string, ts *conf_v1alpha1.TransportServer) bool { +func (rc *serviceReferenceChecker) IsReferencedByTransportServer(svcNamespace string, svcName string, ts *conf_v1.TransportServer) bool { if ts.Namespace != svcNamespace { return false } @@ -193,7 
+192,7 @@ func (rc *policyReferenceChecker) IsReferencedByMinion(_ string, _ string, _ *ne return false } -func (rc *policyReferenceChecker) IsReferencedByVirtualServer(policyNamespace string, policyName string, vs *v1.VirtualServer) bool { +func (rc *policyReferenceChecker) IsReferencedByVirtualServer(policyNamespace string, policyName string, vs *conf_v1.VirtualServer) bool { if isPolicyReferenced(vs.Spec.Policies, vs.Namespace, policyNamespace, policyName) { return true } @@ -207,7 +206,7 @@ func (rc *policyReferenceChecker) IsReferencedByVirtualServer(policyNamespace st return false } -func (rc *policyReferenceChecker) IsReferencedByVirtualServerRoute(policyNamespace string, policyName string, vsr *v1.VirtualServerRoute) bool { +func (rc *policyReferenceChecker) IsReferencedByVirtualServerRoute(policyNamespace string, policyName string, vsr *conf_v1.VirtualServerRoute) bool { for _, r := range vsr.Spec.Subroutes { if isPolicyReferenced(r.Policies, vsr.Namespace, policyNamespace, policyName) { return true @@ -217,7 +216,7 @@ func (rc *policyReferenceChecker) IsReferencedByVirtualServerRoute(policyNamespa return false } -func (rc *policyReferenceChecker) IsReferencedByTransportServer(_ string, _ string, _ *conf_v1alpha1.TransportServer) bool { +func (rc *policyReferenceChecker) IsReferencedByTransportServer(_ string, _ string, _ *conf_v1.TransportServer) bool { return false } @@ -247,19 +246,19 @@ func (rc *appProtectResourceReferenceChecker) IsReferencedByMinion(_ string, _ s return false } -func (rc *appProtectResourceReferenceChecker) IsReferencedByVirtualServer(_ string, _ string, _ *v1.VirtualServer) bool { +func (rc *appProtectResourceReferenceChecker) IsReferencedByVirtualServer(_ string, _ string, _ *conf_v1.VirtualServer) bool { return false } -func (rc *appProtectResourceReferenceChecker) IsReferencedByVirtualServerRoute(_ string, _ string, _ *v1.VirtualServerRoute) bool { +func (rc *appProtectResourceReferenceChecker) IsReferencedByVirtualServerRoute(_ 
string, _ string, _ *conf_v1.VirtualServerRoute) bool { return false } -func (rc *appProtectResourceReferenceChecker) IsReferencedByTransportServer(_ string, _ string, _ *conf_v1alpha1.TransportServer) bool { +func (rc *appProtectResourceReferenceChecker) IsReferencedByTransportServer(_ string, _ string, _ *conf_v1.TransportServer) bool { return false } -func isPolicyReferenced(policies []v1.PolicyReference, resourceNamespace string, policyNamespace string, policyName string) bool { +func isPolicyReferenced(policies []conf_v1.PolicyReference, resourceNamespace string, policyNamespace string, policyName string) bool { for _, p := range policies { namespace := p.Namespace if namespace == "" { @@ -294,7 +293,7 @@ func (rc *dosResourceReferenceChecker) IsReferencedByMinion(_ string, _ string, return false } -func (rc *dosResourceReferenceChecker) IsReferencedByVirtualServer(namespace string, name string, vs *v1.VirtualServer) bool { +func (rc *dosResourceReferenceChecker) IsReferencedByVirtualServer(namespace string, name string, vs *conf_v1.VirtualServer) bool { if vs.Spec.Dos == namespace+"/"+name || (namespace == vs.Namespace && vs.Spec.Dos == name) { return true } @@ -306,7 +305,7 @@ func (rc *dosResourceReferenceChecker) IsReferencedByVirtualServer(namespace str return false } -func (rc *dosResourceReferenceChecker) IsReferencedByVirtualServerRoute(namespace string, name string, vsr *v1.VirtualServerRoute) bool { +func (rc *dosResourceReferenceChecker) IsReferencedByVirtualServerRoute(namespace string, name string, vsr *conf_v1.VirtualServerRoute) bool { for _, route := range vsr.Spec.Subroutes { if route.Dos == namespace+"/"+name || (namespace == vsr.Namespace && route.Dos == name) { return true @@ -315,6 +314,6 @@ func (rc *dosResourceReferenceChecker) IsReferencedByVirtualServerRoute(namespac return false } -func (rc *dosResourceReferenceChecker) IsReferencedByTransportServer(_ string, _ string, _ *conf_v1alpha1.TransportServer) bool { +func (rc 
*dosResourceReferenceChecker) IsReferencedByTransportServer(_ string, _ string, _ *conf_v1.TransportServer) bool { return false } diff --git a/internal/k8s/reference_checkers_test.go b/internal/k8s/reference_checkers_test.go index 2262c47a86..ba2a4b2892 100644 --- a/internal/k8s/reference_checkers_test.go +++ b/internal/k8s/reference_checkers_test.go @@ -5,7 +5,6 @@ import ( "github.com/nginxinc/kubernetes-ingress/internal/configs" conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" networking "k8s.io/api/networking/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -579,19 +578,19 @@ func TestServiceIsReferencedByVirtualServerAndVirtualServerRoutes(t *testing.T) func TestIsServiceReferencedByTransportServer(t *testing.T) { t.Parallel() tests := []struct { - ts *conf_v1alpha1.TransportServer + ts *conf_v1.TransportServer serviceNamespace string serviceName string expected bool msg string }{ { - ts: &conf_v1alpha1.TransportServer{ + ts: &conf_v1.TransportServer{ ObjectMeta: v1.ObjectMeta{ Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Upstreams: []conf_v1alpha1.Upstream{ + Spec: conf_v1.TransportServerSpec{ + Upstreams: []conf_v1.TransportServerUpstream{ { Service: "test-service", }, @@ -604,12 +603,12 @@ func TestIsServiceReferencedByTransportServer(t *testing.T) { msg: "service is referenced in an upstream", }, { - ts: &conf_v1alpha1.TransportServer{ + ts: &conf_v1.TransportServer{ ObjectMeta: v1.ObjectMeta{ Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Upstreams: []conf_v1alpha1.Upstream{ + Spec: conf_v1.TransportServerSpec{ + Upstreams: []conf_v1.TransportServerUpstream{ { Service: "test-service", }, @@ -622,12 +621,12 @@ func TestIsServiceReferencedByTransportServer(t *testing.T) { msg: "wrong namespace for service in an upstream", }, { - ts: &conf_v1alpha1.TransportServer{ + ts: &conf_v1.TransportServer{ 
ObjectMeta: v1.ObjectMeta{ Namespace: "default", }, - Spec: conf_v1alpha1.TransportServerSpec{ - Upstreams: []conf_v1alpha1.Upstream{ + Spec: conf_v1.TransportServerSpec{ + Upstreams: []conf_v1.TransportServerUpstream{ { Service: "test-service", }, diff --git a/internal/k8s/status.go b/internal/k8s/status.go index bec7dcf5a7..6a337947b4 100644 --- a/internal/k8s/status.go +++ b/internal/k8s/status.go @@ -10,7 +10,6 @@ import ( "github.com/golang/glog" conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" k8s_nginx "github.com/nginxinc/kubernetes-ingress/pkg/client/clientset/versioned" api_v1 "k8s.io/api/core/v1" networking "k8s.io/api/networking/v1" @@ -360,14 +359,14 @@ func (su *statusUpdater) ClearStatusFromIngressLink() { su.externalEndpoints = su.generateExternalEndpointsFromStatus(su.status) } -func (su *statusUpdater) retryUpdateTransportServerStatus(tsCopy *conf_v1alpha1.TransportServer) error { - ts, err := su.confClient.K8sV1alpha1().TransportServers(tsCopy.Namespace).Get(context.TODO(), tsCopy.Name, metav1.GetOptions{}) +func (su *statusUpdater) retryUpdateTransportServerStatus(tsCopy *conf_v1.TransportServer) error { + ts, err := su.confClient.K8sV1().TransportServers(tsCopy.Namespace).Get(context.TODO(), tsCopy.Name, metav1.GetOptions{}) if err != nil { return err } ts.Status = tsCopy.Status - _, err = su.confClient.K8sV1alpha1().TransportServers(ts.Namespace).UpdateStatus(context.TODO(), ts, metav1.UpdateOptions{}) + _, err = su.confClient.K8sV1().TransportServers(ts.Namespace).UpdateStatus(context.TODO(), ts, metav1.UpdateOptions{}) if err != nil { return err } @@ -422,7 +421,7 @@ func hasVsStatusChanged(vs *conf_v1.VirtualServer, state string, reason string, } // UpdateTransportServerStatus updates the status of a TransportServer. 
-func (su *statusUpdater) UpdateTransportServerStatus(ts *conf_v1alpha1.TransportServer, state string, reason string, message string) error { +func (su *statusUpdater) UpdateTransportServerStatus(ts *conf_v1.TransportServer, state string, reason string, message string) error { var tsLatest interface{} var exists bool var err error @@ -438,16 +437,16 @@ func (su *statusUpdater) UpdateTransportServerStatus(ts *conf_v1alpha1.Transport return nil } - if !hasTsStatusChanged(tsLatest.(*conf_v1alpha1.TransportServer), state, reason, message) { + if !hasTsStatusChanged(tsLatest.(*conf_v1.TransportServer), state, reason, message) { return nil } - tsCopy := tsLatest.(*conf_v1alpha1.TransportServer).DeepCopy() + tsCopy := tsLatest.(*conf_v1.TransportServer).DeepCopy() tsCopy.Status.State = state tsCopy.Status.Reason = reason tsCopy.Status.Message = message - _, err = su.confClient.K8sV1alpha1().TransportServers(tsCopy.Namespace).UpdateStatus(context.TODO(), tsCopy, metav1.UpdateOptions{}) + _, err = su.confClient.K8sV1().TransportServers(tsCopy.Namespace).UpdateStatus(context.TODO(), tsCopy, metav1.UpdateOptions{}) if err != nil { glog.V(3).Infof("error setting TransportServer %v/%v status, retrying: %v", tsCopy.Namespace, tsCopy.Name, err) return su.retryUpdateTransportServerStatus(tsCopy) @@ -455,7 +454,7 @@ func (su *statusUpdater) UpdateTransportServerStatus(ts *conf_v1alpha1.Transport return err } -func hasTsStatusChanged(ts *conf_v1alpha1.TransportServer, state string, reason string, message string) bool { +func hasTsStatusChanged(ts *conf_v1.TransportServer, state string, reason string, message string) bool { if ts.Status.State != state { return true } diff --git a/internal/k8s/status_test.go b/internal/k8s/status_test.go index 39dbe906b0..07af62480c 100644 --- a/internal/k8s/status_test.go +++ b/internal/k8s/status_test.go @@ -7,7 +7,6 @@ import ( "github.com/google/go-cmp/cmp" conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" - conf_v1alpha1 
"github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" fake_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/client/clientset/versioned/fake" v1 "k8s.io/api/core/v1" networking "k8s.io/api/networking/v1" @@ -20,12 +19,12 @@ import ( func TestUpdateTransportServerStatus(t *testing.T) { t.Parallel() - ts := &conf_v1alpha1.TransportServer{ + ts := &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "ts-1", Namespace: "default", }, - Status: conf_v1alpha1.TransportServerStatus{ + Status: conf_v1.TransportServerStatus{ State: "before status", Reason: "before reason", Message: "before message", @@ -33,8 +32,8 @@ func TestUpdateTransportServerStatus(t *testing.T) { } fakeClient := fake_v1alpha1.NewSimpleClientset( - &conf_v1alpha1.TransportServerList{ - Items: []conf_v1alpha1.TransportServer{ + &conf_v1.TransportServerList{ + Items: []conf_v1.TransportServer{ *ts, }, }) @@ -57,9 +56,9 @@ func TestUpdateTransportServerStatus(t *testing.T) { if err != nil { t.Errorf("error updating transportserver status: %v", err) } - updatedTs, _ := fakeClient.K8sV1alpha1().TransportServers(ts.Namespace).Get(context.TODO(), ts.Name, meta_v1.GetOptions{}) + updatedTs, _ := fakeClient.K8sV1().TransportServers(ts.Namespace).Get(context.TODO(), ts.Name, meta_v1.GetOptions{}) - expectedStatus := conf_v1alpha1.TransportServerStatus{ + expectedStatus := conf_v1.TransportServerStatus{ State: "after status", Reason: "after reason", Message: "after message", @@ -72,12 +71,12 @@ func TestUpdateTransportServerStatus(t *testing.T) { func TestUpdateTransportServerStatusIgnoreNoChange(t *testing.T) { t.Parallel() - ts := &conf_v1alpha1.TransportServer{ + ts := &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "ts-1", Namespace: "default", }, - Status: conf_v1alpha1.TransportServerStatus{ + Status: conf_v1.TransportServerStatus{ State: "same status", Reason: "same reason", Message: "same message", @@ -85,20 +84,20 @@ func 
TestUpdateTransportServerStatusIgnoreNoChange(t *testing.T) { } fakeClient := fake_v1alpha1.NewSimpleClientset( - &conf_v1alpha1.TransportServerList{ - Items: []conf_v1alpha1.TransportServer{ + &conf_v1.TransportServerList{ + Items: []conf_v1.TransportServer{ *ts, }, }) tsLister, _ := cache.NewInformer( cache.NewListWatchFromClient( - fakeClient.K8sV1alpha1().RESTClient(), + fakeClient.K8sV1().RESTClient(), "transportservers", "nginx-ingress", fields.Everything(), ), - &conf_v1alpha1.TransportServer{}, + &conf_v1.TransportServer{}, 2, nil, ) @@ -119,7 +118,7 @@ func TestUpdateTransportServerStatusIgnoreNoChange(t *testing.T) { if err != nil { t.Errorf("error updating transportserver status: %v", err) } - updatedTs, _ := fakeClient.K8sV1alpha1().TransportServers(ts.Namespace).Get(context.TODO(), ts.Name, meta_v1.GetOptions{}) + updatedTs, _ := fakeClient.K8sV1().TransportServers(ts.Namespace).Get(context.TODO(), ts.Name, meta_v1.GetOptions{}) if updatedTs.Status.State != "same status" { t.Errorf("expected: %v actual: %v", "same status", updatedTs.Status.State) @@ -134,12 +133,12 @@ func TestUpdateTransportServerStatusIgnoreNoChange(t *testing.T) { func TestUpdateTransportServerStatusMissingTransportServer(t *testing.T) { t.Parallel() - ts := &conf_v1alpha1.TransportServer{ + ts := &conf_v1.TransportServer{ ObjectMeta: meta_v1.ObjectMeta{ Name: "ts-1", Namespace: "default", }, - Status: conf_v1alpha1.TransportServerStatus{ + Status: conf_v1.TransportServerStatus{ State: "before status", Reason: "before reason", Message: "before message", @@ -147,18 +146,18 @@ func TestUpdateTransportServerStatusMissingTransportServer(t *testing.T) { } fakeClient := fake_v1alpha1.NewSimpleClientset( - &conf_v1alpha1.TransportServerList{ - Items: []conf_v1alpha1.TransportServer{}, + &conf_v1.TransportServerList{ + Items: []conf_v1.TransportServer{}, }) tsLister, _ := cache.NewInformer( cache.NewListWatchFromClient( - fakeClient.K8sV1alpha1().RESTClient(), + 
fakeClient.K8sV1().RESTClient(), "transportservers", "nginx-ingress", fields.Everything(), ), - &conf_v1alpha1.TransportServer{}, + &conf_v1.TransportServer{}, 2, nil, ) @@ -183,7 +182,7 @@ func TestUpdateTransportServerStatusMissingTransportServer(t *testing.T) { t.Errorf("unexpected error: %v, result should be empty as no matching TransportServer is present", err) } - updatedTs, _ := fakeClient.K8sV1alpha1().TransportServers(ts.Namespace).Get(context.TODO(), ts.Name, meta_v1.GetOptions{}) + updatedTs, _ := fakeClient.K8sV1().TransportServers(ts.Namespace).Get(context.TODO(), ts.Name, meta_v1.GetOptions{}) if updatedTs != nil { t.Errorf("expected TransportServer Store would be empty as provided TransportServer was not found. Unexpected updated TransportServer: %v", updatedTs) } diff --git a/internal/k8s/task_queue.go b/internal/k8s/task_queue.go index 502ef53576..9bdf579931 100644 --- a/internal/k8s/task_queue.go +++ b/internal/k8s/task_queue.go @@ -10,7 +10,6 @@ import ( "github.com/nginxinc/kubernetes-ingress/internal/k8s/appprotect" "github.com/nginxinc/kubernetes-ingress/internal/k8s/appprotectdos" conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" - conf_v1alpha1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" v1 "k8s.io/api/core/v1" discovery_v1 "k8s.io/api/discovery/v1" networking "k8s.io/api/networking/v1" @@ -157,9 +156,9 @@ func newTask(key string, obj interface{}) (task, error) { k = virtualServerRoute case *conf_v1.Policy: k = policy - case *conf_v1alpha1.GlobalConfiguration: + case *conf_v1.GlobalConfiguration: k = globalConfiguration - case *conf_v1alpha1.TransportServer: + case *conf_v1.TransportServer: k = transportserver case *v1beta1.DosProtectedResource: k = appProtectDosProtectedResource diff --git a/pkg/apis/configuration/v1/register.go b/pkg/apis/configuration/v1/register.go index 73924e09d8..67adf8ae68 100644 --- a/pkg/apis/configuration/v1/register.go +++ 
b/pkg/apis/configuration/v1/register.go @@ -34,6 +34,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VirtualServerList{}, &VirtualServerRoute{}, &VirtualServerRouteList{}, + &TransportServer{}, + &TransportServerList{}, + &GlobalConfiguration{}, + &GlobalConfigurationList{}, &Policy{}, &PolicyList{}, ) diff --git a/pkg/apis/configuration/v1/types.go b/pkg/apis/configuration/v1/types.go index c28b13e984..923cb00be6 100644 --- a/pkg/apis/configuration/v1/types.go +++ b/pkg/apis/configuration/v1/types.go @@ -13,6 +13,10 @@ const ( StateInvalid = "Invalid" // HTTPProtocol defines a constant for the HTTP protocol in GlobalConfinguration. HTTPProtocol = "HTTP" + // TLSPassthroughListenerName is the name of a built-in TLS Passthrough listener. + TLSPassthroughListenerName = "tls-passthrough" + // TLSPassthroughListenerProtocol is the protocol of a built-in TLS Passthrough listener. + TLSPassthroughListenerProtocol = "TLS_PASSTHROUGH" ) // +genclient @@ -38,24 +42,24 @@ type VirtualServer struct { // VirtualServerSpec is the spec of the VirtualServer resource. 
type VirtualServerSpec struct { - IngressClass string `json:"ingressClassName"` - Host string `json:"host"` - Listener *Listener `json:"listener"` - TLS *TLS `json:"tls"` - Gunzip bool `json:"gunzip"` - Policies []PolicyReference `json:"policies"` - Upstreams []Upstream `json:"upstreams"` - Routes []Route `json:"routes"` - HTTPSnippets string `json:"http-snippets"` - ServerSnippets string `json:"server-snippets"` - Dos string `json:"dos"` - ExternalDNS ExternalDNS `json:"externalDNS"` + IngressClass string `json:"ingressClassName"` + Host string `json:"host"` + Listener *VirtualServerListener `json:"listener"` + TLS *TLS `json:"tls"` + Gunzip bool `json:"gunzip"` + Policies []PolicyReference `json:"policies"` + Upstreams []Upstream `json:"upstreams"` + Routes []Route `json:"routes"` + HTTPSnippets string `json:"http-snippets"` + ServerSnippets string `json:"server-snippets"` + Dos string `json:"dos"` + ExternalDNS ExternalDNS `json:"externalDNS"` // InternalRoute allows for the configuration of internal routing. InternalRoute bool `json:"internalRoute"` } -// Listener references a custom http and/or https listener defined in GlobalConfiguration. -type Listener struct { +// VirtualServerListener references a custom http and/or https listener defined in GlobalConfiguration. +type VirtualServerListener struct { HTTP string `json:"http"` HTTPS string `json:"https"` } @@ -382,6 +386,155 @@ type VirtualServerRouteStatus struct { ExternalEndpoints []ExternalEndpoint `json:"externalEndpoints,omitempty"` } +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:storageversion +// +kubebuilder:validation:Optional +// +kubebuilder:resource:shortName=gc + +// GlobalConfiguration defines the GlobalConfiguration resource. 
+type GlobalConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec GlobalConfigurationSpec `json:"spec"` +} + +// GlobalConfigurationSpec is the spec of the GlobalConfiguration resource. +type GlobalConfigurationSpec struct { + Listeners []Listener `json:"listeners"` +} + +// Listener defines a listener. +type Listener struct { + Name string `json:"name"` + Port int `json:"port"` + Protocol string `json:"protocol"` + Ssl bool `json:"ssl"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GlobalConfigurationList is a list of the GlobalConfiguration resources. +type GlobalConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []GlobalConfiguration `json:"items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:validation:Optional +// +kubebuilder:resource:shortName=ts +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state`,description="Current state of the TransportServer. If the resource has a valid status, it means it has been validated and accepted by the Ingress Controller." +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.reason` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// TransportServer defines the TransportServer resource. +type TransportServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec TransportServerSpec `json:"spec"` + Status TransportServerStatus `json:"status"` +} + +// TransportServerSpec is the spec of the TransportServer resource. 
+type TransportServerSpec struct { + IngressClass string `json:"ingressClassName"` + TLS *TransportServerTLS `json:"tls"` + Listener TransportServerListener `json:"listener"` + ServerSnippets string `json:"serverSnippets"` + StreamSnippets string `json:"streamSnippets"` + Host string `json:"host"` + Upstreams []TransportServerUpstream `json:"upstreams"` + UpstreamParameters *UpstreamParameters `json:"upstreamParameters"` + SessionParameters *SessionParameters `json:"sessionParameters"` + Action *TransportServerAction `json:"action"` +} + +// TransportServerTLS defines TransportServerTLS configuration for a TransportServer. +type TransportServerTLS struct { + Secret string `json:"secret"` +} + +// TransportServerListener defines a listener for a TransportServer. +type TransportServerListener struct { + Name string `json:"name"` + Protocol string `json:"protocol"` +} + +// TransportServerUpstream defines an upstream. +type TransportServerUpstream struct { + Name string `json:"name"` + Service string `json:"service"` + Port int `json:"port"` + FailTimeout string `json:"failTimeout"` + MaxFails *int `json:"maxFails"` + MaxConns *int `json:"maxConns"` + HealthCheck *TransportServerHealthCheck `json:"healthCheck"` + LoadBalancingMethod string `json:"loadBalancingMethod"` +} + +// TransportServerHealthCheck defines the parameters for active Upstream HealthChecks. +type TransportServerHealthCheck struct { + Enabled bool `json:"enable"` + Timeout string `json:"timeout"` + Jitter string `json:"jitter"` + Port int `json:"port"` + Interval string `json:"interval"` + Passes int `json:"passes"` + Fails int `json:"fails"` + Match *TransportServerMatch `json:"match"` +} + +// TransportServerMatch defines the parameters of a custom health check. +type TransportServerMatch struct { + Send string `json:"send"` + Expect string `json:"expect"` +} + +// UpstreamParameters defines parameters for an upstream. 
+type UpstreamParameters struct { + UDPRequests *int `json:"udpRequests"` + UDPResponses *int `json:"udpResponses"` + + ConnectTimeout string `json:"connectTimeout"` + NextUpstream bool `json:"nextUpstream"` + NextUpstreamTimeout string `json:"nextUpstreamTimeout"` + NextUpstreamTries int `json:"nextUpstreamTries"` +} + +// SessionParameters defines session parameters. +type SessionParameters struct { + Timeout string `json:"timeout"` +} + +// TransportServerAction defines an action. +type TransportServerAction struct { + Pass string `json:"pass"` +} + +// TransportServerStatus defines the status for the TransportServer resource. +type TransportServerStatus struct { + State string `json:"state"` + Reason string `json:"reason"` + Message string `json:"message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TransportServerList is a list of the TransportServer resources. +type TransportServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []TransportServer `json:"items"` +} + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:validation:Optional diff --git a/pkg/apis/configuration/v1/zz_generated.deepcopy.go b/pkg/apis/configuration/v1/zz_generated.deepcopy.go index 98e086d6c7..eff7270ab8 100644 --- a/pkg/apis/configuration/v1/zz_generated.deepcopy.go +++ b/pkg/apis/configuration/v1/zz_generated.deepcopy.go @@ -329,6 +329,87 @@ func (in *ExternalEndpoint) DeepCopy() *ExternalEndpoint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalConfiguration) DeepCopyInto(out *GlobalConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalConfiguration. 
+func (in *GlobalConfiguration) DeepCopy() *GlobalConfiguration { + if in == nil { + return nil + } + out := new(GlobalConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GlobalConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalConfigurationList) DeepCopyInto(out *GlobalConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GlobalConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalConfigurationList. +func (in *GlobalConfigurationList) DeepCopy() *GlobalConfigurationList { + if in == nil { + return nil + } + out := new(GlobalConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GlobalConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalConfigurationSpec) DeepCopyInto(out *GlobalConfigurationSpec) { + *out = *in + if in.Listeners != nil { + in, out := &in.Listeners, &out.Listeners + *out = make([]Listener, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalConfigurationSpec. 
+func (in *GlobalConfigurationSpec) DeepCopy() *GlobalConfigurationSpec { + if in == nil { + return nil + } + out := new(GlobalConfigurationSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Header) DeepCopyInto(out *Header) { *out = *in @@ -855,6 +936,22 @@ func (in *SessionCookie) DeepCopy() *SessionCookie { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionParameters) DeepCopyInto(out *SessionParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionParameters. +func (in *SessionParameters) DeepCopy() *SessionParameters { + if in == nil { + return nil + } + out := new(SessionParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Split) DeepCopyInto(out *Split) { *out = *in @@ -923,6 +1020,243 @@ func (in *TLSRedirect) DeepCopy() *TLSRedirect { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServer) DeepCopyInto(out *TransportServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServer. +func (in *TransportServer) DeepCopy() *TransportServer { + if in == nil { + return nil + } + out := new(TransportServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TransportServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServerAction) DeepCopyInto(out *TransportServerAction) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerAction. +func (in *TransportServerAction) DeepCopy() *TransportServerAction { + if in == nil { + return nil + } + out := new(TransportServerAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServerHealthCheck) DeepCopyInto(out *TransportServerHealthCheck) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(TransportServerMatch) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerHealthCheck. +func (in *TransportServerHealthCheck) DeepCopy() *TransportServerHealthCheck { + if in == nil { + return nil + } + out := new(TransportServerHealthCheck) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServerList) DeepCopyInto(out *TransportServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TransportServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerList. 
+func (in *TransportServerList) DeepCopy() *TransportServerList { + if in == nil { + return nil + } + out := new(TransportServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TransportServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServerListener) DeepCopyInto(out *TransportServerListener) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerListener. +func (in *TransportServerListener) DeepCopy() *TransportServerListener { + if in == nil { + return nil + } + out := new(TransportServerListener) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServerMatch) DeepCopyInto(out *TransportServerMatch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerMatch. +func (in *TransportServerMatch) DeepCopy() *TransportServerMatch { + if in == nil { + return nil + } + out := new(TransportServerMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransportServerSpec) DeepCopyInto(out *TransportServerSpec) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TransportServerTLS) + **out = **in + } + out.Listener = in.Listener + if in.Upstreams != nil { + in, out := &in.Upstreams, &out.Upstreams + *out = make([]TransportServerUpstream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UpstreamParameters != nil { + in, out := &in.UpstreamParameters, &out.UpstreamParameters + *out = new(UpstreamParameters) + (*in).DeepCopyInto(*out) + } + if in.SessionParameters != nil { + in, out := &in.SessionParameters, &out.SessionParameters + *out = new(SessionParameters) + **out = **in + } + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(TransportServerAction) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerSpec. +func (in *TransportServerSpec) DeepCopy() *TransportServerSpec { + if in == nil { + return nil + } + out := new(TransportServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServerStatus) DeepCopyInto(out *TransportServerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerStatus. +func (in *TransportServerStatus) DeepCopy() *TransportServerStatus { + if in == nil { + return nil + } + out := new(TransportServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServerTLS) DeepCopyInto(out *TransportServerTLS) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerTLS. 
+func (in *TransportServerTLS) DeepCopy() *TransportServerTLS { + if in == nil { + return nil + } + out := new(TransportServerTLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServerUpstream) DeepCopyInto(out *TransportServerUpstream) { + *out = *in + if in.MaxFails != nil { + in, out := &in.MaxFails, &out.MaxFails + *out = new(int) + **out = **in + } + if in.MaxConns != nil { + in, out := &in.MaxConns, &out.MaxConns + *out = new(int) + **out = **in + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(TransportServerHealthCheck) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerUpstream. +func (in *TransportServerUpstream) DeepCopy() *TransportServerUpstream { + if in == nil { + return nil + } + out := new(TransportServerUpstream) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Upstream) DeepCopyInto(out *Upstream) { *out = *in @@ -1003,6 +1337,32 @@ func (in *UpstreamBuffers) DeepCopy() *UpstreamBuffers { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpstreamParameters) DeepCopyInto(out *UpstreamParameters) { + *out = *in + if in.UDPRequests != nil { + in, out := &in.UDPRequests, &out.UDPRequests + *out = new(int) + **out = **in + } + if in.UDPResponses != nil { + in, out := &in.UDPResponses, &out.UDPResponses + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamParameters. 
+func (in *UpstreamParameters) DeepCopy() *UpstreamParameters { + if in == nil { + return nil + } + out := new(UpstreamParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UpstreamQueue) DeepCopyInto(out *UpstreamQueue) { *out = *in @@ -1096,6 +1456,22 @@ func (in *VirtualServerList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServerListener) DeepCopyInto(out *VirtualServerListener) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServerListener. +func (in *VirtualServerListener) DeepCopy() *VirtualServerListener { + if in == nil { + return nil + } + out := new(VirtualServerListener) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VirtualServerRoute) DeepCopyInto(out *VirtualServerRoute) { *out = *in @@ -1213,7 +1589,7 @@ func (in *VirtualServerSpec) DeepCopyInto(out *VirtualServerSpec) { *out = *in if in.Listener != nil { in, out := &in.Listener, &out.Listener - *out = new(Listener) + *out = new(VirtualServerListener) **out = **in } if in.TLS != nil { diff --git a/pkg/apis/configuration/v1alpha1/types.go b/pkg/apis/configuration/v1alpha1/types.go index bab9796c3b..bd8b266ba2 100644 --- a/pkg/apis/configuration/v1alpha1/types.go +++ b/pkg/apis/configuration/v1alpha1/types.go @@ -67,20 +67,20 @@ type TransportServer struct { // TransportServerSpec is the spec of the TransportServer resource. 
type TransportServerSpec struct { - IngressClass string `json:"ingressClassName"` - TLS *TLS `json:"tls"` - Listener TransportServerListener `json:"listener"` - ServerSnippets string `json:"serverSnippets"` - StreamSnippets string `json:"streamSnippets"` - Host string `json:"host"` - Upstreams []Upstream `json:"upstreams"` - UpstreamParameters *UpstreamParameters `json:"upstreamParameters"` - SessionParameters *SessionParameters `json:"sessionParameters"` - Action *Action `json:"action"` -} - -// TLS defines TLS configuration for a TransportServer. -type TLS struct { + IngressClass string `json:"ingressClassName"` + TLS *TransportServerTLS `json:"tls"` + Listener TransportServerListener `json:"listener"` + ServerSnippets string `json:"serverSnippets"` + StreamSnippets string `json:"streamSnippets"` + Host string `json:"host"` + Upstreams []TransportServerUpstream `json:"upstreams"` + UpstreamParameters *UpstreamParameters `json:"upstreamParameters"` + SessionParameters *SessionParameters `json:"sessionParameters"` + Action *TransportServerAction `json:"action"` +} + +// TransportServerTLS defines TransportServerTLS configuration for a TransportServer. +type TransportServerTLS struct { Secret string `json:"secret"` } @@ -90,32 +90,32 @@ type TransportServerListener struct { Protocol string `json:"protocol"` } -// Upstream defines an upstream. -type Upstream struct { - Name string `json:"name"` - Service string `json:"service"` - Port int `json:"port"` - FailTimeout string `json:"failTimeout"` - MaxFails *int `json:"maxFails"` - MaxConns *int `json:"maxConns"` - HealthCheck *HealthCheck `json:"healthCheck"` - LoadBalancingMethod string `json:"loadBalancingMethod"` -} - -// HealthCheck defines the parameters for active Upstream HealthChecks. 
-type HealthCheck struct { - Enabled bool `json:"enable"` - Timeout string `json:"timeout"` - Jitter string `json:"jitter"` - Port int `json:"port"` - Interval string `json:"interval"` - Passes int `json:"passes"` - Fails int `json:"fails"` - Match *Match `json:"match"` -} - -// Match defines the parameters of a custom health check. -type Match struct { +// TransportServerUpstream defines an upstream. +type TransportServerUpstream struct { + Name string `json:"name"` + Service string `json:"service"` + Port int `json:"port"` + FailTimeout string `json:"failTimeout"` + MaxFails *int `json:"maxFails"` + MaxConns *int `json:"maxConns"` + HealthCheck *TransportServerHealthCheck `json:"healthCheck"` + LoadBalancingMethod string `json:"loadBalancingMethod"` +} + +// TransportServerHealthCheck defines the parameters for active Upstream HealthChecks. +type TransportServerHealthCheck struct { + Enabled bool `json:"enable"` + Timeout string `json:"timeout"` + Jitter string `json:"jitter"` + Port int `json:"port"` + Interval string `json:"interval"` + Passes int `json:"passes"` + Fails int `json:"fails"` + Match *TransportServerMatch `json:"match"` +} + +// TransportServerMatch defines the parameters of a custom health check. +type TransportServerMatch struct { Send string `json:"send"` Expect string `json:"expect"` } @@ -136,8 +136,8 @@ type SessionParameters struct { Timeout string `json:"timeout"` } -// Action defines an action. -type Action struct { +// TransportServerAction defines an action. 
+type TransportServerAction struct { Pass string `json:"pass"` } diff --git a/pkg/apis/configuration/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/configuration/v1alpha1/zz_generated.deepcopy.go index b4176481f5..9564cefb0e 100644 --- a/pkg/apis/configuration/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/configuration/v1alpha1/zz_generated.deepcopy.go @@ -35,22 +35,6 @@ func (in *AccessControl) DeepCopy() *AccessControl { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Action) DeepCopyInto(out *Action) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Action. -func (in *Action) DeepCopy() *Action { - if in == nil { - return nil - } - out := new(Action) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EgressMTLS) DeepCopyInto(out *EgressMTLS) { *out = *in @@ -158,27 +142,6 @@ func (in *GlobalConfigurationSpec) DeepCopy() *GlobalConfigurationSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HealthCheck) DeepCopyInto(out *HealthCheck) { - *out = *in - if in.Match != nil { - in, out := &in.Match, &out.Match - *out = new(Match) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheck. -func (in *HealthCheck) DeepCopy() *HealthCheck { - if in == nil { - return nil - } - out := new(HealthCheck) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *IngressMTLS) DeepCopyInto(out *IngressMTLS) { *out = *in @@ -232,22 +195,6 @@ func (in *Listener) DeepCopy() *Listener { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Match) DeepCopyInto(out *Match) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Match. -func (in *Match) DeepCopy() *Match { - if in == nil { - return nil - } - out := new(Match) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Policy) DeepCopyInto(out *Policy) { *out = *in @@ -406,22 +353,6 @@ func (in *SessionParameters) DeepCopy() *SessionParameters { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TLS) DeepCopyInto(out *TLS) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLS. -func (in *TLS) DeepCopy() *TLS { - if in == nil { - return nil - } - out := new(TLS) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TransportServer) DeepCopyInto(out *TransportServer) { *out = *in @@ -450,6 +381,43 @@ func (in *TransportServer) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServerAction) DeepCopyInto(out *TransportServerAction) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerAction. 
+func (in *TransportServerAction) DeepCopy() *TransportServerAction { + if in == nil { + return nil + } + out := new(TransportServerAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServerHealthCheck) DeepCopyInto(out *TransportServerHealthCheck) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(TransportServerMatch) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerHealthCheck. +func (in *TransportServerHealthCheck) DeepCopy() *TransportServerHealthCheck { + if in == nil { + return nil + } + out := new(TransportServerHealthCheck) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TransportServerList) DeepCopyInto(out *TransportServerList) { *out = *in @@ -499,18 +467,34 @@ func (in *TransportServerListener) DeepCopy() *TransportServerListener { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServerMatch) DeepCopyInto(out *TransportServerMatch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerMatch. +func (in *TransportServerMatch) DeepCopy() *TransportServerMatch { + if in == nil { + return nil + } + out := new(TransportServerMatch) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TransportServerSpec) DeepCopyInto(out *TransportServerSpec) { *out = *in if in.TLS != nil { in, out := &in.TLS, &out.TLS - *out = new(TLS) + *out = new(TransportServerTLS) **out = **in } out.Listener = in.Listener if in.Upstreams != nil { in, out := &in.Upstreams, &out.Upstreams - *out = make([]Upstream, len(*in)) + *out = make([]TransportServerUpstream, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -527,7 +511,7 @@ func (in *TransportServerSpec) DeepCopyInto(out *TransportServerSpec) { } if in.Action != nil { in, out := &in.Action, &out.Action - *out = new(Action) + *out = new(TransportServerAction) **out = **in } return @@ -560,7 +544,23 @@ func (in *TransportServerStatus) DeepCopy() *TransportServerStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Upstream) DeepCopyInto(out *Upstream) { +func (in *TransportServerTLS) DeepCopyInto(out *TransportServerTLS) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerTLS. +func (in *TransportServerTLS) DeepCopy() *TransportServerTLS { + if in == nil { + return nil + } + out := new(TransportServerTLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportServerUpstream) DeepCopyInto(out *TransportServerUpstream) { *out = *in if in.MaxFails != nil { in, out := &in.MaxFails, &out.MaxFails @@ -574,18 +574,18 @@ func (in *Upstream) DeepCopyInto(out *Upstream) { } if in.HealthCheck != nil { in, out := &in.HealthCheck, &out.HealthCheck - *out = new(HealthCheck) + *out = new(TransportServerHealthCheck) (*in).DeepCopyInto(*out) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Upstream. 
-func (in *Upstream) DeepCopy() *Upstream { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportServerUpstream. +func (in *TransportServerUpstream) DeepCopy() *TransportServerUpstream { if in == nil { return nil } - out := new(Upstream) + out := new(TransportServerUpstream) in.DeepCopyInto(out) return out } diff --git a/pkg/apis/configuration/validation/globalconfiguration.go b/pkg/apis/configuration/validation/globalconfiguration.go index b6333f671f..4ea32b986e 100644 --- a/pkg/apis/configuration/validation/globalconfiguration.go +++ b/pkg/apis/configuration/validation/globalconfiguration.go @@ -5,7 +5,7 @@ import ( "sort" "strings" - "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" + conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" @@ -30,16 +30,16 @@ func NewGlobalConfigurationValidator(forbiddenListenerPorts map[int]bool) *Globa } // ValidateGlobalConfiguration validates a GlobalConfiguration. 
-func (gcv *GlobalConfigurationValidator) ValidateGlobalConfiguration(globalConfiguration *v1alpha1.GlobalConfiguration) error { +func (gcv *GlobalConfigurationValidator) ValidateGlobalConfiguration(globalConfiguration *conf_v1.GlobalConfiguration) error { allErrs := gcv.validateGlobalConfigurationSpec(&globalConfiguration.Spec, field.NewPath("spec")) return allErrs.ToAggregate() } -func (gcv *GlobalConfigurationValidator) validateGlobalConfigurationSpec(spec *v1alpha1.GlobalConfigurationSpec, fieldPath *field.Path) field.ErrorList { +func (gcv *GlobalConfigurationValidator) validateGlobalConfigurationSpec(spec *conf_v1.GlobalConfigurationSpec, fieldPath *field.Path) field.ErrorList { return gcv.validateListeners(spec.Listeners, fieldPath.Child("listeners")) } -func (gcv *GlobalConfigurationValidator) validateListeners(listeners []v1alpha1.Listener, fieldPath *field.Path) field.ErrorList { +func (gcv *GlobalConfigurationValidator) validateListeners(listeners []conf_v1.Listener, fieldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} listenerNames := sets.Set[string]{} @@ -91,7 +91,7 @@ func generatePortProtocolKey(port int, protocol string) string { return fmt.Sprintf("%d/%s", port, protocol) } -func (gcv *GlobalConfigurationValidator) validateListener(listener v1alpha1.Listener, fieldPath *field.Path) field.ErrorList { +func (gcv *GlobalConfigurationValidator) validateListener(listener conf_v1.Listener, fieldPath *field.Path) field.ErrorList { allErrs := validateGlobalConfigurationListenerName(listener.Name, fieldPath.Child("name")) allErrs = append(allErrs, gcv.validateListenerPort(listener.Port, fieldPath.Child("port"))...) allErrs = append(allErrs, validateListenerProtocol(listener.Protocol, fieldPath.Child("protocol"))...) 
@@ -100,7 +100,7 @@ func (gcv *GlobalConfigurationValidator) validateListener(listener v1alpha1.List } func validateGlobalConfigurationListenerName(name string, fieldPath *field.Path) field.ErrorList { - if name == v1alpha1.TLSPassthroughListenerName { + if name == conf_v1.TLSPassthroughListenerName { return field.ErrorList{field.Forbidden(fieldPath, "is the name of a built-in listener")} } return validateListenerName(name, fieldPath) diff --git a/pkg/apis/configuration/validation/globalconfiguration_test.go b/pkg/apis/configuration/validation/globalconfiguration_test.go index 3a3283eefd..abf0e26199 100644 --- a/pkg/apis/configuration/validation/globalconfiguration_test.go +++ b/pkg/apis/configuration/validation/globalconfiguration_test.go @@ -3,7 +3,7 @@ package validation import ( "testing" - "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" + conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -13,9 +13,9 @@ func createGlobalConfigurationValidator() *GlobalConfigurationValidator { func TestValidateGlobalConfiguration(t *testing.T) { t.Parallel() - globalConfiguration := v1alpha1.GlobalConfiguration{ - Spec: v1alpha1.GlobalConfigurationSpec{ - Listeners: []v1alpha1.Listener{ + globalConfiguration := conf_v1.GlobalConfiguration{ + Spec: conf_v1.GlobalConfigurationSpec{ + Listeners: []conf_v1.Listener{ { Name: "tcp-listener", Port: 53, @@ -61,7 +61,7 @@ func TestValidateListenerPort(t *testing.T) { func TestValidateListeners(t *testing.T) { t.Parallel() - listeners := []v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "tcp-listener", Port: 53, @@ -85,11 +85,11 @@ func TestValidateListeners(t *testing.T) { func TestValidateListenersFails(t *testing.T) { t.Parallel() tests := []struct { - listeners []v1alpha1.Listener + listeners []conf_v1.Listener msg string }{ { - listeners: []v1alpha1.Listener{ + listeners: []conf_v1.Listener{ { Name: "tcp-listener", 
Port: 2201, @@ -104,7 +104,7 @@ func TestValidateListenersFails(t *testing.T) { msg: "duplicated name", }, { - listeners: []v1alpha1.Listener{ + listeners: []conf_v1.Listener{ { Name: "tcp-listener-1", Port: 2201, @@ -132,7 +132,7 @@ func TestValidateListenersFails(t *testing.T) { func TestValidateListener(t *testing.T) { t.Parallel() - listener := v1alpha1.Listener{ + listener := conf_v1.Listener{ Name: "tcp-listener", Port: 53, Protocol: "TCP", @@ -149,11 +149,11 @@ func TestValidateListener(t *testing.T) { func TestValidateListenerFails(t *testing.T) { t.Parallel() tests := []struct { - Listener v1alpha1.Listener + Listener conf_v1.Listener msg string }{ { - Listener: v1alpha1.Listener{ + Listener: conf_v1.Listener{ Name: "@", Port: 2201, Protocol: "TCP", @@ -161,7 +161,7 @@ func TestValidateListenerFails(t *testing.T) { msg: "invalid name", }, { - Listener: v1alpha1.Listener{ + Listener: conf_v1.Listener{ Name: "tcp-listener", Port: -1, Protocol: "TCP", @@ -169,7 +169,7 @@ func TestValidateListenerFails(t *testing.T) { msg: "invalid port", }, { - Listener: v1alpha1.Listener{ + Listener: conf_v1.Listener{ Name: "name", Port: 2201, Protocol: "IP", @@ -177,7 +177,7 @@ func TestValidateListenerFails(t *testing.T) { msg: "invalid protocol", }, { - Listener: v1alpha1.Listener{ + Listener: conf_v1.Listener{ Name: "tls-passthrough", Port: 2201, Protocol: "TCP", @@ -244,7 +244,7 @@ func TestValidateListenerProtocol_PassesOnValidInput(t *testing.T) { func TestValidateListenerProtocol_PassesOnHttpListenerUsingDiffPortToTCPAndUDPListenerWithTCPAndUDPDefinedFirst(t *testing.T) { t.Parallel() - listeners := []v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "tcp-listener", Port: 53, @@ -272,7 +272,7 @@ func TestValidateListenerProtocol_PassesOnHttpListenerUsingDiffPortToTCPAndUDPLi func TestValidateListenerProtocol_PassesOnHttpListenerUsingDiffPortToTCPAndUDPListenerWithHTTPDefinedFirst(t *testing.T) { t.Parallel() - listeners := []v1alpha1.Listener{ + listeners 
:= []conf_v1.Listener{ { Name: "http-listener", Port: 63, @@ -300,7 +300,7 @@ func TestValidateListenerProtocol_PassesOnHttpListenerUsingDiffPortToTCPAndUDPLi func TestValidateListenerProtocol_FailsOnHttpListenerUsingSamePortAsTCPListener(t *testing.T) { t.Parallel() - listeners := []v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "tcp-listener", Port: 53, @@ -323,7 +323,7 @@ func TestValidateListenerProtocol_FailsOnHttpListenerUsingSamePortAsTCPListener( func TestValidateListenerProtocol_FailsOnHttpListenerUsingSamePortAsUDPListener(t *testing.T) { t.Parallel() - listeners := []v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "udp-listener", Port: 53, @@ -346,7 +346,7 @@ func TestValidateListenerProtocol_FailsOnHttpListenerUsingSamePortAsUDPListener( func TestValidateListenerProtocol_FailsOnHttpListenerUsingSamePortAsTCPAndUDPListener(t *testing.T) { t.Parallel() - listeners := []v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "tcp-listener", Port: 53, @@ -374,7 +374,7 @@ func TestValidateListenerProtocol_FailsOnHttpListenerUsingSamePortAsTCPAndUDPLis func TestValidateListenerProtocol_FailsOnTCPListenerUsingSamePortAsHTTPListener(t *testing.T) { t.Parallel() - listeners := []v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "http-listener", Port: 53, @@ -397,7 +397,7 @@ func TestValidateListenerProtocol_FailsOnTCPListenerUsingSamePortAsHTTPListener( func TestValidateListenerProtocol_FailsOnUDPListenerUsingSamePortAsHTTPListener(t *testing.T) { t.Parallel() - listeners := []v1alpha1.Listener{ + listeners := []conf_v1.Listener{ { Name: "http-listener", Port: 53, diff --git a/pkg/apis/configuration/validation/transportserver.go b/pkg/apis/configuration/validation/transportserver.go index 7b975a6934..7a3648dff4 100644 --- a/pkg/apis/configuration/validation/transportserver.go +++ b/pkg/apis/configuration/validation/transportserver.go @@ -6,7 +6,7 @@ import ( "regexp" "strings" - 
"github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" + conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" @@ -29,12 +29,12 @@ func NewTransportServerValidator(tlsPassthrough bool, snippetsEnabled bool, isPl } // ValidateTransportServer validates a TransportServer. -func (tsv *TransportServerValidator) ValidateTransportServer(transportServer *v1alpha1.TransportServer) error { +func (tsv *TransportServerValidator) ValidateTransportServer(transportServer *conf_v1.TransportServer) error { allErrs := tsv.validateTransportServerSpec(&transportServer.Spec, field.NewPath("spec")) return allErrs.ToAggregate() } -func (tsv *TransportServerValidator) validateTransportServerSpec(spec *v1alpha1.TransportServerSpec, fieldPath *field.Path) field.ErrorList { +func (tsv *TransportServerValidator) validateTransportServerSpec(spec *conf_v1.TransportServerSpec, fieldPath *field.Path) field.ErrorList { allErrs := tsv.validateTransportListener(&spec.Listener, fieldPath.Child("listener")) isTLSPassthroughListener := isPotentialTLSPassthroughListener(&spec.Listener) @@ -62,7 +62,7 @@ func (tsv *TransportServerValidator) validateTransportServerSpec(spec *v1alpha1. 
return allErrs } -func validateTLS(tls *v1alpha1.TLS, isTLSPassthrough bool, fieldPath *field.Path) field.ErrorList { +func validateTLS(tls *conf_v1.TransportServerTLS, isTLSPassthrough bool, fieldPath *field.Path) field.ErrorList { if tls == nil { return nil } @@ -92,7 +92,7 @@ func validateTransportServerHost(host string, fieldPath *field.Path, isTLSPassth return validateHost(host, fieldPath) } -func (tsv *TransportServerValidator) validateTransportListener(listener *v1alpha1.TransportServerListener, fieldPath *field.Path) field.ErrorList { +func (tsv *TransportServerValidator) validateTransportListener(listener *conf_v1.TransportServerListener, fieldPath *field.Path) field.ErrorList { if isPotentialTLSPassthroughListener(listener) { return tsv.validateTLSPassthroughListener(listener, fieldPath) } @@ -100,26 +100,26 @@ func (tsv *TransportServerValidator) validateTransportListener(listener *v1alpha return validateRegularListener(listener, fieldPath) } -func validateRegularListener(listener *v1alpha1.TransportServerListener, fieldPath *field.Path) field.ErrorList { +func validateRegularListener(listener *conf_v1.TransportServerListener, fieldPath *field.Path) field.ErrorList { allErrs := validateListenerName(listener.Name, fieldPath.Child("name")) allErrs = append(allErrs, validateListenerProtocol(listener.Protocol, fieldPath.Child("protocol"))...) 
return allErrs } -func isPotentialTLSPassthroughListener(listener *v1alpha1.TransportServerListener) bool { - return listener.Name == v1alpha1.TLSPassthroughListenerName || listener.Protocol == v1alpha1.TLSPassthroughListenerProtocol +func isPotentialTLSPassthroughListener(listener *conf_v1.TransportServerListener) bool { + return listener.Name == conf_v1.TLSPassthroughListenerName || listener.Protocol == conf_v1.TLSPassthroughListenerProtocol } -func (tsv *TransportServerValidator) validateTLSPassthroughListener(listener *v1alpha1.TransportServerListener, fieldPath *field.Path) field.ErrorList { +func (tsv *TransportServerValidator) validateTLSPassthroughListener(listener *conf_v1.TransportServerListener, fieldPath *field.Path) field.ErrorList { if !tsv.tlsPassthrough { return field.ErrorList{field.Forbidden(fieldPath, "TLS Passthrough is not enabled")} } - if listener.Name == v1alpha1.TLSPassthroughListenerName && listener.Protocol != v1alpha1.TLSPassthroughListenerProtocol { - msg := fmt.Sprintf("must be '%s' for the built-in %s listener", v1alpha1.TLSPassthroughListenerProtocol, v1alpha1.TLSPassthroughListenerName) + if listener.Name == conf_v1.TLSPassthroughListenerName && listener.Protocol != conf_v1.TLSPassthroughListenerProtocol { + msg := fmt.Sprintf("must be '%s' for the built-in %s listener", conf_v1.TLSPassthroughListenerProtocol, conf_v1.TLSPassthroughListenerName) return field.ErrorList{field.Invalid(fieldPath.Child("protocol"), listener.Protocol, msg)} } - if listener.Protocol == v1alpha1.TLSPassthroughListenerProtocol && listener.Name != v1alpha1.TLSPassthroughListenerName { - msg := fmt.Sprintf("must be '%s' for a listener with the protocol %s", v1alpha1.TLSPassthroughListenerName, v1alpha1.TLSPassthroughListenerProtocol) + if listener.Protocol == conf_v1.TLSPassthroughListenerProtocol && listener.Name != conf_v1.TLSPassthroughListenerName { + msg := fmt.Sprintf("must be '%s' for a listener with the protocol %s", conf_v1.TLSPassthroughListenerName, 
conf_v1.TLSPassthroughListenerProtocol) return field.ErrorList{field.Invalid(fieldPath.Child("name"), listener.Name, msg)} } return nil @@ -129,7 +129,7 @@ func validateListenerName(name string, fieldPath *field.Path) field.ErrorList { return validateDNS1035Label(name, fieldPath) } -func validateTransportServerUpstreams(upstreams []v1alpha1.Upstream, fieldPath *field.Path, isPlus bool) (allErrs field.ErrorList, upstreamNames sets.Set[string]) { +func validateTransportServerUpstreams(upstreams []conf_v1.TransportServerUpstream, fieldPath *field.Path, isPlus bool) (allErrs field.ErrorList, upstreamNames sets.Set[string]) { allErrs = field.ErrorList{} upstreamNames = sets.Set[string]{} @@ -229,7 +229,7 @@ func validateHashLoadBalancingMethod(method string, fieldPath *field.Path, isPlu return allErrs } -func validateTSUpstreamHealthChecks(hc *v1alpha1.HealthCheck, fieldPath *field.Path) field.ErrorList { +func validateTSUpstreamHealthChecks(hc *conf_v1.TransportServerHealthCheck, fieldPath *field.Path) field.ErrorList { if hc == nil { return nil } @@ -248,7 +248,7 @@ func validateTSUpstreamHealthChecks(hc *v1alpha1.HealthCheck, fieldPath *field.P return allErrs } -func validateHealthCheckMatch(match *v1alpha1.Match, fieldPath *field.Path) field.ErrorList { +func validateHealthCheckMatch(match *conf_v1.TransportServerMatch, fieldPath *field.Path) field.ErrorList { if match == nil { return nil } @@ -323,7 +323,7 @@ func validateHexString(s string) error { return nil } -func validateTransportServerUpstreamParameters(upstreamParameters *v1alpha1.UpstreamParameters, fieldPath *field.Path, protocol string) field.ErrorList { +func validateTransportServerUpstreamParameters(upstreamParameters *conf_v1.UpstreamParameters, fieldPath *field.Path, protocol string) field.ErrorList { if upstreamParameters == nil { return nil } @@ -336,7 +336,7 @@ func validateTransportServerUpstreamParameters(upstreamParameters *v1alpha1.Upst return allErrs } -func 
validateSessionParameters(sessionParameters *v1alpha1.SessionParameters, fieldPath *field.Path) field.ErrorList { +func validateSessionParameters(sessionParameters *conf_v1.SessionParameters, fieldPath *field.Path) field.ErrorList { if sessionParameters == nil { return nil } @@ -350,7 +350,7 @@ func validateUDPUpstreamParameter(parameter *int, fieldPath *field.Path, protoco return validatePositiveIntOrZeroFromPointer(parameter, fieldPath) } -func validateTransportServerAction(action *v1alpha1.Action, fieldPath *field.Path, upstreamNames sets.Set[string]) field.ErrorList { +func validateTransportServerAction(action *conf_v1.TransportServerAction, fieldPath *field.Path, upstreamNames sets.Set[string]) field.ErrorList { if action.Pass == "" { return field.ErrorList{field.Required(fieldPath, "must specify pass")} } diff --git a/pkg/apis/configuration/validation/transportserver_test.go b/pkg/apis/configuration/validation/transportserver_test.go index 7e27022b4d..30286860e6 100644 --- a/pkg/apis/configuration/validation/transportserver_test.go +++ b/pkg/apis/configuration/validation/transportserver_test.go @@ -3,7 +3,7 @@ package validation import ( "testing" - "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1alpha1" + conf_v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -15,20 +15,20 @@ func createTransportServerValidator() *TransportServerValidator { func TestValidateTransportServer(t *testing.T) { t.Parallel() - ts := v1alpha1.TransportServer{ - Spec: v1alpha1.TransportServerSpec{ - Listener: v1alpha1.TransportServerListener{ + ts := conf_v1.TransportServer{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tcp-listener", Protocol: "TCP", }, - Upstreams: []v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "upstream1", Service: "test-1", Port: 5501, }, }, - Action: 
&v1alpha1.Action{ + Action: &conf_v1.TransportServerAction{ Pass: "upstream1", }, }, @@ -44,13 +44,13 @@ func TestValidateTransportServer(t *testing.T) { func TestValidateTransportServer_FailsOnInvalidInput(t *testing.T) { t.Parallel() - ts := v1alpha1.TransportServer{ - Spec: v1alpha1.TransportServerSpec{ - Listener: v1alpha1.TransportServerListener{ + ts := conf_v1.TransportServer{ + Spec: conf_v1.TransportServerSpec{ + Listener: conf_v1.TransportServerListener{ Name: "tcp-listener", Protocol: "TCP", }, - Upstreams: []v1alpha1.Upstream{ + Upstreams: []conf_v1.TransportServerUpstream{ { Name: "upstream1", Service: "test-1", @@ -72,17 +72,17 @@ func TestValidateTransportServer_FailsOnInvalidInput(t *testing.T) { func TestValidateTransportServerUpstreams(t *testing.T) { t.Parallel() tests := []struct { - upstreams []v1alpha1.Upstream + upstreams []conf_v1.TransportServerUpstream expectedUpstreamNames sets.Set[string] msg string }{ { - upstreams: []v1alpha1.Upstream{}, + upstreams: []conf_v1.TransportServerUpstream{}, expectedUpstreamNames: sets.Set[string]{}, msg: "no upstreams", }, { - upstreams: []v1alpha1.Upstream{ + upstreams: []conf_v1.TransportServerUpstream{ { Name: "upstream1", Service: "test-1", @@ -116,12 +116,12 @@ func TestValidateTransportServerUpstreams(t *testing.T) { func TestValidateTransportServerUpstreams_FailsOnInvalidInput(t *testing.T) { t.Parallel() tests := []struct { - upstreams []v1alpha1.Upstream + upstreams []conf_v1.TransportServerUpstream expectedUpstreamNames sets.Set[string] msg string }{ { - upstreams: []v1alpha1.Upstream{ + upstreams: []conf_v1.TransportServerUpstream{ { Name: "@upstream1", Service: "test-1", @@ -132,7 +132,7 @@ func TestValidateTransportServerUpstreams_FailsOnInvalidInput(t *testing.T) { msg: "invalid upstream name", }, { - upstreams: []v1alpha1.Upstream{ + upstreams: []conf_v1.TransportServerUpstream{ { Name: "upstream1", Service: "@test-1", @@ -145,7 +145,7 @@ func 
TestValidateTransportServerUpstreams_FailsOnInvalidInput(t *testing.T) { msg: "invalid service", }, { - upstreams: []v1alpha1.Upstream{ + upstreams: []conf_v1.TransportServerUpstream{ { Name: "upstream1", Service: "test-1", @@ -158,7 +158,7 @@ func TestValidateTransportServerUpstreams_FailsOnInvalidInput(t *testing.T) { msg: "invalid port", }, { - upstreams: []v1alpha1.Upstream{ + upstreams: []conf_v1.TransportServerUpstream{ { Name: "upstream1", Service: "test-1", @@ -393,25 +393,25 @@ func TestValidateTransportServerHost_FailsOnInvalidInput(t *testing.T) { func TestValidateTransportListener(t *testing.T) { t.Parallel() tests := []struct { - listener *v1alpha1.TransportServerListener + listener *conf_v1.TransportServerListener tlsPassthrough bool }{ { - listener: &v1alpha1.TransportServerListener{ + listener: &conf_v1.TransportServerListener{ Name: "tcp-listener", Protocol: "TCP", }, tlsPassthrough: false, }, { - listener: &v1alpha1.TransportServerListener{ + listener: &conf_v1.TransportServerListener{ Name: "tcp-listener", Protocol: "TCP", }, tlsPassthrough: true, }, { - listener: &v1alpha1.TransportServerListener{ + listener: &conf_v1.TransportServerListener{ Name: "tls-passthrough", Protocol: "TLS_PASSTHROUGH", }, @@ -434,39 +434,39 @@ func TestValidateTransportListener(t *testing.T) { func TestValidateTransportListener_FailsOnInvalidInput(t *testing.T) { t.Parallel() tests := []struct { - listener *v1alpha1.TransportServerListener + listener *conf_v1.TransportServerListener tlsPassthrough bool }{ { - listener: &v1alpha1.TransportServerListener{ + listener: &conf_v1.TransportServerListener{ Name: "tls-passthrough", Protocol: "TLS_PASSTHROUGH", }, tlsPassthrough: false, }, { - listener: &v1alpha1.TransportServerListener{ + listener: &conf_v1.TransportServerListener{ Name: "tls-passthrough", Protocol: "abc", }, tlsPassthrough: true, }, { - listener: &v1alpha1.TransportServerListener{ + listener: &conf_v1.TransportServerListener{ Name: "tls-passthrough", Protocol: 
"abc", }, tlsPassthrough: false, }, { - listener: &v1alpha1.TransportServerListener{ + listener: &conf_v1.TransportServerListener{ Name: "abc", Protocol: "TLS_PASSTHROUGH", }, tlsPassthrough: true, }, { - listener: &v1alpha1.TransportServerListener{ + listener: &conf_v1.TransportServerListener{ Name: "abc", Protocol: "TLS_PASSTHROUGH", }, @@ -489,25 +489,25 @@ func TestValidateTransportListener_FailsOnInvalidInput(t *testing.T) { func TestValidateIsPotentialTLSPassthroughListener(t *testing.T) { t.Parallel() tests := []struct { - listener *v1alpha1.TransportServerListener + listener *conf_v1.TransportServerListener expected bool }{ { - listener: &v1alpha1.TransportServerListener{ + listener: &conf_v1.TransportServerListener{ Name: "tls-passthrough", Protocol: "abc", }, expected: true, }, { - listener: &v1alpha1.TransportServerListener{ + listener: &conf_v1.TransportServerListener{ Name: "abc", Protocol: "TLS_PASSTHROUGH", }, expected: true, }, { - listener: &v1alpha1.TransportServerListener{ + listener: &conf_v1.TransportServerListener{ Name: "tcp", Protocol: "TCP", }, @@ -541,7 +541,7 @@ func TestValidateListenerProtocol(t *testing.T) { func TestValidateTSUpstreamHealthChecks(t *testing.T) { t.Parallel() tests := []struct { - healthCheck *v1alpha1.HealthCheck + healthCheck *conf_v1.TransportServerHealthCheck msg string }{ { @@ -549,11 +549,11 @@ func TestValidateTSUpstreamHealthChecks(t *testing.T) { msg: "nil health check", }, { - healthCheck: &v1alpha1.HealthCheck{}, + healthCheck: &conf_v1.TransportServerHealthCheck{}, msg: "non nil health check", }, { - healthCheck: &v1alpha1.HealthCheck{ + healthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: true, Timeout: "30s", Jitter: "5s", @@ -576,11 +576,11 @@ func TestValidateTSUpstreamHealthChecks(t *testing.T) { func TestValidateTSUpstreamHealthChecks_FailsOnInvalidInput(t *testing.T) { t.Parallel() tests := []struct { - healthCheck *v1alpha1.HealthCheck + healthCheck *conf_v1.TransportServerHealthCheck msg 
string }{ { - healthCheck: &v1alpha1.HealthCheck{ + healthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: true, Timeout: "-30s", Jitter: "5s", @@ -592,7 +592,7 @@ func TestValidateTSUpstreamHealthChecks_FailsOnInvalidInput(t *testing.T) { msg: "invalid timeout", }, { - healthCheck: &v1alpha1.HealthCheck{ + healthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: true, Timeout: "30s", Jitter: "5s", @@ -604,7 +604,7 @@ func TestValidateTSUpstreamHealthChecks_FailsOnInvalidInput(t *testing.T) { msg: "invalid port number", }, { - healthCheck: &v1alpha1.HealthCheck{ + healthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: true, Timeout: "30s", Jitter: "5s", @@ -616,7 +616,7 @@ func TestValidateTSUpstreamHealthChecks_FailsOnInvalidInput(t *testing.T) { msg: "invalid passes value", }, { - healthCheck: &v1alpha1.HealthCheck{ + healthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: true, Timeout: "30s", Jitter: "5s", @@ -628,7 +628,7 @@ func TestValidateTSUpstreamHealthChecks_FailsOnInvalidInput(t *testing.T) { msg: "invalid fails value", }, { - healthCheck: &v1alpha1.HealthCheck{ + healthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: true, Timeout: "30s", Jitter: "5s", @@ -640,7 +640,7 @@ func TestValidateTSUpstreamHealthChecks_FailsOnInvalidInput(t *testing.T) { msg: "invalid interval value", }, { - healthCheck: &v1alpha1.HealthCheck{ + healthCheck: &conf_v1.TransportServerHealthCheck{ Enabled: true, Timeout: "30s", Jitter: "5sec", @@ -664,7 +664,7 @@ func TestValidateTSUpstreamHealthChecks_FailsOnInvalidInput(t *testing.T) { func TestValidateUpstreamParameters(t *testing.T) { t.Parallel() tests := []struct { - parameters *v1alpha1.UpstreamParameters + parameters *conf_v1.UpstreamParameters msg string }{ { @@ -672,7 +672,7 @@ func TestValidateUpstreamParameters(t *testing.T) { msg: "nil parameters", }, { - parameters: &v1alpha1.UpstreamParameters{}, + parameters: &conf_v1.UpstreamParameters{}, msg: "Non-nil parameters", }, } @@ -688,7 +688,7 @@ func 
TestValidateUpstreamParameters(t *testing.T) { func TestValidateSessionParameters(t *testing.T) { t.Parallel() tests := []struct { - parameters *v1alpha1.SessionParameters + parameters *conf_v1.SessionParameters msg string }{ { @@ -696,11 +696,11 @@ func TestValidateSessionParameters(t *testing.T) { msg: "nil parameters", }, { - parameters: &v1alpha1.SessionParameters{}, + parameters: &conf_v1.SessionParameters{}, msg: "Non-nil parameters", }, { - parameters: &v1alpha1.SessionParameters{ + parameters: &conf_v1.SessionParameters{ Timeout: "60s", }, msg: "valid parameters", @@ -718,11 +718,11 @@ func TestValidateSessionParameters(t *testing.T) { func TestValidateSessionParameters_FailsOnInvalidInput(t *testing.T) { t.Parallel() tests := []struct { - parameters *v1alpha1.SessionParameters + parameters *conf_v1.SessionParameters msg string }{ { - parameters: &v1alpha1.SessionParameters{ + parameters: &conf_v1.SessionParameters{ Timeout: "-1s", }, msg: "invalid timeout", @@ -799,7 +799,7 @@ func TestValidateTransportServerAction(t *testing.T) { "test": {}, } - action := &v1alpha1.Action{ + action := &conf_v1.TransportServerAction{ Pass: "test", } @@ -814,17 +814,17 @@ func TestValidateTransportServerAction_FailsOnInvalidInput(t *testing.T) { upstreamNames := map[string]sets.Empty{} tests := []struct { - action *v1alpha1.Action + action *conf_v1.TransportServerAction msg string }{ { - action: &v1alpha1.Action{ + action: &conf_v1.TransportServerAction{ Pass: "", }, msg: "missing pass field", }, { - action: &v1alpha1.Action{ + action: &conf_v1.TransportServerAction{ Pass: "non-existing", }, msg: "pass references a non-existing upstream", @@ -952,7 +952,7 @@ func TestValidateMatchExpect_FailsOnInvalidInput(t *testing.T) { func TestValidateTsTLS(t *testing.T) { t.Parallel() - validTLSes := []*v1alpha1.TLS{ + validTLSes := []*conf_v1.TransportServerTLS{ nil, { Secret: "my-secret", @@ -970,29 +970,29 @@ func TestValidateTsTLS(t *testing.T) { func 
TestValidateTsTLS_FailsOnInvalidInput(t *testing.T) { t.Parallel() invalidTLSes := []struct { - tls *v1alpha1.TLS + tls *conf_v1.TransportServerTLS isTLSPassthrough bool }{ { - tls: &v1alpha1.TLS{ + tls: &conf_v1.TransportServerTLS{ Secret: "-", }, isTLSPassthrough: false, }, { - tls: &v1alpha1.TLS{ + tls: &conf_v1.TransportServerTLS{ Secret: "a/b", }, isTLSPassthrough: false, }, { - tls: &v1alpha1.TLS{ + tls: &conf_v1.TransportServerTLS{ Secret: "my-secret", }, isTLSPassthrough: true, }, { - tls: &v1alpha1.TLS{ + tls: &conf_v1.TransportServerTLS{ Secret: "", }, isTLSPassthrough: false, diff --git a/pkg/client/clientset/versioned/typed/configuration/v1/configuration_client.go b/pkg/client/clientset/versioned/typed/configuration/v1/configuration_client.go index c59b6a1b1f..7e6bbef5de 100644 --- a/pkg/client/clientset/versioned/typed/configuration/v1/configuration_client.go +++ b/pkg/client/clientset/versioned/typed/configuration/v1/configuration_client.go @@ -12,7 +12,9 @@ import ( type K8sV1Interface interface { RESTClient() rest.Interface + GlobalConfigurationsGetter PoliciesGetter + TransportServersGetter VirtualServersGetter VirtualServerRoutesGetter } @@ -22,10 +24,18 @@ type K8sV1Client struct { restClient rest.Interface } +func (c *K8sV1Client) GlobalConfigurations(namespace string) GlobalConfigurationInterface { + return newGlobalConfigurations(c, namespace) +} + func (c *K8sV1Client) Policies(namespace string) PolicyInterface { return newPolicies(c, namespace) } +func (c *K8sV1Client) TransportServers(namespace string) TransportServerInterface { + return newTransportServers(c, namespace) +} + func (c *K8sV1Client) VirtualServers(namespace string) VirtualServerInterface { return newVirtualServers(c, namespace) } diff --git a/pkg/client/clientset/versioned/typed/configuration/v1/fake/fake_configuration_client.go b/pkg/client/clientset/versioned/typed/configuration/v1/fake/fake_configuration_client.go index 3281d34a8a..2c3b958221 100644 --- 
a/pkg/client/clientset/versioned/typed/configuration/v1/fake/fake_configuration_client.go +++ b/pkg/client/clientset/versioned/typed/configuration/v1/fake/fake_configuration_client.go @@ -12,10 +12,18 @@ type FakeK8sV1 struct { *testing.Fake } +func (c *FakeK8sV1) GlobalConfigurations(namespace string) v1.GlobalConfigurationInterface { + return &FakeGlobalConfigurations{c, namespace} +} + func (c *FakeK8sV1) Policies(namespace string) v1.PolicyInterface { return &FakePolicies{c, namespace} } +func (c *FakeK8sV1) TransportServers(namespace string) v1.TransportServerInterface { + return &FakeTransportServers{c, namespace} +} + func (c *FakeK8sV1) VirtualServers(namespace string) v1.VirtualServerInterface { return &FakeVirtualServers{c, namespace} } diff --git a/pkg/client/clientset/versioned/typed/configuration/v1/fake/fake_globalconfiguration.go b/pkg/client/clientset/versioned/typed/configuration/v1/fake/fake_globalconfiguration.go new file mode 100644 index 0000000000..3ab58a3634 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/configuration/v1/fake/fake_globalconfiguration.go @@ -0,0 +1,113 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeGlobalConfigurations implements GlobalConfigurationInterface +type FakeGlobalConfigurations struct { + Fake *FakeK8sV1 + ns string +} + +var globalconfigurationsResource = v1.SchemeGroupVersion.WithResource("globalconfigurations") + +var globalconfigurationsKind = v1.SchemeGroupVersion.WithKind("GlobalConfiguration") + +// Get takes name of the globalConfiguration, and returns the corresponding globalConfiguration object, and an error if there is any. 
+func (c *FakeGlobalConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.GlobalConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(globalconfigurationsResource, c.ns, name), &v1.GlobalConfiguration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.GlobalConfiguration), err +} + +// List takes label and field selectors, and returns the list of GlobalConfigurations that match those selectors. +func (c *FakeGlobalConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.GlobalConfigurationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(globalconfigurationsResource, globalconfigurationsKind, c.ns, opts), &v1.GlobalConfigurationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.GlobalConfigurationList{ListMeta: obj.(*v1.GlobalConfigurationList).ListMeta} + for _, item := range obj.(*v1.GlobalConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested globalConfigurations. +func (c *FakeGlobalConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(globalconfigurationsResource, c.ns, opts)) + +} + +// Create takes the representation of a globalConfiguration and creates it. Returns the server's representation of the globalConfiguration, and an error, if there is any. +func (c *FakeGlobalConfigurations) Create(ctx context.Context, globalConfiguration *v1.GlobalConfiguration, opts metav1.CreateOptions) (result *v1.GlobalConfiguration, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(globalconfigurationsResource, c.ns, globalConfiguration), &v1.GlobalConfiguration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.GlobalConfiguration), err +} + +// Update takes the representation of a globalConfiguration and updates it. Returns the server's representation of the globalConfiguration, and an error, if there is any. +func (c *FakeGlobalConfigurations) Update(ctx context.Context, globalConfiguration *v1.GlobalConfiguration, opts metav1.UpdateOptions) (result *v1.GlobalConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(globalconfigurationsResource, c.ns, globalConfiguration), &v1.GlobalConfiguration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.GlobalConfiguration), err +} + +// Delete takes name of the globalConfiguration and deletes it. Returns an error if one occurs. +func (c *FakeGlobalConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(globalconfigurationsResource, c.ns, name, opts), &v1.GlobalConfiguration{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeGlobalConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(globalconfigurationsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.GlobalConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched globalConfiguration. +func (c *FakeGlobalConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.GlobalConfiguration, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(globalconfigurationsResource, c.ns, name, pt, data, subresources...), &v1.GlobalConfiguration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.GlobalConfiguration), err +} diff --git a/pkg/client/clientset/versioned/typed/configuration/v1/fake/fake_transportserver.go b/pkg/client/clientset/versioned/typed/configuration/v1/fake/fake_transportserver.go new file mode 100644 index 0000000000..4162fb3c09 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/configuration/v1/fake/fake_transportserver.go @@ -0,0 +1,125 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeTransportServers implements TransportServerInterface +type FakeTransportServers struct { + Fake *FakeK8sV1 + ns string +} + +var transportserversResource = v1.SchemeGroupVersion.WithResource("transportservers") + +var transportserversKind = v1.SchemeGroupVersion.WithKind("TransportServer") + +// Get takes name of the transportServer, and returns the corresponding transportServer object, and an error if there is any. +func (c *FakeTransportServers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.TransportServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(transportserversResource, c.ns, name), &v1.TransportServer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.TransportServer), err +} + +// List takes label and field selectors, and returns the list of TransportServers that match those selectors. +func (c *FakeTransportServers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.TransportServerList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(transportserversResource, transportserversKind, c.ns, opts), &v1.TransportServerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.TransportServerList{ListMeta: obj.(*v1.TransportServerList).ListMeta} + for _, item := range obj.(*v1.TransportServerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested transportServers. +func (c *FakeTransportServers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(transportserversResource, c.ns, opts)) + +} + +// Create takes the representation of a transportServer and creates it. Returns the server's representation of the transportServer, and an error, if there is any. +func (c *FakeTransportServers) Create(ctx context.Context, transportServer *v1.TransportServer, opts metav1.CreateOptions) (result *v1.TransportServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(transportserversResource, c.ns, transportServer), &v1.TransportServer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.TransportServer), err +} + +// Update takes the representation of a transportServer and updates it. Returns the server's representation of the transportServer, and an error, if there is any. +func (c *FakeTransportServers) Update(ctx context.Context, transportServer *v1.TransportServer, opts metav1.UpdateOptions) (result *v1.TransportServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(transportserversResource, c.ns, transportServer), &v1.TransportServer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.TransportServer), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeTransportServers) UpdateStatus(ctx context.Context, transportServer *v1.TransportServer, opts metav1.UpdateOptions) (*v1.TransportServer, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(transportserversResource, "status", c.ns, transportServer), &v1.TransportServer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.TransportServer), err +} + +// Delete takes name of the transportServer and deletes it. Returns an error if one occurs. +func (c *FakeTransportServers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(transportserversResource, c.ns, name, opts), &v1.TransportServer{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeTransportServers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(transportserversResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.TransportServerList{}) + return err +} + +// Patch applies the patch and returns the patched transportServer. +func (c *FakeTransportServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TransportServer, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(transportserversResource, c.ns, name, pt, data, subresources...), &v1.TransportServer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.TransportServer), err +} diff --git a/pkg/client/clientset/versioned/typed/configuration/v1/generated_expansion.go b/pkg/client/clientset/versioned/typed/configuration/v1/generated_expansion.go index 1edd5af74e..cab83be6ec 100644 --- a/pkg/client/clientset/versioned/typed/configuration/v1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/configuration/v1/generated_expansion.go @@ -2,8 +2,12 @@ package v1 +type GlobalConfigurationExpansion interface{} + type PolicyExpansion interface{} +type TransportServerExpansion interface{} + type VirtualServerExpansion interface{} type VirtualServerRouteExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/configuration/v1/globalconfiguration.go b/pkg/client/clientset/versioned/typed/configuration/v1/globalconfiguration.go new file mode 100644 index 0000000000..a24cdb715d --- /dev/null +++ b/pkg/client/clientset/versioned/typed/configuration/v1/globalconfiguration.go @@ -0,0 +1,162 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" + scheme "github.com/nginxinc/kubernetes-ingress/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// GlobalConfigurationsGetter has a method to return a GlobalConfigurationInterface. +// A group's client should implement this interface. +type GlobalConfigurationsGetter interface { + GlobalConfigurations(namespace string) GlobalConfigurationInterface +} + +// GlobalConfigurationInterface has methods to work with GlobalConfiguration resources. 
+type GlobalConfigurationInterface interface { + Create(ctx context.Context, globalConfiguration *v1.GlobalConfiguration, opts metav1.CreateOptions) (*v1.GlobalConfiguration, error) + Update(ctx context.Context, globalConfiguration *v1.GlobalConfiguration, opts metav1.UpdateOptions) (*v1.GlobalConfiguration, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.GlobalConfiguration, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.GlobalConfigurationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.GlobalConfiguration, err error) + GlobalConfigurationExpansion +} + +// globalConfigurations implements GlobalConfigurationInterface +type globalConfigurations struct { + client rest.Interface + ns string +} + +// newGlobalConfigurations returns a GlobalConfigurations +func newGlobalConfigurations(c *K8sV1Client, namespace string) *globalConfigurations { + return &globalConfigurations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the globalConfiguration, and returns the corresponding globalConfiguration object, and an error if there is any. +func (c *globalConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.GlobalConfiguration, err error) { + result = &v1.GlobalConfiguration{} + err = c.client.Get(). + Namespace(c.ns). + Resource("globalconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of GlobalConfigurations that match those selectors. 
+func (c *globalConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.GlobalConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.GlobalConfigurationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("globalconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested globalConfigurations. +func (c *globalConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("globalconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a globalConfiguration and creates it. Returns the server's representation of the globalConfiguration, and an error, if there is any. +func (c *globalConfigurations) Create(ctx context.Context, globalConfiguration *v1.GlobalConfiguration, opts metav1.CreateOptions) (result *v1.GlobalConfiguration, err error) { + result = &v1.GlobalConfiguration{} + err = c.client.Post(). + Namespace(c.ns). + Resource("globalconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(globalConfiguration). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a globalConfiguration and updates it. Returns the server's representation of the globalConfiguration, and an error, if there is any. +func (c *globalConfigurations) Update(ctx context.Context, globalConfiguration *v1.GlobalConfiguration, opts metav1.UpdateOptions) (result *v1.GlobalConfiguration, err error) { + result = &v1.GlobalConfiguration{} + err = c.client.Put(). 
+ Namespace(c.ns). + Resource("globalconfigurations"). + Name(globalConfiguration.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(globalConfiguration). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the globalConfiguration and deletes it. Returns an error if one occurs. +func (c *globalConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("globalconfigurations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *globalConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("globalconfigurations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched globalConfiguration. +func (c *globalConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.GlobalConfiguration, err error) { + result = &v1.GlobalConfiguration{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("globalconfigurations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/configuration/v1/transportserver.go b/pkg/client/clientset/versioned/typed/configuration/v1/transportserver.go new file mode 100644 index 0000000000..53b5788c42 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/configuration/v1/transportserver.go @@ -0,0 +1,179 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" + scheme "github.com/nginxinc/kubernetes-ingress/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// TransportServersGetter has a method to return a TransportServerInterface. +// A group's client should implement this interface. +type TransportServersGetter interface { + TransportServers(namespace string) TransportServerInterface +} + +// TransportServerInterface has methods to work with TransportServer resources. +type TransportServerInterface interface { + Create(ctx context.Context, transportServer *v1.TransportServer, opts metav1.CreateOptions) (*v1.TransportServer, error) + Update(ctx context.Context, transportServer *v1.TransportServer, opts metav1.UpdateOptions) (*v1.TransportServer, error) + UpdateStatus(ctx context.Context, transportServer *v1.TransportServer, opts metav1.UpdateOptions) (*v1.TransportServer, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.TransportServer, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.TransportServerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TransportServer, err error) + TransportServerExpansion +} + +// transportServers implements TransportServerInterface +type transportServers struct { + client rest.Interface + ns string +} + +// newTransportServers returns a TransportServers +func newTransportServers(c *K8sV1Client, namespace string) 
*transportServers { + return &transportServers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the transportServer, and returns the corresponding transportServer object, and an error if there is any. +func (c *transportServers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.TransportServer, err error) { + result = &v1.TransportServer{} + err = c.client.Get(). + Namespace(c.ns). + Resource("transportservers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of TransportServers that match those selectors. +func (c *transportServers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.TransportServerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.TransportServerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("transportservers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested transportServers. +func (c *transportServers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("transportservers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a transportServer and creates it. Returns the server's representation of the transportServer, and an error, if there is any. 
+func (c *transportServers) Create(ctx context.Context, transportServer *v1.TransportServer, opts metav1.CreateOptions) (result *v1.TransportServer, err error) { + result = &v1.TransportServer{} + err = c.client.Post(). + Namespace(c.ns). + Resource("transportservers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(transportServer). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a transportServer and updates it. Returns the server's representation of the transportServer, and an error, if there is any. +func (c *transportServers) Update(ctx context.Context, transportServer *v1.TransportServer, opts metav1.UpdateOptions) (result *v1.TransportServer, err error) { + result = &v1.TransportServer{} + err = c.client.Put(). + Namespace(c.ns). + Resource("transportservers"). + Name(transportServer.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(transportServer). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *transportServers) UpdateStatus(ctx context.Context, transportServer *v1.TransportServer, opts metav1.UpdateOptions) (result *v1.TransportServer, err error) { + result = &v1.TransportServer{} + err = c.client.Put(). + Namespace(c.ns). + Resource("transportservers"). + Name(transportServer.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(transportServer). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the transportServer and deletes it. Returns an error if one occurs. +func (c *transportServers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("transportservers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *transportServers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("transportservers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched transportServer. +func (c *transportServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TransportServer, err error) { + result = &v1.TransportServer{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("transportservers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/informers/externalversions/configuration/v1/globalconfiguration.go b/pkg/client/informers/externalversions/configuration/v1/globalconfiguration.go new file mode 100644 index 0000000000..ae598e8dae --- /dev/null +++ b/pkg/client/informers/externalversions/configuration/v1/globalconfiguration.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + time "time" + + configurationv1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" + versioned "github.com/nginxinc/kubernetes-ingress/pkg/client/clientset/versioned" + internalinterfaces "github.com/nginxinc/kubernetes-ingress/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/nginxinc/kubernetes-ingress/pkg/client/listers/configuration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// GlobalConfigurationInformer provides access to a shared informer and lister for +// GlobalConfigurations. +type GlobalConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.GlobalConfigurationLister +} + +type globalConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewGlobalConfigurationInformer constructs a new informer for GlobalConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewGlobalConfigurationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredGlobalConfigurationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredGlobalConfigurationInformer constructs a new informer for GlobalConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredGlobalConfigurationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sV1().GlobalConfigurations(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sV1().GlobalConfigurations(namespace).Watch(context.TODO(), options) + }, + }, + &configurationv1.GlobalConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *globalConfigurationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredGlobalConfigurationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *globalConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configurationv1.GlobalConfiguration{}, f.defaultInformer) +} + +func (f *globalConfigurationInformer) Lister() v1.GlobalConfigurationLister { + return v1.NewGlobalConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/configuration/v1/interface.go b/pkg/client/informers/externalversions/configuration/v1/interface.go index 85ebcfab80..84651665ea 100644 --- a/pkg/client/informers/externalversions/configuration/v1/interface.go +++ b/pkg/client/informers/externalversions/configuration/v1/interface.go @@ -8,8 +8,12 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // GlobalConfigurations returns a GlobalConfigurationInformer. 
+ GlobalConfigurations() GlobalConfigurationInformer // Policies returns a PolicyInformer. Policies() PolicyInformer + // TransportServers returns a TransportServerInformer. + TransportServers() TransportServerInformer // VirtualServers returns a VirtualServerInformer. VirtualServers() VirtualServerInformer // VirtualServerRoutes returns a VirtualServerRouteInformer. @@ -27,11 +31,21 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// GlobalConfigurations returns a GlobalConfigurationInformer. +func (v *version) GlobalConfigurations() GlobalConfigurationInformer { + return &globalConfigurationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // Policies returns a PolicyInformer. func (v *version) Policies() PolicyInformer { return &policyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// TransportServers returns a TransportServerInformer. +func (v *version) TransportServers() TransportServerInformer { + return &transportServerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // VirtualServers returns a VirtualServerInformer. func (v *version) VirtualServers() VirtualServerInformer { return &virtualServerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/informers/externalversions/configuration/v1/transportserver.go b/pkg/client/informers/externalversions/configuration/v1/transportserver.go new file mode 100644 index 0000000000..3e668ff571 --- /dev/null +++ b/pkg/client/informers/externalversions/configuration/v1/transportserver.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + time "time" + + configurationv1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" + versioned "github.com/nginxinc/kubernetes-ingress/pkg/client/clientset/versioned" + internalinterfaces "github.com/nginxinc/kubernetes-ingress/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/nginxinc/kubernetes-ingress/pkg/client/listers/configuration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// TransportServerInformer provides access to a shared informer and lister for +// TransportServers. +type TransportServerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.TransportServerLister +} + +type transportServerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewTransportServerInformer constructs a new informer for TransportServer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewTransportServerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredTransportServerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredTransportServerInformer constructs a new informer for TransportServer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredTransportServerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sV1().TransportServers(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sV1().TransportServers(namespace).Watch(context.TODO(), options) + }, + }, + &configurationv1.TransportServer{}, + resyncPeriod, + indexers, + ) +} + +func (f *transportServerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredTransportServerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *transportServerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configurationv1.TransportServer{}, f.defaultInformer) +} + +func (f *transportServerInformer) Lister() v1.TransportServerLister { + return v1.NewTransportServerLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 6780037382..4165eab009 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -48,8 +48,12 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Externaldns().V1().DNSEndpoints().Informer()}, nil // Group=k8s.nginx.org, Version=v1 + case 
configurationv1.SchemeGroupVersion.WithResource("globalconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.K8s().V1().GlobalConfigurations().Informer()}, nil case configurationv1.SchemeGroupVersion.WithResource("policies"): return &genericInformer{resource: resource.GroupResource(), informer: f.K8s().V1().Policies().Informer()}, nil + case configurationv1.SchemeGroupVersion.WithResource("transportservers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.K8s().V1().TransportServers().Informer()}, nil case configurationv1.SchemeGroupVersion.WithResource("virtualservers"): return &genericInformer{resource: resource.GroupResource(), informer: f.K8s().V1().VirtualServers().Informer()}, nil case configurationv1.SchemeGroupVersion.WithResource("virtualserverroutes"): diff --git a/pkg/client/listers/configuration/v1/expansion_generated.go b/pkg/client/listers/configuration/v1/expansion_generated.go index bbbd79253c..3e48138835 100644 --- a/pkg/client/listers/configuration/v1/expansion_generated.go +++ b/pkg/client/listers/configuration/v1/expansion_generated.go @@ -2,6 +2,14 @@ package v1 +// GlobalConfigurationListerExpansion allows custom methods to be added to +// GlobalConfigurationLister. +type GlobalConfigurationListerExpansion interface{} + +// GlobalConfigurationNamespaceListerExpansion allows custom methods to be added to +// GlobalConfigurationNamespaceLister. +type GlobalConfigurationNamespaceListerExpansion interface{} + // PolicyListerExpansion allows custom methods to be added to // PolicyLister. type PolicyListerExpansion interface{} @@ -10,6 +18,14 @@ type PolicyListerExpansion interface{} // PolicyNamespaceLister. type PolicyNamespaceListerExpansion interface{} +// TransportServerListerExpansion allows custom methods to be added to +// TransportServerLister. 
+type TransportServerListerExpansion interface{} + +// TransportServerNamespaceListerExpansion allows custom methods to be added to +// TransportServerNamespaceLister. +type TransportServerNamespaceListerExpansion interface{} + // VirtualServerListerExpansion allows custom methods to be added to // VirtualServerLister. type VirtualServerListerExpansion interface{} diff --git a/pkg/client/listers/configuration/v1/globalconfiguration.go b/pkg/client/listers/configuration/v1/globalconfiguration.go new file mode 100644 index 0000000000..6c7a2f99ad --- /dev/null +++ b/pkg/client/listers/configuration/v1/globalconfiguration.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// GlobalConfigurationLister helps list GlobalConfigurations. +// All objects returned here must be treated as read-only. +type GlobalConfigurationLister interface { + // List lists all GlobalConfigurations in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.GlobalConfiguration, err error) + // GlobalConfigurations returns an object that can list and get GlobalConfigurations. + GlobalConfigurations(namespace string) GlobalConfigurationNamespaceLister + GlobalConfigurationListerExpansion +} + +// globalConfigurationLister implements the GlobalConfigurationLister interface. +type globalConfigurationLister struct { + indexer cache.Indexer +} + +// NewGlobalConfigurationLister returns a new GlobalConfigurationLister. +func NewGlobalConfigurationLister(indexer cache.Indexer) GlobalConfigurationLister { + return &globalConfigurationLister{indexer: indexer} +} + +// List lists all GlobalConfigurations in the indexer. 
+func (s *globalConfigurationLister) List(selector labels.Selector) (ret []*v1.GlobalConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.GlobalConfiguration)) + }) + return ret, err +} + +// GlobalConfigurations returns an object that can list and get GlobalConfigurations. +func (s *globalConfigurationLister) GlobalConfigurations(namespace string) GlobalConfigurationNamespaceLister { + return globalConfigurationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// GlobalConfigurationNamespaceLister helps list and get GlobalConfigurations. +// All objects returned here must be treated as read-only. +type GlobalConfigurationNamespaceLister interface { + // List lists all GlobalConfigurations in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.GlobalConfiguration, err error) + // Get retrieves the GlobalConfiguration from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.GlobalConfiguration, error) + GlobalConfigurationNamespaceListerExpansion +} + +// globalConfigurationNamespaceLister implements the GlobalConfigurationNamespaceLister +// interface. +type globalConfigurationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all GlobalConfigurations in the indexer for a given namespace. +func (s globalConfigurationNamespaceLister) List(selector labels.Selector) (ret []*v1.GlobalConfiguration, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.GlobalConfiguration)) + }) + return ret, err +} + +// Get retrieves the GlobalConfiguration from the indexer for a given namespace and name. 
+func (s globalConfigurationNamespaceLister) Get(name string) (*v1.GlobalConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("globalconfiguration"), name) + } + return obj.(*v1.GlobalConfiguration), nil +} diff --git a/pkg/client/listers/configuration/v1/transportserver.go b/pkg/client/listers/configuration/v1/transportserver.go new file mode 100644 index 0000000000..f943f558bc --- /dev/null +++ b/pkg/client/listers/configuration/v1/transportserver.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/nginxinc/kubernetes-ingress/pkg/apis/configuration/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// TransportServerLister helps list TransportServers. +// All objects returned here must be treated as read-only. +type TransportServerLister interface { + // List lists all TransportServers in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.TransportServer, err error) + // TransportServers returns an object that can list and get TransportServers. + TransportServers(namespace string) TransportServerNamespaceLister + TransportServerListerExpansion +} + +// transportServerLister implements the TransportServerLister interface. +type transportServerLister struct { + indexer cache.Indexer +} + +// NewTransportServerLister returns a new TransportServerLister. +func NewTransportServerLister(indexer cache.Indexer) TransportServerLister { + return &transportServerLister{indexer: indexer} +} + +// List lists all TransportServers in the indexer. 
+func (s *transportServerLister) List(selector labels.Selector) (ret []*v1.TransportServer, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.TransportServer)) + }) + return ret, err +} + +// TransportServers returns an object that can list and get TransportServers. +func (s *transportServerLister) TransportServers(namespace string) TransportServerNamespaceLister { + return transportServerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// TransportServerNamespaceLister helps list and get TransportServers. +// All objects returned here must be treated as read-only. +type TransportServerNamespaceLister interface { + // List lists all TransportServers in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.TransportServer, err error) + // Get retrieves the TransportServer from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.TransportServer, error) + TransportServerNamespaceListerExpansion +} + +// transportServerNamespaceLister implements the TransportServerNamespaceLister +// interface. +type transportServerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all TransportServers in the indexer for a given namespace. +func (s transportServerNamespaceLister) List(selector labels.Selector) (ret []*v1.TransportServer, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.TransportServer)) + }) + return ret, err +} + +// Get retrieves the TransportServer from the indexer for a given namespace and name. 
+func (s transportServerNamespaceLister) Get(name string) (*v1.TransportServer, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("transportserver"), name) + } + return obj.(*v1.TransportServer), nil +} diff --git a/tests/data/prometheus/transport-server/global-configuration.yaml b/tests/data/prometheus/transport-server/global-configuration.yaml index aa167f265c..b98e37d9a3 100644 --- a/tests/data/prometheus/transport-server/global-configuration.yaml +++ b/tests/data/prometheus/transport-server/global-configuration.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/tests/data/prometheus/transport-server/passthrough.yaml b/tests/data/prometheus/transport-server/passthrough.yaml index 3d1c77b18d..e84685336d 100644 --- a/tests/data/prometheus/transport-server/passthrough.yaml +++ b/tests/data/prometheus/transport-server/passthrough.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: passthrough diff --git a/tests/data/prometheus/transport-server/tcp.yaml b/tests/data/prometheus/transport-server/tcp.yaml index 73d78a6e3a..17ed81e03b 100644 --- a/tests/data/prometheus/transport-server/tcp.yaml +++ b/tests/data/prometheus/transport-server/tcp.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: tcp diff --git a/tests/data/prometheus/transport-server/udp.yaml b/tests/data/prometheus/transport-server/udp.yaml index cad9cfd31b..deeaa1a328 100644 --- a/tests/data/prometheus/transport-server/udp.yaml +++ b/tests/data/prometheus/transport-server/udp.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: udp diff --git 
a/tests/data/transport-server-externalname/standard/global-configuration.yaml b/tests/data/transport-server-externalname/standard/global-configuration.yaml index 18b6c3928c..ee0156a367 100644 --- a/tests/data/transport-server-externalname/standard/global-configuration.yaml +++ b/tests/data/transport-server-externalname/standard/global-configuration.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/tests/data/transport-server-externalname/standard/transport-server.yaml b/tests/data/transport-server-externalname/standard/transport-server.yaml index 1aff825167..ea0e6a55cc 100644 --- a/tests/data/transport-server-externalname/standard/transport-server.yaml +++ b/tests/data/transport-server-externalname/standard/transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-status/rejected-invalid.yaml b/tests/data/transport-server-status/rejected-invalid.yaml index 5f51486d99..343312bebe 100644 --- a/tests/data/transport-server-status/rejected-invalid.yaml +++ b/tests/data/transport-server-status/rejected-invalid.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-status/rejected-warning.yaml b/tests/data/transport-server-status/rejected-warning.yaml index b3e3176c47..73e68b5803 100644 --- a/tests/data/transport-server-status/rejected-warning.yaml +++ b/tests/data/transport-server-status/rejected-warning.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-status/standard/global-configuration.yaml b/tests/data/transport-server-status/standard/global-configuration.yaml 
index 18b6c3928c..ee0156a367 100644 --- a/tests/data/transport-server-status/standard/global-configuration.yaml +++ b/tests/data/transport-server-status/standard/global-configuration.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/tests/data/transport-server-status/standard/transport-server.yaml b/tests/data/transport-server-status/standard/transport-server.yaml index 277568ed44..159fcebd49 100644 --- a/tests/data/transport-server-status/standard/transport-server.yaml +++ b/tests/data/transport-server-status/standard/transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-tcp-load-balance/failing-hc-transport-server.yaml b/tests/data/transport-server-tcp-load-balance/failing-hc-transport-server.yaml index fc624268c0..0ef12185f3 100644 --- a/tests/data/transport-server-tcp-load-balance/failing-hc-transport-server.yaml +++ b/tests/data/transport-server-tcp-load-balance/failing-hc-transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-tcp-load-balance/max-connections-transport-server.yaml b/tests/data/transport-server-tcp-load-balance/max-connections-transport-server.yaml index da2e1a4a34..19aa3b161b 100644 --- a/tests/data/transport-server-tcp-load-balance/max-connections-transport-server.yaml +++ b/tests/data/transport-server-tcp-load-balance/max-connections-transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-tcp-load-balance/method-transport-server.yaml 
b/tests/data/transport-server-tcp-load-balance/method-transport-server.yaml index aa65f4ba36..7d32b7400f 100644 --- a/tests/data/transport-server-tcp-load-balance/method-transport-server.yaml +++ b/tests/data/transport-server-tcp-load-balance/method-transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-tcp-load-balance/missing-service-transport-server.yaml b/tests/data/transport-server-tcp-load-balance/missing-service-transport-server.yaml index 523933d6a2..edae5f1215 100644 --- a/tests/data/transport-server-tcp-load-balance/missing-service-transport-server.yaml +++ b/tests/data/transport-server-tcp-load-balance/missing-service-transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-tcp-load-balance/passing-hc-transport-server.yaml b/tests/data/transport-server-tcp-load-balance/passing-hc-transport-server.yaml index 067a5b0028..57db9a9622 100644 --- a/tests/data/transport-server-tcp-load-balance/passing-hc-transport-server.yaml +++ b/tests/data/transport-server-tcp-load-balance/passing-hc-transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-tcp-load-balance/second-transport-server.yaml b/tests/data/transport-server-tcp-load-balance/second-transport-server.yaml index 6b364fb393..e3abde7b92 100644 --- a/tests/data/transport-server-tcp-load-balance/second-transport-server.yaml +++ b/tests/data/transport-server-tcp-load-balance/second-transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server-two diff --git 
a/tests/data/transport-server-tcp-load-balance/standard/global-configuration.yaml b/tests/data/transport-server-tcp-load-balance/standard/global-configuration.yaml index 5052feab95..3d586279a0 100644 --- a/tests/data/transport-server-tcp-load-balance/standard/global-configuration.yaml +++ b/tests/data/transport-server-tcp-load-balance/standard/global-configuration.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/tests/data/transport-server-tcp-load-balance/standard/transport-server.yaml b/tests/data/transport-server-tcp-load-balance/standard/transport-server.yaml index 3e0c11367c..7f3a4d8c9b 100644 --- a/tests/data/transport-server-tcp-load-balance/standard/transport-server.yaml +++ b/tests/data/transport-server-tcp-load-balance/standard/transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-tcp-load-balance/transport-server-tls.yaml b/tests/data/transport-server-tcp-load-balance/transport-server-tls.yaml index a3c004f2cb..f0b00b0fa7 100644 --- a/tests/data/transport-server-tcp-load-balance/transport-server-tls.yaml +++ b/tests/data/transport-server-tcp-load-balance/transport-server-tls.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-tcp-load-balance/wrong-port-transport-server.yaml b/tests/data/transport-server-tcp-load-balance/wrong-port-transport-server.yaml index be7377e9a5..5222dcc628 100644 --- a/tests/data/transport-server-tcp-load-balance/wrong-port-transport-server.yaml +++ b/tests/data/transport-server-tcp-load-balance/wrong-port-transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: 
transport-server diff --git a/tests/data/transport-server-tls-passthrough/standard/transport-server.yaml b/tests/data/transport-server-tls-passthrough/standard/transport-server.yaml index 868ee78e80..35a86e6cce 100644 --- a/tests/data/transport-server-tls-passthrough/standard/transport-server.yaml +++ b/tests/data/transport-server-tls-passthrough/standard/transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-tls-passthrough/transport-server-same-host.yaml b/tests/data/transport-server-tls-passthrough/transport-server-same-host.yaml index 47f34a4b02..11b9ac6bf6 100644 --- a/tests/data/transport-server-tls-passthrough/transport-server-same-host.yaml +++ b/tests/data/transport-server-tls-passthrough/transport-server-same-host.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server-2 diff --git a/tests/data/transport-server-udp-load-balance/failing-hc-transport-server.yaml b/tests/data/transport-server-udp-load-balance/failing-hc-transport-server.yaml index 83f1378176..d12a6744a6 100644 --- a/tests/data/transport-server-udp-load-balance/failing-hc-transport-server.yaml +++ b/tests/data/transport-server-udp-load-balance/failing-hc-transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-udp-load-balance/missing-service-transport-server.yaml b/tests/data/transport-server-udp-load-balance/missing-service-transport-server.yaml index dc3ce61f6f..96179d6a7f 100644 --- a/tests/data/transport-server-udp-load-balance/missing-service-transport-server.yaml +++ b/tests/data/transport-server-udp-load-balance/missing-service-transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: 
k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-udp-load-balance/passing-hc-transport-server.yaml b/tests/data/transport-server-udp-load-balance/passing-hc-transport-server.yaml index edbf19018d..d5e2f340c5 100644 --- a/tests/data/transport-server-udp-load-balance/passing-hc-transport-server.yaml +++ b/tests/data/transport-server-udp-load-balance/passing-hc-transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-udp-load-balance/second-transport-server.yaml b/tests/data/transport-server-udp-load-balance/second-transport-server.yaml index b95dff3be7..55b30feab9 100644 --- a/tests/data/transport-server-udp-load-balance/second-transport-server.yaml +++ b/tests/data/transport-server-udp-load-balance/second-transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server-two diff --git a/tests/data/transport-server-udp-load-balance/standard/global-configuration.yaml b/tests/data/transport-server-udp-load-balance/standard/global-configuration.yaml index 0de8f4f66c..7fc8dee6ec 100644 --- a/tests/data/transport-server-udp-load-balance/standard/global-configuration.yaml +++ b/tests/data/transport-server-udp-load-balance/standard/global-configuration.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/tests/data/transport-server-udp-load-balance/standard/transport-server.yaml b/tests/data/transport-server-udp-load-balance/standard/transport-server.yaml index 448c5ef047..5c655dc87f 100644 --- a/tests/data/transport-server-udp-load-balance/standard/transport-server.yaml +++ b/tests/data/transport-server-udp-load-balance/standard/transport-server.yaml @@ -1,4 +1,4 @@ 
-apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server-udp-load-balance/wrong-port-transport-server.yaml b/tests/data/transport-server-udp-load-balance/wrong-port-transport-server.yaml index 9a515e5622..7d1e36ffd5 100644 --- a/tests/data/transport-server-udp-load-balance/wrong-port-transport-server.yaml +++ b/tests/data/transport-server-udp-load-balance/wrong-port-transport-server.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server/transport-server-configurable-timeouts.yaml b/tests/data/transport-server/transport-server-configurable-timeouts.yaml index f0acd86223..ec77c55d51 100644 --- a/tests/data/transport-server/transport-server-configurable-timeouts.yaml +++ b/tests/data/transport-server/transport-server-configurable-timeouts.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/transport-server/transport-server-snippets.yaml b/tests/data/transport-server/transport-server-snippets.yaml index 1cd4ccda06..ec3c58ba99 100644 --- a/tests/data/transport-server/transport-server-snippets.yaml +++ b/tests/data/transport-server/transport-server-snippets.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: TransportServer metadata: name: transport-server diff --git a/tests/data/virtual-server-custom-listeners/global-configuration-http-listener-with-ssl.yaml b/tests/data/virtual-server-custom-listeners/global-configuration-http-listener-with-ssl.yaml index 11daf67d67..4bd2f2713d 100644 --- a/tests/data/virtual-server-custom-listeners/global-configuration-http-listener-with-ssl.yaml +++ b/tests/data/virtual-server-custom-listeners/global-configuration-http-listener-with-ssl.yaml @@ -1,4 
+1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/tests/data/virtual-server-custom-listeners/global-configuration-https-listener-without-ssl.yaml b/tests/data/virtual-server-custom-listeners/global-configuration-https-listener-without-ssl.yaml index f447489926..91410eeb65 100644 --- a/tests/data/virtual-server-custom-listeners/global-configuration-https-listener-without-ssl.yaml +++ b/tests/data/virtual-server-custom-listeners/global-configuration-https-listener-without-ssl.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/tests/data/virtual-server-custom-listeners/global-configuration-missing-http-https.yaml b/tests/data/virtual-server-custom-listeners/global-configuration-missing-http-https.yaml index 66bbd0632d..56353d8586 100644 --- a/tests/data/virtual-server-custom-listeners/global-configuration-missing-http-https.yaml +++ b/tests/data/virtual-server-custom-listeners/global-configuration-missing-http-https.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/tests/data/virtual-server-custom-listeners/global-configuration-missing-http.yaml b/tests/data/virtual-server-custom-listeners/global-configuration-missing-http.yaml index d04f853ab4..8fbf412b25 100644 --- a/tests/data/virtual-server-custom-listeners/global-configuration-missing-http.yaml +++ b/tests/data/virtual-server-custom-listeners/global-configuration-missing-http.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/tests/data/virtual-server-custom-listeners/global-configuration-missing-https.yaml 
b/tests/data/virtual-server-custom-listeners/global-configuration-missing-https.yaml index e1d04f916c..5ef58641d1 100644 --- a/tests/data/virtual-server-custom-listeners/global-configuration-missing-https.yaml +++ b/tests/data/virtual-server-custom-listeners/global-configuration-missing-https.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/tests/data/virtual-server-custom-listeners/global-configuration.yaml b/tests/data/virtual-server-custom-listeners/global-configuration.yaml index cb6edda94e..1215383b59 100644 --- a/tests/data/virtual-server-custom-listeners/global-configuration.yaml +++ b/tests/data/virtual-server-custom-listeners/global-configuration.yaml @@ -1,4 +1,4 @@ -apiVersion: k8s.nginx.org/v1alpha1 +apiVersion: k8s.nginx.org/v1 kind: GlobalConfiguration metadata: name: nginx-configuration diff --git a/tests/suite/utils/custom_resources_utils.py b/tests/suite/utils/custom_resources_utils.py index 587773d3b3..a6b3e9f46b 100644 --- a/tests/suite/utils/custom_resources_utils.py +++ b/tests/suite/utils/custom_resources_utils.py @@ -110,9 +110,9 @@ def read_custom_resource_v1alpha1(custom_objects: CustomObjectsApi, namespace, p :param name: the custom object's name :return: object """ - print(f"Getting info for v1alpha1 crd {name} in namespace {namespace}") + print(f"Getting info for v1 crd {name} in namespace {namespace}") try: - response = custom_objects.get_namespaced_custom_object("k8s.nginx.org", "v1alpha1", namespace, plural, name) + response = custom_objects.get_namespaced_custom_object("k8s.nginx.org", "v1", namespace, plural, name) pprint(response) return response @@ -125,7 +125,7 @@ def read_ts(custom_objects: CustomObjectsApi, namespace, name) -> object: """ Read TransportService resource. 
""" - return read_custom_resource_v1alpha1(custom_objects, namespace, "transportservers", name) + return read_custom_resource(custom_objects, namespace, "transportservers", name) def create_ts_from_yaml(custom_objects: CustomObjectsApi, yaml_manifest, namespace) -> dict: @@ -163,7 +163,7 @@ def patch_gc_from_yaml(custom_objects: CustomObjectsApi, name, yaml_manifest, na :return: a dictionary representing the resource """ print(f"Load {yaml_manifest}") - return patch_custom_resource_v1alpha1(custom_objects, name, yaml_manifest, namespace, "globalconfigurations") + return patch_custom_resource(custom_objects, name, yaml_manifest, namespace, "globalconfigurations") def create_resource_from_yaml(custom_objects: CustomObjectsApi, yaml_manifest, namespace, plural) -> dict: @@ -373,10 +373,10 @@ def patch_ts_from_yaml(custom_objects: CustomObjectsApi, name, yaml_manifest, na """ Patch a TransportServer based on yaml manifest """ - return patch_custom_resource_v1alpha1(custom_objects, name, yaml_manifest, namespace, "transportservers") + return patch_custom_resource(custom_objects, name, yaml_manifest, namespace, "transportservers") -def patch_custom_resource_v1alpha1(custom_objects: CustomObjectsApi, name, yaml_manifest, namespace, plural) -> None: +def patch_custom_resource(custom_objects: CustomObjectsApi, name, yaml_manifest, namespace, plural) -> None: """ Patch a custom resource based on yaml manifest """ @@ -385,7 +385,7 @@ def patch_custom_resource_v1alpha1(custom_objects: CustomObjectsApi, name, yaml_ dep = yaml.safe_load(f) try: - custom_objects.patch_namespaced_custom_object("k8s.nginx.org", "v1alpha1", namespace, plural, name, dep) + custom_objects.patch_namespaced_custom_object("k8s.nginx.org", "v1", namespace, plural, name, dep) except ApiException: logging.exception(f"Failed with exception while patching custom resource: {name}") raise @@ -400,9 +400,7 @@ def patch_ts(custom_objects: CustomObjectsApi, namespace, body) -> None: print(f"Update a Resource: 
{name}") try: - custom_objects.patch_namespaced_custom_object( - "k8s.nginx.org", "v1alpha1", namespace, "transportservers", name, body - ) + custom_objects.patch_namespaced_custom_object("k8s.nginx.org", "v1", namespace, "transportservers", name, body) except ApiException: logging.exception(f"Failed with exception while patching custom resource: {name}") raise