From 38dc912170e7b83e020175fd4abcdb6649191ddc Mon Sep 17 00:00:00 2001 From: Dan Stough Date: Wed, 13 Sep 2023 21:36:34 -0400 Subject: [PATCH] feat: v2 mesh-init command (#2941) * feat: v2 mesh-init command * bugfix mesh-init test * add mesh-init args to webhook * fix: remove v2 flags from partition-init * update telemetry-collector with v2 flags * Apply suggestions from code review Co-authored-by: Michael Zalimeni * PR feedback Part II * bugfix test * fix: endpoints v2 selector stability --------- Co-authored-by: Michael Zalimeni --- .../consul/templates/partition-init-job.yaml | 3 - .../telemetry-collector-deployment.yaml | 2 +- .../telemetry-collector-v2-deployment.yaml | 378 +++++ .../unit/telemetry-collector-deployment.bats | 14 + .../telemetry-collector-v2-deployment.bats | 1349 +++++++++++++++++ control-plane/commands.go | 8 +- .../constants/annotations_and_labels.go | 6 + .../endpointsv2/endpoints_controller.go | 12 +- .../endpointsv2/endpoints_controller_test.go | 12 +- .../consul_dataplane_sidecar_test.go | 2 +- .../webhook_v2/container_init.go | 29 +- .../webhook_v2/container_init_test.go | 84 +- .../webhook_v2/container_volume.go | 2 +- .../connect-inject/webhook_v2/mesh_webhook.go | 4 +- .../webhook_v2/mesh_webhook_test.go | 6 +- control-plane/consul/dataplane_client.go | 28 + control-plane/consul/dataplane_client_test.go | 206 +++ control-plane/subcommand/mesh-init/command.go | 283 ++++ .../subcommand/mesh-init/command_ent_test.go | 118 ++ .../subcommand/mesh-init/command_test.go | 425 ++++++ .../subcommand/partition-init/command.go | 9 - 21 files changed, 2868 insertions(+), 112 deletions(-) create mode 100644 charts/consul/templates/telemetry-collector-v2-deployment.yaml create mode 100755 charts/consul/test/unit/telemetry-collector-v2-deployment.bats create mode 100644 control-plane/consul/dataplane_client.go create mode 100644 control-plane/consul/dataplane_client_test.go create mode 100644 control-plane/subcommand/mesh-init/command.go create mode 100644 control-plane/subcommand/mesh-init/command_ent_test.go create mode 100644 control-plane/subcommand/mesh-init/command_test.go diff --git a/charts/consul/templates/partition-init-job.yaml b/charts/consul/templates/partition-init-job.yaml index 663a64bcbf..6e21289f22 100644 --- a/charts/consul/templates/partition-init-job.yaml +++ b/charts/consul/templates/partition-init-job.yaml @@ -117,9 +117,6 @@ spec: {{- if .Values.global.cloud.enabled }} -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} \ {{- end }} - {{- if (mustHas "resource-apis" .Values.global.experiments) }} - -enable-resource-apis=true - {{- end }} resources: requests: memory: "50Mi" diff --git a/charts/consul/templates/telemetry-collector-deployment.yaml b/charts/consul/templates/telemetry-collector-deployment.yaml index d6f3a91cfa..396cc147ab 100644 --- a/charts/consul/templates/telemetry-collector-deployment.yaml +++ b/charts/consul/templates/telemetry-collector-deployment.yaml @@ -1,4 +1,4 @@ -{{- if .Values.telemetryCollector.enabled }} +{{- if and .Values.telemetryCollector.enabled (not (mustHas "resource-apis" .Values.global.experiments)) }} {{- if not .Values.telemetryCollector.image}}{{ fail "telemetryCollector.image must be set to enable consul-telemetry-collector" }}{{ end }} {{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true 
if global.adminPartitions.enabled=true" }}{{ end }} diff --git a/charts/consul/templates/telemetry-collector-v2-deployment.yaml b/charts/consul/templates/telemetry-collector-v2-deployment.yaml new file mode 100644 index 0000000000..a88277f3b2 --- /dev/null +++ b/charts/consul/templates/telemetry-collector-v2-deployment.yaml @@ -0,0 +1,378 @@ +{{- if and .Values.telemetryCollector.enabled (mustHas "resource-apis" .Values.global.experiments) }} +{{- if not .Values.telemetryCollector.image}}{{ fail "telemetryCollector.image must be set to enable consul-telemetry-collector" }}{{ end }} +{{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} +{{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} +{{ template "consul.validateCloudSecretKeys" . }} +{{ template "consul.validateTelemetryCollectorCloud" . }} +{{ template "consul.validateTelemetryCollectorCloudSecretKeys" . }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "consul.fullname" . }}-telemetry-collector + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: consul-telemetry-collector + {{- if .Values.global.extraLabels }} + {{- toYaml .Values.global.extraLabels | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.telemetryCollector.replicas }} + selector: + matchLabels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: consul-telemetry-collector + template: + metadata: + annotations: + "consul.hashicorp.com/mesh-inject": "false" + # This annotation tells the pod controller that this pod was injected even though it wasn't. + # This ensures the pod controller will sync a workload for the pod into Consul + "consul.hashicorp.com/mesh-inject-status": "injected" + # We aren't using tproxy and we don't have an original pod. This would be simpler if we made a path similar + # to gateways + "consul.hashicorp.com/transparent-proxy": "false" + "consul.hashicorp.com/transparent-proxy-overwrite-probes": "false" + "consul.hashicorp.com/consul-k8s-version": {{ $.Chart.Version }} + {{- if .Values.telemetryCollector.customExporterConfig }} + # configmap checksum + "consul.hashicorp.com/config-checksum": {{ include (print $.Template.BasePath "/telemetry-collector-configmap.yaml") . | sha256sum }} + {{- end }} + # vault annotations + {{- if (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) }} + "vault.hashicorp.com/agent-init-first": "true" + "vault.hashicorp.com/agent-inject": "true" + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }} + "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }} + "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . 
}} + {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} + "vault.hashicorp.com/agent-extra-secret": "{{ .Values.global.secretsBackend.vault.ca.secretName }}" + "vault.hashicorp.com/ca-cert": "/vault/custom/{{ .Values.global.secretsBackend.vault.ca.secretKey }}" + {{- end }} + {{- if .Values.global.secretsBackend.vault.agentAnnotations }} + {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . | nindent 8 | trim }} + {{- end }} + {{- if (and (.Values.global.secretsBackend.vault.vaultNamespace) (not (hasKey (default "" .Values.global.secretsBackend.vault.agentAnnotations | fromYaml) "vault.hashicorp.com/namespace")))}} + "vault.hashicorp.com/namespace": "{{ .Values.global.secretsBackend.vault.vaultNamespace }}" + {{- end }} + {{- end }} + + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: consul-telemetry-collector + {{- if .Values.global.extraLabels }} + {{- toYaml .Values.global.extraLabels | nindent 8 }} + {{- end }} + spec: + # This needs to explicitly be consul-telemetry-collector because we look this up from each service consul-dataplane + # to forward metrics to it. + serviceAccountName: consul-telemetry-collector + initContainers: + # We're manually managing this init container instead of using the mesh injector so that we don't run into + # any race conditions on the mesh-injector deployment or upgrade + - name: consul-mesh-init + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_LOGIN_AUTH_METHOD + value: {{ template "consul.fullname" . }}-k8s-auth-method + - name: CONSUL_LOGIN_META + value: "component=consul-telemetry-collector,pod=$(NAMESPACE)/$(POD_NAME)" + {{- end }} + {{- include "consul.consulK8sConsulServerEnvVars" . 
| nindent 10 }} + {{- if .Values.global.enableConsulNamespaces }} + - name: CONSUL_NAMESPACE + value: {{ .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} + {{- if .Values.syncCatalog.consulNamespaces.mirroringK8S }} + - name: CONSUL_LOGIN_NAMESPACE + value: "default" + {{- else }} + - name: CONSUL_LOGIN_NAMESPACE + value: {{ .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} + {{- end }} + {{- end }} + command: + - /bin/sh + - -ec + - |- + consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ + -log-level={{ default .Values.global.logLevel .Values.telemetryCollector.logLevel }} \ + -log-json={{ .Values.global.logJSON }} + + image: {{ .Values.global.imageK8S }} + imagePullPolicy: IfNotPresent + {{- if .Values.telemetryCollector.initContainer.resources }} + resources: + {{- toYaml .Values.telemetryCollector.initContainer.resources | nindent 12 }} + {{- else }} + resources: + limits: + cpu: 50m + memory: 150Mi + requests: + cpu: 50m + memory: 25Mi + {{- end }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /consul/mesh-inject + name: consul-mesh-inject-data + {{- if .Values.global.tls.enabled }} + {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- end }} + containers: + - name: consul-telemetry-collector + image: {{ .Values.telemetryCollector.image }} + imagePullPolicy: {{ .Values.global.imagePullPolicy }} + ports: + - containerPort: 9090 + name: metrics + protocol: TCP + - containerPort: 9356 + name: metricsserver + protocol: TCP + env: + # These are mounted as secrets so that the telemetry-collector can use them when cloud is enabled. + # - the hcp-go-sdk in consul agent will already look for HCP_CLIENT_ID, HCP_CLIENT_SECRET, HCP_AUTH_URL, + # HCP_SCADA_ADDRESS, and HCP_API_HOST. so nothing more needs to be done. 
+ # - HCP_RESOURCE_ID is created for use in the global cloud section but we will share it here + {{- if .Values.telemetryCollector.cloud.clientId.secretName }} + - name: HCP_CLIENT_ID + valueFrom: + secretKeyRef: + name: {{ .Values.telemetryCollector.cloud.clientId.secretName }} + key: {{ .Values.telemetryCollector.cloud.clientId.secretKey }} + {{- end }} + {{- if .Values.telemetryCollector.cloud.clientSecret.secretName }} + - name: HCP_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.telemetryCollector.cloud.clientSecret.secretName }} + key: {{ .Values.telemetryCollector.cloud.clientSecret.secretKey }} + {{- end}} + {{- if .Values.global.cloud.resourceId.secretName }} + - name: HCP_RESOURCE_ID + valueFrom: + secretKeyRef: + name: {{ .Values.global.cloud.resourceId.secretName }} + key: {{ .Values.global.cloud.resourceId.secretKey }} + {{- end }} + {{- if .Values.global.cloud.authUrl.secretName }} + - name: HCP_AUTH_URL + valueFrom: + secretKeyRef: + name: {{ .Values.global.cloud.authUrl.secretName }} + key: {{ .Values.global.cloud.authUrl.secretKey }} + {{- end}} + {{- if .Values.global.cloud.apiHost.secretName }} + - name: HCP_API_HOST + valueFrom: + secretKeyRef: + name: {{ .Values.global.cloud.apiHost.secretName }} + key: {{ .Values.global.cloud.apiHost.secretKey }} + {{- end}} + {{- if .Values.global.cloud.scadaAddress.secretName }} + - name: HCP_SCADA_ADDRESS + valueFrom: + secretKeyRef: + name: {{ .Values.global.cloud.scadaAddress.secretName }} + key: {{ .Values.global.cloud.scadaAddress.secretKey }} + {{- end}} + {{- if .Values.global.trustedCAs }} + - name: SSL_CERT_DIR + value: "/etc/ssl/certs:/trusted-cas" + {{- end }} + {{- include "consul.extraEnvironmentVars" .Values.telemetryCollector | nindent 12 }} + command: + - "/bin/sh" + - "-ec" + - | + {{- if .Values.global.trustedCAs }} + {{- range $i, $cert := .Values.global.trustedCAs }} + cat < /trusted-cas/custom-ca-{{$i}}.pem + {{- $cert | nindent 10 }} + EOF + {{- end }} + {{- end }} + + consul-telemetry-collector agent \ + {{- if .Values.telemetryCollector.customExporterConfig }} + -config-file-path /consul/config/config.json \ + {{ end }} + volumeMounts: + {{- if .Values.telemetryCollector.customExporterConfig }} + - name: config + mountPath: /consul/config + {{- end }} + {{- if .Values.global.trustedCAs }} + - name: trusted-cas + mountPath: /trusted-cas + readOnly: false + {{- end }} + resources: + {{- if .Values.telemetryCollector.resources }} + {{- toYaml .Values.telemetryCollector.resources | nindent 12 }} + {{- end }} + # consul-dataplane container + - name: consul-dataplane + image: "{{ .Values.global.imageConsulDataplane }}" + imagePullPolicy: IfNotPresent + command: + - consul-dataplane + args: + # addresses + {{- if .Values.externalServers.enabled }} + - -addresses={{ .Values.externalServers.hosts | first }} + {{- else }} + - -addresses={{ template "consul.fullname" . 
}}-server.{{ .Release.Namespace }}.svc
+        {{- end }}
+        # grpc
+        {{- if .Values.externalServers.enabled }}
+        - -grpc-port={{ .Values.externalServers.grpcPort }}
+        {{- else }}
+        - -grpc-port=8502
+        {{- end }}
+        # tls
+        {{- if .Values.global.tls.enabled }}
+        {{- if (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) }}
+        {{- if .Values.global.secretsBackend.vault.enabled }}
+        - -ca-certs=/vault/secrets/serverca.crt
+        {{- else }}
+        - -ca-certs=/consul/tls/ca/tls.crt
+        {{- end }}
+        {{- end }}
+        {{- if and .Values.externalServers.enabled .Values.externalServers.tlsServerName }}
+        - -tls-server-name={{.Values.externalServers.tlsServerName }}
+        {{- else if .Values.global.cloud.enabled }}
+        - -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}}
+        {{- end }}
+        {{- else }}
+        - -tls-disabled
+        {{- end }}
+        # credentials
+        {{- if .Values.global.acls.manageSystemACLs }}
+        - -credential-type=login
+        - -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token
+        - -login-auth-method={{ template "consul.fullname" . }}-k8s-auth-method
+        {{- if .Values.global.enableConsulNamespaces }}
+        {{- if .Values.syncCatalog.consulNamespaces.mirroringK8S }}
+        - -login-namespace="default"
+        {{- else }}
+        - -login-namespace={{ .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }}
+        {{- end }}
+        {{- end }}
+        {{- if .Values.global.adminPartitions.enabled }}
+        - -login-partition={{ .Values.global.adminPartitions.name }}
+        {{- end }}
+        {{- end }}
+        {{- if .Values.global.enableConsulNamespaces }}
+        - -service-namespace={{ .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }}
+        {{- end }}
+        {{- if .Values.global.adminPartitions.enabled }}
+        - -service-partition={{ .Values.global.adminPartitions.name }}
+        {{- end }}
+        {{- if .Values.global.metrics.enabled }}
+        - -telemetry-prom-scrape-path=/metrics
+        {{- end }}
+        - -log-level={{ default .Values.global.logLevel .Values.telemetryCollector.logLevel }}
+        - -log-json={{ .Values.global.logJSON }}
+        - -envoy-concurrency=2
+        {{- if and .Values.externalServers.enabled .Values.externalServers.skipServerWatch }}
+        - -server-watch-disabled=true
+        {{- end }}
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: DP_PROXY_ID
+          value: $(POD_NAME)
+        - name: DP_CREDENTIAL_LOGIN_META1
+          value: pod=$(NAMESPACE)/$(POD_NAME)
+        - name: DP_CREDENTIAL_LOGIN_META2
+          value: component=consul-telemetry-collector
+        - name: TMPDIR
+          value: /consul/mesh-inject
+        readinessProbe:
+          failureThreshold: 3
+          initialDelaySeconds: 1
+          periodSeconds: 10
+          successThreshold: 1
+          tcpSocket:
+            port: 20000
+          timeoutSeconds: 1
+        securityContext:
+          readOnlyRootFilesystem: true
+          runAsGroup: 5995
+          runAsNonRoot: true
+          runAsUser: 5995
+        # dataplane volume mounts
+        volumeMounts:
+        - mountPath: /consul/mesh-inject
+          name: consul-mesh-inject-data
+        {{- if .Values.global.tls.enabled }}
+        {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }}
+        - name: consul-ca-cert
+          mountPath: /consul/tls/ca
+          readOnly: true
+        {{- end }}
+        {{- end }}
+
+      {{- if .Values.telemetryCollector.nodeSelector }}
+      nodeSelector:
+        {{ tpl .Values.telemetryCollector.nodeSelector . 
| indent 8 | trim }} + {{- end }} + {{- if .Values.telemetryCollector.priorityClassName }} + priorityClassName: {{ .Values.telemetryCollector.priorityClassName }} + {{- end }} + volumes: + - emptyDir: + medium: Memory + name: consul-mesh-inject-data + {{- if .Values.global.trustedCAs }} + - name: trusted-cas + emptyDir: + medium: "Memory" + {{- end }} + {{- if .Values.global.tls.enabled }} + {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- end }} + - name: config + configMap: + name: {{ template "consul.fullname" . }}-telemetry-collector +{{- end }} diff --git a/charts/consul/test/unit/telemetry-collector-deployment.bats b/charts/consul/test/unit/telemetry-collector-deployment.bats index ad50341061..432200541b 100755 --- a/charts/consul/test/unit/telemetry-collector-deployment.bats +++ b/charts/consul/test/unit/telemetry-collector-deployment.bats @@ -1198,4 +1198,18 @@ MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ local actual=$(echo "$cmd" | yq 'any(contains("-log-level=debug"))' | tee /dev/stderr) [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.experiments=["resource-apis"] + +@test "telemetryCollector/Deployment: disabled when V2 is enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/telemetry-collector-deployment.yaml \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + . } \ No newline at end of file diff --git a/charts/consul/test/unit/telemetry-collector-v2-deployment.bats b/charts/consul/test/unit/telemetry-collector-v2-deployment.bats new file mode 100755 index 0000000000..3f77169fd6 --- /dev/null +++ b/charts/consul/test/unit/telemetry-collector-v2-deployment.bats @@ -0,0 +1,1349 @@ +#!/usr/bin/env bats + +load _helpers + +@test "telemetryCollector/Deployment(V2): disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + . +} + +@test "telemetryCollector/Deployment(V2): fails if no image is set" { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=null' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "telemetryCollector.image must be set to enable consul-telemetry-collector" ]] +} + +@test "telemetryCollector/Deployment(V2): disable with telemetry-collector.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=false' \ + . 
+} + +@test "telemetryCollector/Deployment(V2): disable with global.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'global.enabled=false' \ + . +} + +@test "telemetryCollector/Deployment(V2): container image overrides" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "\"bar\"" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "telemetryCollector/Deployment(V2): nodeSelector is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "telemetryCollector/Deployment(V2): specified nodeSelector" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.nodeSelector=testing' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "testing" ] +} + +#-------------------------------------------------------------------- +# consul.name + +@test "telemetryCollector/Deployment(V2): name is constant regardless of consul name" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'consul.name=foobar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].name' | tee /dev/stderr) + [ "${actual}" = "consul-telemetry-collector" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "telemetryCollector/Deployment(V2): Adds tls-ca-cert volume when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "telemetryCollector/Deployment(V2): Adds tls-ca-cert volumeMounts when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "telemetryCollector/Deployment(V2): can overwrite CA secret with the provided one" { + cd `chart_dir` + local ca_cert_volume=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that the volume uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} + +#-------------------------------------------------------------------- +# global.tls.enableAutoEncrypt + +@test "telemetryCollector/Deployment(V2): consul-ca-cert volumeMount is added when TLS with auto-encrypt is enabled without clients" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "consul-ca-cert") | length > 0' | tee + /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo.com' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +#-------------------------------------------------------------------- +# resources + +@test "telemetryCollector/Deployment(V2): resources has default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + + [ $(echo "${actual}" | yq -r '.requests.memory') = "512Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "1000m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "512Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "1000m" ] +} + +@test "telemetryCollector/Deployment(V2): resources can be overridden" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'telemetryCollector.resources.foo=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].resources.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# init container resources + +@test "telemetryCollector/Deployment(V2): init container has default resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + [ $(echo "${actual}" | yq -r '.requests.memory') = "25Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "50m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "150Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "50m" ] +} + +@test "telemetryCollector/Deployment(V2): init container resources can be set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'telemetryCollector.initContainer.resources.requests.memory=memory' \ + --set 'telemetryCollector.initContainer.resources.requests.cpu=cpu' \ + --set 'telemetryCollector.initContainer.resources.limits.memory=memory2' \ + --set 'telemetryCollector.initContainer.resources.limits.cpu=cpu2' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] +} + +#-------------------------------------------------------------------- +# priorityClassName + +@test "telemetryCollector/Deployment(V2): no priorityClassName by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.priorityClassName' | tee /dev/stderr) + + [ "${actual}" = "null" ] +} + +@test "telemetryCollector/Deployment(V2): can set a priorityClassName" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'telemetryCollector.priorityClassName=name' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.priorityClassName' | tee /dev/stderr) + + [ "${actual}" = "name" ] +} + +#-------------------------------------------------------------------- +# replicas + +@test "telemetryCollector/Deployment(V2): replicas defaults to 1" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + . | tee /dev/stderr | + yq '.spec.replicas' | tee /dev/stderr) + + [ "${actual}" = "1" ] +} + +@test "telemetryCollector/Deployment(V2): replicas can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'telemetryCollector.replicas=3' \ + . | tee /dev/stderr | + yq '.spec.replicas' | tee /dev/stderr) + + [ "${actual}" = "3" ] +} + +#-------------------------------------------------------------------- +# Vault + +@test "telemetryCollector/Deployment(V2): vault CA is not configured by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "telemetryCollector/Deployment(V2): vault CA is not configured when secretName is set but secretKey is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "telemetryCollector/Deployment(V2): vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "vns" ] +} + +@test "telemetryCollector/Deployment(V2): correct vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set and agentAnnotations are also set without vaultNamespace annotation" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.secretsBackend.vault.agentAnnotations=vault.hashicorp.com/agent-extra-secret: bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "vns" ] +} + +@test "telemetryCollector/Deployment(V2): correct vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set and agentAnnotations are also set with vaultNamespace annotation" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.secretsBackend.vault.agentAnnotations=vault.hashicorp.com/namespace: bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "bar" ] +} + +@test "telemetryCollector/Deployment(V2): vault CA is not configured when secretKey is set but secretName is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "telemetryCollector/Deployment(V2): vault CA is configured when both secretName and secretKey are set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-extra-secret"') + [ "${actual}" = "ca" ] + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/ca-cert"') + [ "${actual}" = "/vault/custom/tls.crt" ] +} + +@test "telemetryCollector/Deployment(V2): vault tls annotations are set when tls is enabled" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'server.serverCert.secretName=pki_int/issue/test' \ + --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr)" + local expected=$'{{- with secret \"pki_int/cert/ca\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr)" + [ "${actual}" = "pki_int/cert/ca" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-init-first"]' | tee /dev/stderr)" + [ "${actual}" = "true" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr)" + [ "${actual}" = "true" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr)" + [ "${actual}" = "test" ] +} + +@test "telemetryCollector/Deployment(V2): vault agent annotations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=foo' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# telemetryCollector.cloud + +@test "telemetryCollector/Deployment(V2): success with all cloud bits set" { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientSecret.secretName=client-secret-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-key' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ + . +} + +@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientId is set and global.cloud.resourceId is not set or global.cloud.clientSecret.secretName is not set" { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientSecret.secretName=client-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-id-key' \ + --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." 
]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.authUrl.secretName=auth-url-name' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." 
]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.authUrl.secretKey=auth-url-key' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.apiHost.secretName=auth-url-name' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.apiHost.secretKey=auth-url-key' \ + . 
+ [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.datacenter=dc-foo' \ + --set 'global.domain=bar' \ + --set 'global.cloud.enabled=true' \ + --set 'global.cloud.clientId.secretName=client-id-name' \ + --set 'global.cloud.clientId.secretKey=client-id-key' \ + --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ + --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientId.secretName is set but telemetryCollector.cloud.clientId.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.cloud.clientId.secretName=client-id-name' \ + --set 'telemetryCollector.clientSecret.secretName=client-secret-id-name' \ + --set 'telemetryCollector.clientSecret.secretKey=client-secret-id-key' \ + --set 'global.resourceId.secretName=resource-id-name' \ + --set 'global.resourceId.secretKey=resource-id-key' \ + . 
+  [ "$status" -eq 1 ]
+
+  [[ "$output" =~ "When telemetryCollector.cloud.clientId.secretName is set, global.cloud.resourceId.secretName, telemetryCollector.cloud.clientSecret.secretName must also be set." ]]
+}
+
+@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientId.secretKey is set but telemetryCollector.cloud.clientId.secretName is not set." {
+  cd `chart_dir`
+  run helm template \
+      -s templates/telemetry-collector-v2-deployment.yaml \
+      --set 'ui.enabled=false' \
+      --set 'global.experiments[0]=resource-apis' \
+      --set 'telemetryCollector.enabled=true' \
+      --set 'telemetryCollector.image=bar' \
+      --set 'telemetryCollector.cloud.clientId.secretName=client-id-name' \
+      --set 'telemetryCollector.cloud.clientId.secretKey=client-id-key' \
+      --set 'telemetryCollector.clientSecret.secretName=client-secret-id-name' \
+      --set 'global.resourceId.secretName=resource-id-name' \
+      --set 'global.resourceId.secretKey=resource-id-key' \
+      .
+  [ "$status" -eq 1 ]
+
+  [[ "$output" =~ "When telemetryCollector.cloud.clientId.secretName is set, global.cloud.resourceId.secretName, telemetryCollector.cloud.clientSecret.secretName must also be set." ]]
+}
+
+@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientSecret.secretName is set but telemetryCollector.cloud.clientId.secretName is not set." {
+  cd `chart_dir`
+  run helm template \
+      -s templates/telemetry-collector-v2-deployment.yaml \
+      --set 'ui.enabled=false' \
+      --set 'global.experiments[0]=resource-apis' \
+      --set 'telemetryCollector.enabled=true' \
+      --set 'telemetryCollector.image=bar' \
+      --set 'telemetryCollector.cloud.clientId.secretName=client-id-name' \
+      --set 'telemetryCollector.cloud.clientId.secretKey=client-id-key' \
+      --set 'telemetryCollector.clientSecret.secretName=client-secret-id-name' \
+      --set 'telemetryCollector.clientSecret.secretKey=client-secret-key-name' \
+      .
+  [ "$status" -eq 1 ]
+
+  [[ "$output" =~ "When telemetryCollector.cloud.clientId.secretName is set, global.cloud.resourceId.secretName, telemetryCollector.cloud.clientSecret.secretName must also be set." ]]
+}
+
+@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientId.secretName is set but telemetryCollector.cloud.clientId.secretKey is not set." {
+  cd `chart_dir`
+  run helm template \
+      -s templates/telemetry-collector-v2-deployment.yaml \
+      --set 'ui.enabled=false' \
+      --set 'global.experiments[0]=resource-apis' \
+      --set 'telemetryCollector.enabled=true' \
+      --set 'telemetryCollector.image=bar' \
+      --set 'telemetryCollector.cloud.clientId.secretName=client-id-name' \
+      --set 'telemetryCollector.cloud.clientSecret.secretName=client-secret-name' \
+      --set 'global.cloud.resourceId.secretName=resource-id-name' \
+      .
+  [ "$status" -eq 1 ]
+
+  [[ "$output" =~ "When either telemetryCollector.cloud.clientId.secretName or telemetryCollector.cloud.clientId.secretKey is defined, both must be set." ]]
+}
+
+@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientSecret.secretName is set but telemetryCollector.cloud.clientSecret.secretKey is not set." 
{ + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.cloud.clientId.secretName=client-id-name' \ + --set 'telemetryCollector.cloud.clientId.secretKey=client-id-key' \ + --set 'telemetryCollector.cloud.clientSecret.secretName=client-secret-name' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When either telemetryCollector.cloud.clientSecret.secretName or telemetryCollector.cloud.clientSecret.secretKey is defined, both must be set." ]] +} + +@test "telemetryCollector/Deployment(V2): fails when telemetryCollector.cloud.clientId and telemetryCollector.cloud.clientSecret is set but global.cloud.resourceId.secretKey is not set." { + cd `chart_dir` + run helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.cloud.clientId.secretName=client-id-name' \ + --set 'telemetryCollector.cloud.clientId.secretKey=client-id-key' \ + --set 'telemetryCollector.cloud.clientSecret.secretName=client-secret-name' \ + --set 'telemetryCollector.cloud.clientSecret.secretKey=client-secret-key' \ + --set 'global.cloud.resourceId.secretName=resource-id-name' \ + . + [ "$status" -eq 1 ] + + [[ "$output" =~ "When telemetryCollector has clientId and clientSecret .global.cloud.resourceId.secretKey must be set" ]] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "telemetryCollector/Deployment(V2): sets -tls-disabled args when when not using TLS." { + cd `chart_dir` + + local flags=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=false' \ + . | yq -r .spec.template.spec.containers[1].args) + + local actual=$(echo $flags | yq -r '. | any(contains("-tls-disabled"))') + [ "${actual}" = 'true' ] + +} + +@test "telemetryCollector/Deployment(V2): -ca-certs set correctly when using TLS." { + cd `chart_dir` + local flags=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].args' | tee /dev/stderr) + + local actual=$(echo $flags | yq -r '. 
| any(contains("-ca-certs=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + [ "${actual}" = 'true' ] +} + +#-------------------------------------------------------------------- +# External Server + +@test "telemetryCollector/Deployment(V2): sets external server args when global.tls.enabled and externalServers.enabled" { + cd `chart_dir` + local flags=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.tls.enabled=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.httpsPort=8501' \ + --set 'externalServers.tlsServerName=foo.tls.server' \ + --set 'externalServers.useSystemRoots=true' \ + --set 'server.enabled=false' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].args' | tee /dev/stderr) + + local actual=$(echo $flags | yq -r '. | any(contains("-ca-certs=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + [ "${actual}" = 'false' ] + + local actual=$(echo $flags | yq -r '. | any(contains("-tls-server-name=foo.tls.server"))' | tee /dev/stderr) + [ "${actual}" = 'true' ] + + local actual=$(echo $flags | jq -r '. | any(contains("-addresses=external-consul.host"))' | tee /dev/stderr) + [ "${actual}" = 'true' ] +} + +#-------------------------------------------------------------------- +# Admin Partitions + +@test "telemetryCollector/Deployment(V2): partition flags are set when using admin partitions" { + cd `chart_dir` + local flags=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=hashi' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[1].args' | tee /dev/stderr) + + local actual=$(echo $flags | jq -r '. | any(contains("-login-partition=hashi"))' | tee /dev/stderr) + [ "${actual}" = 'true' ] + + local actual=$(echo $flags | jq -r '. | any(contains("-service-partition=hashi"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): consul-ca-cert volume mount is not set when using externalServers and useSystemRoots" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.useSystemRoots=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "telemetryCollector/Deployment(V2): config volume mount is set when config exists" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.customExporterConfig="foo"' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "config") | .name' | tee /dev/stderr) + [ "${actual}" = "config" ] +} + +@test "telemetryCollector/Deployment(V2): config flag is set when config exists" { + cd `chart_dir` + local flags=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'telemetryCollector.customExporterConfig="foo"' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command') + + local actual=$(echo $flags | yq -r '. | any(contains("-config-file-path /consul/config/config.json"))') + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): consul-ca-cert volume mount is not set on acl-init when using externalServers and useSystemRoots" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.hosts[0]=external-consul.host' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} +#-------------------------------------------------------------------- +# trustedCAs + +@test "telemetryCollector/Deployment(V2): trustedCAs: if trustedCAs is set command is modified correctly" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'global.trustedCAs[0]=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("cat < /trusted-cas/custom-ca-0.pem")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): trustedCAs: if multiple Trusted cas were set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'global.trustedCAs[0]=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + --set 'global.trustedCAs[1]=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0]' | tee /dev/stderr) + + + local actual=$(echo $object | jq '.command[2] | contains("cat < /trusted-cas/custom-ca-0.pem")' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual=$(echo $object | jq '.command[2] | contains("cat < /trusted-cas/custom-ca-1.pem")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): trustedCAs: if trustedCAs is set /trusted-cas volumeMount is added" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'global.trustedCAs[0]=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . | tee /dev/stderr | yq -r '.spec.template.spec' | tee /dev/stderr) + local actual=$(echo $object | jq -r '.volumes[] | select(.name == "trusted-cas") | .name' | tee /dev/stderr) + [ "${actual}" = "trusted-cas" ] +} + + +@test "telemetryCollector/Deployment(V2): trustedCAs: if trustedCAs is set SSL_CERT_DIR env var is set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'global.trustedCAs[0]=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . | tee /dev/stderr | yq -r '.spec.template.spec.containers[0].env[] | select(.name == "SSL_CERT_DIR")' | tee /dev/stderr) + + local actual=$(echo $object | jq -r '.name' | tee /dev/stderr) + [ "${actual}" = "SSL_CERT_DIR" ] + local actual=$(echo $object | jq -r '.value' | tee /dev/stderr) + [ "${actual}" = "/etc/ssl/certs:/trusted-cas" ] +} + +#-------------------------------------------------------------------- +# extraLabels + +@test "telemetryCollector/Deployment(V2): no extra labels defined by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.labels | del(."app") | del(."chart") | del(."release") | del(."component") | del(."consul.hashicorp.com/connect-inject-managed-by")' \ + | tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "telemetryCollector/Deployment(V2): extra global labels can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.extraLabels.foo=bar' \ + . 
| tee /dev/stderr) + local actualBar=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) + [ "${actualBar}" = "bar" ] + local actualTemplateBar=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) + [ "${actualTemplateBar}" = "bar" ] +} + +@test "telemetryCollector/Deployment(V2): multiple global extra labels can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + --set 'global.extraLabels.foo=bar' \ + --set 'global.extraLabels.baz=qux' \ + . | tee /dev/stderr) + local actualFoo=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) + local actualBaz=$(echo "${actual}" | yq -r '.metadata.labels.baz' | tee /dev/stderr) + [ "${actualFoo}" = "bar" ] + [ "${actualBaz}" = "qux" ] + local actualTemplateFoo=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) + local actualTemplateBaz=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.baz' | tee /dev/stderr) + [ "${actualTemplateFoo}" = "bar" ] + [ "${actualTemplateBaz}" = "qux" ] +} + +#-------------------------------------------------------------------- +# extraEnvironmentVariables + +@test "telemetryCollector/Deployment(V2): extra environment variables" { + cd `chart_dir` + local object=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.extraEnvironmentVars.HCP_AUTH_TLS=insecure' \ + --set 'telemetryCollector.extraEnvironmentVars.foo=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r 'map(select(.name == "HCP_AUTH_TLS")) | .[0].value' | tee /dev/stderr) + [ "${actual}" = "insecure" ] + + local actual=$(echo $object | + yq -r 'map(select(.name == "foo")) | .[0].value' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +#-------------------------------------------------------------------- +# logLevel + +@test "telemetryCollector/Deployment(V2): use global.logLevel by default" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level=info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): override global.logLevel when telemetryCollector.logLevel is set" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.logLevel=warn' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level=warn"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): use global.logLevel by default for dataplane container" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].args' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level=info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "telemetryCollector/Deployment(V2): override global.logLevel when telemetryCollector.logLevel is set for dataplane container" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'ui.enabled=false' \ + --set 'global.experiments[0]=resource-apis' \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.logLevel=debug' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].args' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level=debug"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.experiments=["resource-apis"] + +@test "telemetryCollector/Deployment(V2): disabled when V2 is disabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/telemetry-collector-v2-deployment.yaml \ + --set 'telemetryCollector.enabled=true' \ + --set 'telemetryCollector.image=bar' \ + . +} \ No newline at end of file diff --git a/control-plane/commands.go b/control-plane/commands.go index e2bcb0f693..01f5163bc3 100644 --- a/control-plane/commands.go +++ b/control-plane/commands.go @@ -6,6 +6,8 @@ package main import ( "os" + "github.com/mitchellh/cli" + cmdACLInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/acl-init" cmdConnectInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/connect-init" cmdConsulLogout "github.com/hashicorp/consul-k8s/control-plane/subcommand/consul-logout" @@ -18,6 +20,7 @@ import ( cmdGossipEncryptionAutogenerate "github.com/hashicorp/consul-k8s/control-plane/subcommand/gossip-encryption-autogenerate" cmdInjectConnect "github.com/hashicorp/consul-k8s/control-plane/subcommand/inject-connect" cmdInstallCNI "github.com/hashicorp/consul-k8s/control-plane/subcommand/install-cni" + cmdMeshInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/mesh-init" cmdPartitionInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/partition-init" cmdServerACLInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/server-acl-init" cmdSyncCatalog "github.com/hashicorp/consul-k8s/control-plane/subcommand/sync-catalog" @@ -25,7 +28,6 @@ import ( cmdVersion "github.com/hashicorp/consul-k8s/control-plane/subcommand/version" webhookCertManager "github.com/hashicorp/consul-k8s/control-plane/subcommand/webhook-cert-manager" "github.com/hashicorp/consul-k8s/control-plane/version" - "github.com/mitchellh/cli" ) // Commands is the mapping of all available consul-k8s commands. 
@@ -43,6 +45,10 @@ func init() { return &cmdConnectInit.Command{UI: ui}, nil }, + "mesh-init": func() (cli.Command, error) { + return &cmdMeshInit.Command{UI: ui}, nil + }, + "inject-connect": func() (cli.Command, error) { return &cmdInjectConnect.Command{UI: ui}, nil }, diff --git a/control-plane/connect-inject/constants/annotations_and_labels.go b/control-plane/connect-inject/constants/annotations_and_labels.go index cd563f2436..bc28930eae 100644 --- a/control-plane/connect-inject/constants/annotations_and_labels.go +++ b/control-plane/connect-inject/constants/annotations_and_labels.go @@ -235,6 +235,12 @@ const ( // port is the local port in the pod that the listener will bind to. It can // be a named port. AnnotationMeshDestinations = "consul.hashicorp.com/mesh-service-destinations" + + // AnnotationMeshInjectMountVolumes is the key of the annotation that controls whether + // the data volume that mesh inject uses to store data including the Consul ACL token + // is mounted to other containers in the pod. It is a comma-separated list of container names + // to mount the volume on. It will be mounted at the path `/consul/mesh-inject`. + AnnotationMeshInjectMountVolumes = "consul.hashicorp.com/mesh-inject-mount-volume" ) // Annotations used by Prometheus. diff --git a/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller.go b/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller.go index ad32510861..dca7dad950 100644 --- a/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller.go +++ b/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller.go @@ -5,6 +5,7 @@ package endpointsv2 import ( "context" "net" + "sort" "strings" mapset "github.com/deckarep/golang-set" @@ -16,13 +17,14 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/namespaces" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/go-multierror" ) const ( @@ -304,6 +306,10 @@ func getWorkloadSelector(podPrefixes, podExactNames map[string]any) *pbcatalog.W for v := range podExactNames { workloads.Names = append(workloads.Names, v) } + // sort for stability + sort.Strings(workloads.Prefixes) + sort.Strings(workloads.Names) + return workloads } diff --git a/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller_test.go b/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller_test.go index 706a9d60a0..2458444008 100644 --- a/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller_test.go +++ b/control-plane/connect-inject/controllers/endpointsv2/endpoints_controller_test.go @@ -6,9 +6,10 @@ package endpointsv2 import ( "context" "fmt" - "github.com/google/go-cmp/cmp/cmpopts" "testing" + "github.com/google/go-cmp/cmp/cmpopts" + mapset "github.com/deckarep/golang-set" logrtest "github.com/go-logr/logr/testr" "github.com/google/go-cmp/cmp" @@ -26,14 +27,15 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" 
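A note on the "sort for stability" addition to getWorkloadSelector in endpoints_controller.go above: the selector's Prefixes and Names are collected from Go maps, and Go randomizes map iteration order, so without sorting, two reconciles of an unchanged Endpoints object could produce selectors whose slices differ only in element order and look like spurious updates to the Consul Service resource. A minimal standalone sketch of the idea, using plain string slices in place of the pbcatalog.WorkloadSelector fields (not the controller's actual code):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Pod names gathered into a set (a map); iteration order is not deterministic.
	podExactNames := map[string]any{"web-abc12": nil, "web-def34": nil, "web-ghi56": nil}

	names := make([]string, 0, len(podExactNames))
	for name := range podExactNames {
		names = append(names, name)
	}

	// Sorting makes the generated selector byte-for-byte stable across reconciles.
	sort.Strings(names)
	fmt.Println(names) // always [web-abc12 web-def34 web-ghi56]
}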
- "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/go-uuid" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" ) var ( diff --git a/control-plane/connect-inject/webhook_v2/consul_dataplane_sidecar_test.go b/control-plane/connect-inject/webhook_v2/consul_dataplane_sidecar_test.go index aaa94a191d..12cff4289d 100644 --- a/control-plane/connect-inject/webhook_v2/consul_dataplane_sidecar_test.go +++ b/control-plane/connect-inject/webhook_v2/consul_dataplane_sidecar_test.go @@ -722,7 +722,7 @@ func TestHandlerConsulDataplaneSidecar_UserVolumeMounts(t *testing.T) { }, expectedContainerVolumeMounts: []corev1.VolumeMount{ { - Name: "consul-connect-inject-data", + Name: "consul-mesh-inject-data", MountPath: "/consul/mesh-inject", }, { diff --git a/control-plane/connect-inject/webhook_v2/container_init.go b/control-plane/connect-inject/webhook_v2/container_init.go index ebf4b0e336..ce9145be55 100644 --- a/control-plane/connect-inject/webhook_v2/container_init.go +++ b/control-plane/connect-inject/webhook_v2/container_init.go @@ -29,13 +29,14 @@ type initContainerCommandData struct { ServiceAccountName string AuthMethod string - // Log settings for the connect-init command. + // Log settings for the mesh-init command. LogLevel string LogJSON bool } -// containerInit returns the init container spec for connect-init that polls for the service and the connect proxy service to be registered -// so that it can save the proxy service id to the shared volume and boostrap Envoy with the proxy-id. +// containerInit returns the init container spec for mesh-init that polls for the workload's bootstrap config +// so that it optionally set up iptables for transparent proxy. Otherwise, it ensures the workload exists before +// the pod starts. func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod) (corev1.Container, error) { // Check if tproxy is enabled on this pod. 
tproxyEnabled, err := common.TransparentProxyEnabled(namespace, pod, w.EnableTransparentProxy) @@ -53,7 +54,7 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod) volMounts := []corev1.VolumeMount{ { Name: volumeName, - MountPath: "/consul/connect-inject", + MountPath: "/consul/mesh-inject", }, } @@ -98,14 +99,6 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod) FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}, }, }, - { - Name: "NODE_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "spec.nodeName", - }, - }, - }, { Name: "CONSUL_ADDRESSES", Value: w.ConsulAddress, @@ -122,10 +115,6 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod) Name: "CONSUL_API_TIMEOUT", Value: w.ConsulConfig.APITimeout.String(), }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, }, Resources: w.InitContainerResources, VolumeMounts: volMounts, @@ -222,7 +211,7 @@ func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod) Value: redirectTrafficConfig, }) - // Running consul connect redirect-traffic with iptables + // Running consul mesh-init redirect-traffic with iptables // requires both being a root user and having NET_ADMIN capability. container.SecurityContext = &corev1.SecurityContext{ RunAsUser: pointer.Int64(rootUserAndGroupID), @@ -290,11 +279,7 @@ func splitCommaSeparatedItemsFromAnnotation(annotation string, pod corev1.Pod) [ // initContainerCommandTpl is the template for the command executed by // the init container. const initContainerCommandTpl = ` -consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ +consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ -log-level={{ .LogLevel }} \ -log-json={{ .LogJSON }} \ - {{- if .AuthMethod }} - -service-account-name="{{ .ServiceAccountName }}" \ - -service-name="{{ .ServiceName }}" \ - {{- end }} ` diff --git a/control-plane/connect-inject/webhook_v2/container_init_test.go b/control-plane/connect-inject/webhook_v2/container_init_test.go index 6931122124..87cb83306c 100644 --- a/control-plane/connect-inject/webhook_v2/container_init_test.go +++ b/control-plane/connect-inject/webhook_v2/container_init_test.go @@ -68,7 +68,7 @@ func TestHandlerContainerInit(t *testing.T) { ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, LogLevel: "info", }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ -log-level=info \ -log-json=false \`, []corev1.EnvVar{ @@ -88,10 +88,6 @@ func TestHandlerContainerInit(t *testing.T) { Name: "CONSUL_API_TIMEOUT", Value: "0s", }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, }, }, @@ -115,11 +111,9 @@ func TestHandlerContainerInit(t *testing.T) { LogLevel: "debug", LogJSON: true, }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ -log-level=debug \ - -log-json=true \ - -service-account-name="a-service-account-name" \ - -service-name="web" \`, + -log-json=true \`, []corev1.EnvVar{ { Name: "CONSUL_ADDRESSES", @@ -137,10 +131,6 @@ func TestHandlerContainerInit(t *testing.T) { Name: "CONSUL_API_TIMEOUT", Value: "5s", }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, { Name: 
"CONSUL_LOGIN_AUTH_METHOD", Value: "an-auth-method", @@ -165,7 +155,7 @@ func TestHandlerContainerInit(t *testing.T) { require.NoError(t, err) actual := strings.Join(container.Command, " ") require.Contains(t, actual, tt.ExpCmd) - require.EqualValues(t, container.Env[3:], tt.ExpEnv) + require.EqualValues(t, container.Env[2:], tt.ExpEnv) }) } } @@ -386,7 +376,7 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { ConsulAddress: "10.0.0.0", ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ -log-level=info \ -log-json=false \`, []corev1.EnvVar{ @@ -406,10 +396,6 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { Name: "CONSUL_API_TIMEOUT", Value: "5s", }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, { Name: "CONSUL_NAMESPACE", Value: "default", @@ -429,7 +415,7 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { ConsulAddress: "10.0.0.0", ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ -log-level=info \ -log-json=false \`, []corev1.EnvVar{ @@ -449,10 +435,6 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { Name: "CONSUL_API_TIMEOUT", Value: "5s", }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, { Name: "CONSUL_NAMESPACE", Value: "default", @@ -476,7 +458,7 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { ConsulAddress: "10.0.0.0", ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ -log-level=info \ -log-json=false \`, []corev1.EnvVar{ @@ -496,10 +478,6 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { Name: "CONSUL_API_TIMEOUT", Value: "5s", }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, { Name: "CONSUL_NAMESPACE", Value: "non-default", @@ -519,7 +497,7 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { ConsulAddress: "10.0.0.0", ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ -log-level=info \ -log-json=false \`, []corev1.EnvVar{ @@ -539,10 +517,6 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { Name: "CONSUL_API_TIMEOUT", Value: "5s", }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, { Name: "CONSUL_NAMESPACE", Value: "non-default", @@ -567,11 +541,9 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { ConsulAddress: "10.0.0.0", ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + 
`/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ -log-level=info \ - -log-json=false \ - -service-account-name="web" \ - -service-name="" \`, + -log-json=false \`, []corev1.EnvVar{ { Name: "CONSUL_ADDRESSES", @@ -589,10 +561,6 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { Name: "CONSUL_API_TIMEOUT", Value: "5s", }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, { Name: "CONSUL_LOGIN_AUTH_METHOD", Value: "auth-method", @@ -638,11 +606,9 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { ConsulAddress: "10.0.0.0", ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + `/bin/sh -ec consul-k8s-control-plane mesh-init -proxy-name=${POD_NAME} \ -log-level=info \ - -log-json=false \ - -service-account-name="web" \ - -service-name="" \`, + -log-json=false \`, []corev1.EnvVar{ { Name: "CONSUL_ADDRESSES", @@ -660,10 +626,6 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { Name: "CONSUL_API_TIMEOUT", Value: "5s", }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, { Name: "CONSUL_LOGIN_AUTH_METHOD", Value: "auth-method", @@ -705,7 +667,7 @@ func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { actual := strings.Join(container.Command, " ") require.Equal(t, tt.Cmd, actual) if tt.ExpEnv != nil { - require.Equal(t, tt.ExpEnv, container.Env[3:]) + require.Equal(t, tt.ExpEnv, container.Env[2:]) } }) } @@ -745,18 +707,18 @@ func TestHandlerContainerInit_WithTLSAndCustomPorts(t *testing.T) { } container, err := w.containerInit(testNS, *pod) require.NoError(t, err) - require.Equal(t, "CONSUL_ADDRESSES", container.Env[3].Name) - require.Equal(t, w.ConsulAddress, container.Env[3].Value) - require.Equal(t, "CONSUL_GRPC_PORT", container.Env[4].Name) - require.Equal(t, fmt.Sprintf("%d", w.ConsulConfig.GRPCPort), container.Env[4].Value) - require.Equal(t, "CONSUL_HTTP_PORT", container.Env[5].Name) - require.Equal(t, fmt.Sprintf("%d", w.ConsulConfig.HTTPPort), container.Env[5].Value) + require.Equal(t, "CONSUL_ADDRESSES", container.Env[2].Name) + require.Equal(t, w.ConsulAddress, container.Env[2].Value) + require.Equal(t, "CONSUL_GRPC_PORT", container.Env[3].Name) + require.Equal(t, fmt.Sprintf("%d", w.ConsulConfig.GRPCPort), container.Env[3].Value) + require.Equal(t, "CONSUL_HTTP_PORT", container.Env[4].Name) + require.Equal(t, fmt.Sprintf("%d", w.ConsulConfig.HTTPPort), container.Env[4].Value) if w.TLSEnabled { - require.Equal(t, "CONSUL_USE_TLS", container.Env[8].Name) - require.Equal(t, "true", container.Env[8].Value) + require.Equal(t, "CONSUL_USE_TLS", container.Env[6].Name) + require.Equal(t, "true", container.Env[6].Value) if caProvided { - require.Equal(t, "CONSUL_CACERT_PEM", container.Env[9].Name) - require.Equal(t, "consul-ca-cert", container.Env[9].Value) + require.Equal(t, "CONSUL_CACERT_PEM", container.Env[7].Name) + require.Equal(t, "consul-ca-cert", container.Env[7].Value) } else { for _, ev := range container.Env { if ev.Name == "CONSUL_CACERT_PEM" { diff --git a/control-plane/connect-inject/webhook_v2/container_volume.go b/control-plane/connect-inject/webhook_v2/container_volume.go index 8de3d5b6f5..5c7e65c905 100644 --- a/control-plane/connect-inject/webhook_v2/container_volume.go +++ b/control-plane/connect-inject/webhook_v2/container_volume.go @@ -9,7 +9,7 @@ 
import ( // volumeName is the name of the volume that is created to store the // Consul Connect injection data. -const volumeName = "consul-connect-inject-data" +const volumeName = "consul-mesh-inject-data" // containerVolume returns the volume data to add to the pod. This volume // is used for shared data between containers. diff --git a/control-plane/connect-inject/webhook_v2/mesh_webhook.go b/control-plane/connect-inject/webhook_v2/mesh_webhook.go index efe33985f2..d41601b6b7 100644 --- a/control-plane/connect-inject/webhook_v2/mesh_webhook.go +++ b/control-plane/connect-inject/webhook_v2/mesh_webhook.go @@ -453,13 +453,13 @@ func (w *MeshWebhook) overwriteProbes(ns corev1.Namespace, pod *corev1.Pod) erro } func (w *MeshWebhook) injectVolumeMount(pod corev1.Pod) { - containersToInject := splitCommaSeparatedItemsFromAnnotation(constants.AnnotationInjectMountVolumes, pod) + containersToInject := splitCommaSeparatedItemsFromAnnotation(constants.AnnotationMeshInjectMountVolumes, pod) for index, container := range pod.Spec.Containers { if slices.Contains(containersToInject, container.Name) { pod.Spec.Containers[index].VolumeMounts = append(pod.Spec.Containers[index].VolumeMounts, corev1.VolumeMount{ Name: volumeName, - MountPath: "/consul/connect-inject", + MountPath: "/consul/mesh-inject", }) } } diff --git a/control-plane/connect-inject/webhook_v2/mesh_webhook_test.go b/control-plane/connect-inject/webhook_v2/mesh_webhook_test.go index b2b1f47392..6f3d1f339a 100644 --- a/control-plane/connect-inject/webhook_v2/mesh_webhook_test.go +++ b/control-plane/connect-inject/webhook_v2/mesh_webhook_test.go @@ -365,7 +365,7 @@ func TestHandlerHandle(t *testing.T) { Object: encodeRaw(t, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationInjectMountVolumes: "", + constants.AnnotationMeshInjectMountVolumes: "", }, }, Spec: basicSpec, @@ -419,7 +419,7 @@ func TestHandlerHandle(t *testing.T) { Object: encodeRaw(t, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationInjectMountVolumes: "web,unknown,web_three_point_oh", + constants.AnnotationMeshInjectMountVolumes: "web,unknown,web_three_point_oh", }, }, Spec: corev1.PodSpec{ @@ -1101,7 +1101,7 @@ func TestHandlerHandle_ValidateOverwriteProbes(t *testing.T) { value := actual[i].Value.([]any) valueMap := value[0].(map[string]any) envs := valueMap["env"].([]any) - redirectEnv := envs[8].(map[string]any) + redirectEnv := envs[6].(map[string]any) require.Equal(t, redirectEnv["name"].(string), "CONSUL_REDIRECT_TRAFFIC_CONFIG") iptablesJson := redirectEnv["value"].(string) diff --git a/control-plane/consul/dataplane_client.go b/control-plane/consul/dataplane_client.go new file mode 100644 index 0000000000..628d353252 --- /dev/null +++ b/control-plane/consul/dataplane_client.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "fmt" + + "github.com/hashicorp/consul/proto-public/pbdataplane" +) + +// NewDataplaneServiceClient creates a pbdataplane.DataplaneServiceClient for gathering proxy bootstrap config. +// It is initialized with a consul-server-connection-manager Watcher to continuously find Consul +// server addresses. 
+func NewDataplaneServiceClient(watcher ServerConnectionManager) (pbdataplane.DataplaneServiceClient, error) { + + // We recycle the GRPC connection from the discovery client because it + // should have all the necessary dial options, including the resolver that + // continuously updates Consul server addresses. Otherwise, a lot of code from consul-server-connection-manager + // would need to be duplicated + state, err := watcher.State() + if err != nil { + return nil, fmt.Errorf("unable to get connection manager state: %w", err) + } + dpClient := pbdataplane.NewDataplaneServiceClient(state.GRPCConn) + + return dpClient, nil +} diff --git a/control-plane/consul/dataplane_client_test.go b/control-plane/consul/dataplane_client_test.go new file mode 100644 index 0000000000..4e76b80125 --- /dev/null +++ b/control-plane/consul/dataplane_client_test.go @@ -0,0 +1,206 @@ +package consul + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/consul-server-connection-manager/discovery" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbdataplane" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" +) + +func Test_NewDataplaneServiceClient(t *testing.T) { + + var serverConfig *testutil.TestServerConfig + server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + serverConfig = c + }) + require.NoError(t, err) + defer server.Stop() + + server.WaitForLeader(t) + server.WaitForActiveCARoot(t) + + t.Logf("server grpc address on %d", serverConfig.Ports.GRPC) + + // Create discovery configuration + discoverConfig := discovery.Config{ + Addresses: "127.0.0.1", + GRPCPort: serverConfig.Ports.GRPC, + } + + opts := hclog.LoggerOptions{Name: "dataplane-service-client"} + logger := hclog.New(&opts) + + watcher, err := discovery.NewWatcher(context.Background(), discoverConfig, logger) + require.NoError(t, err) + require.NotNil(t, watcher) + + defer watcher.Stop() + go watcher.Run() + + // Create a workload and create a proxyConfiguration + createWorkload(t, watcher, "foo") + pc := createProxyConfiguration(t, watcher, "foo") + + client, err := NewDataplaneServiceClient(watcher) + require.NoError(t, err) + require.NotNil(t, client) + require.NotNil(t, watcher) + + req := &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ProxyId: "foo", + Namespace: "default", + Partition: "default", + } + + res, err := client.GetEnvoyBootstrapParams(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, "foo", res.GetIdentity()) + require.Equal(t, "default", res.GetNamespace()) + require.Equal(t, "default", res.GetPartition()) + + if diff := cmp.Diff(pc.BootstrapConfig, res.GetBootstrapConfig(), protocmp.Transform()); diff != "" { + t.Errorf("unexpected difference:\n%v", diff) + } + + // NOTE: currently it isn't possible to test that the grpc connection responds to changes in the + // discovery server. The discovery response only includes the IP address of the host, so all servers + // for a local test are de-duplicated as a single entry. 
+} + +func createWorkload(t *testing.T, watcher ServerConnectionManager, name string) { + + client, err := NewResourceServiceClient(watcher) + require.NoError(t, err) + + workload := &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1", Ports: []string{"public", "admin", "mesh"}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "public": { + Port: 80, + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + "admin": { + Port: 8080, + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + "mesh": { + Port: 20000, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + NodeName: "k8s-node-0-virtual", + Identity: name, + } + + id := &pbresource.ID{ + Name: name, + Type: &pbresource.Type{ + Group: "catalog", + GroupVersion: "v1alpha1", + Kind: "Workload", + }, + Tenancy: &pbresource.Tenancy{ + Partition: "default", + Namespace: "default", + PeerName: "local", + }, + } + + proto, err := anypb.New(workload) + require.NoError(t, err) + + req := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: id, + Data: proto, + }, + } + + _, err = client.Write(context.Background(), req) + require.NoError(t, err) + + resourceHasPersisted(t, client, id) +} + +func createProxyConfiguration(t *testing.T, watcher ServerConnectionManager, name string) *pbmesh.ProxyConfiguration { + + client, err := NewResourceServiceClient(watcher) + require.NoError(t, err) + + pc := &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{"foo"}, + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + StatsBindAddr: "127.0.0.2:1234", + ReadyBindAddr: "127.0.0.3:5678", + }, + } + + id := &pbresource.ID{ + Name: name, + Type: &pbresource.Type{ + Group: "mesh", + GroupVersion: "v1alpha1", + Kind: "ProxyConfiguration", + }, + Tenancy: &pbresource.Tenancy{ + Partition: "default", + Namespace: "default", + PeerName: "local", + }, + } + + proto, err := anypb.New(pc) + require.NoError(t, err) + + req := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: id, + Data: proto, + }, + } + + _, err = client.Write(context.Background(), req) + require.NoError(t, err) + + resourceHasPersisted(t, client, id) + return pc +} + +// resourceHasPersisted checks that a recently written resource exists in the Consul +// state store with a valid version. This must be true before a resource is overwritten +// or deleted. +// TODO: refactor so that there isn't an import cycle when using test.ResourceHasPersisted. +func resourceHasPersisted(t *testing.T, client pbresource.ResourceServiceClient, id *pbresource.ID) { + req := &pbresource.ReadRequest{Id: id} + + require.Eventually(t, func() bool { + res, err := client.Read(context.Background(), req) + if err != nil { + return false + } + + if res.GetResource().GetVersion() == "" { + return false + } + + return true + }, 5*time.Second, + time.Second) +} diff --git a/control-plane/subcommand/mesh-init/command.go b/control-plane/subcommand/mesh-init/command.go new file mode 100644 index 0000000000..91ea8129cf --- /dev/null +++ b/control-plane/subcommand/mesh-init/command.go @@ -0,0 +1,283 @@ +// Copyright (c) HashiCorp, Inc. 
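For reference, the end-to-end call pattern for NewDataplaneServiceClient, which the mesh-init command later in this patch follows, looks roughly like the standalone sketch below. The server address, gRPC port, and proxy name are placeholder values, and errors are collapsed to panics for brevity; this mirrors Test_NewDataplaneServiceClient above rather than documenting a new API.

package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/consul-server-connection-manager/discovery"
	"github.com/hashicorp/consul/proto-public/pbdataplane"
	"github.com/hashicorp/go-hclog"

	"github.com/hashicorp/consul-k8s/control-plane/consul"
)

func main() {
	// Assumed values: a local Consul server exposing gRPC on 8502.
	watcher, err := discovery.NewWatcher(context.Background(),
		discovery.Config{Addresses: "127.0.0.1", GRPCPort: 8502},
		hclog.New(&hclog.LoggerOptions{Name: "example"}))
	if err != nil {
		panic(err)
	}
	go watcher.Run()
	defer watcher.Stop()

	dpClient, err := consul.NewDataplaneServiceClient(watcher)
	if err != nil {
		panic(err)
	}

	// ProxyId is the pod name, which is also the name of the Workload in the V2 catalog.
	res, err := dpClient.GetEnvoyBootstrapParams(context.Background(), &pbdataplane.GetEnvoyBootstrapParamsRequest{
		ProxyId:   "foo",
		Namespace: "default",
		Partition: "default",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.GetBootstrapConfig()) // the pbmesh.BootstrapConfig mesh-init uses for iptables setup
}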
+// SPDX-License-Identifier: MPL-2.0 + +package meshinit + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/cenkalti/backoff" + "github.com/hashicorp/consul-server-connection-manager/discovery" + "github.com/hashicorp/consul/proto-public/pbdataplane" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/sdk/iptables" + "github.com/hashicorp/go-hclog" + "github.com/mitchellh/cli" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + "github.com/hashicorp/consul-k8s/control-plane/version" +) + +const ( + // The number of times to attempt to read this proxy registration (120s). + defaultMaxPollingRetries = 120 +) + +type Command struct { + UI cli.Ui + + flagProxyName string + + maxPollingAttempts uint64 // Number of times to poll Consul for proxy registrations. + + flagRedirectTrafficConfig string + flagLogLevel string + flagLogJSON bool + + flagSet *flag.FlagSet + consul *flags.ConsulFlags + + once sync.Once + help string + logger hclog.Logger + + watcher *discovery.Watcher + + // Only used in tests. + iptablesProvider iptables.Provider + iptablesConfig iptables.Config +} + +func (c *Command) init() { + c.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + + // V2 Flags + c.flagSet.StringVar(&c.flagProxyName, "proxy-name", os.Getenv("PROXY_NAME"), "The Consul proxy name. This is the K8s Pod name, which is also the name of the Workload in Consul. (Required)") + + // Universal flags + c.flagSet.StringVar(&c.flagRedirectTrafficConfig, "redirect-traffic-config", os.Getenv("CONSUL_REDIRECT_TRAFFIC_CONFIG"), "Config (in JSON format) to configure iptables for this pod.") + c.flagSet.StringVar(&c.flagLogLevel, "log-level", "info", + "Log verbosity level. Supported values (in order of detail) are \"trace\", "+ + "\"debug\", \"info\", \"warn\", and \"error\".") + c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false, + "Enable or disable JSON output format for logging.") + + if c.maxPollingAttempts == 0 { + c.maxPollingAttempts = defaultMaxPollingRetries + } + + c.consul = &flags.ConsulFlags{} + flags.Merge(c.flagSet, c.consul.Flags()) + c.help = flags.Usage(help, c.flagSet) +} + +func (c *Command) Run(args []string) int { + c.once.Do(c.init) + + if err := c.flagSet.Parse(args); err != nil { + return 1 + } + // Validate flags + if err := c.validateFlags(); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + if c.consul.Namespace == "" { + c.consul.Namespace = constants.DefaultConsulNS + } + if c.consul.Partition == "" { + c.consul.Partition = constants.DefaultConsulPartition + } + + // Set up logging. + if c.logger == nil { + var err error + c.logger, err = common.Logger(c.flagLogLevel, c.flagLogJSON) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + } + + // Create Consul API config object. + consulConfig := c.consul.ConsulClientConfig() + + // Create a context to be used by the processes started in this command. + ctx, cancelFunc := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancelFunc() + + // Start Consul server Connection manager. + serverConnMgrCfg, err := c.consul.ConsulServerConnMgrConfig() + // Disable server watch because we only need to get server IPs once. 
+ serverConnMgrCfg.ServerWatchDisabled = true + if err != nil { + c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) + return 1 + } + if c.watcher == nil { + c.watcher, err = discovery.NewWatcher(ctx, serverConnMgrCfg, c.logger.Named("consul-server-connection-manager")) + if err != nil { + c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) + return 1 + } + go c.watcher.Run() // The actual ACL login happens here + defer c.watcher.Stop() + } + + state, err := c.watcher.State() + if err != nil { + c.logger.Error("Unable to get state from consul-server-connection-manager", "error", err) + return 1 + } + + consulClient, err := consul.NewClientFromConnMgrState(consulConfig, state) + if err != nil { + c.logger.Error("Unable to get client connection", "error", err) + return 1 + } + + if version.IsFIPS() { + // make sure we are also using FIPS Consul + var versionInfo map[string]interface{} + _, err := consulClient.Raw().Query("/v1/agent/version", versionInfo, nil) + if err != nil { + c.logger.Warn("This is a FIPS build of consul-k8s, which should be used with FIPS Consul. Unable to verify FIPS Consul while setting up Consul API client.") + } + if val, ok := versionInfo["FIPS"]; !ok || val == "" { + c.logger.Warn("This is a FIPS build of consul-k8s, which should be used with FIPS Consul. A non-FIPS version of Consul was detected.") + } + } + + // todo (agentless): this should eventually be passed to consul-dataplane as a string so we don't need to write it to file. + if c.consul.UseTLS && c.consul.CACertPEM != "" { + if err = common.WriteFileWithPerms(constants.ConsulCAFile, c.consul.CACertPEM, 0444); err != nil { + c.logger.Error("error writing CA cert file", "error", err) + return 1 + } + } + + dc, err := consul.NewDataplaneServiceClient(c.watcher) + if err != nil { + c.logger.Error("failed to create resource client", "error", err) + return 1 + } + + var bootstrapConfig pbmesh.BootstrapConfig + if err := backoff.Retry(c.getBootstrapParams(dc, &bootstrapConfig), backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), c.maxPollingAttempts)); err != nil { + c.logger.Error("Timed out waiting for bootstrap parameters", "error", err) + return 1 + } + + if c.flagRedirectTrafficConfig != "" { + err := c.applyTrafficRedirectionRules(&bootstrapConfig) // BootstrapConfig is always populated non-nil from the RPC + if err != nil { + c.logger.Error("error applying traffic redirection rules", "err", err) + return 1 + } + } + + c.logger.Info("Proxy initialization completed") + return 0 +} + +func (c *Command) validateFlags() error { + if c.flagProxyName == "" { + return errors.New("-proxy-name must be set") + } + return nil +} + +func (c *Command) Synopsis() string { return synopsis } +func (c *Command) Help() string { + c.once.Do(c.init) + return c.help +} + +func (c *Command) getBootstrapParams( + client pbdataplane.DataplaneServiceClient, + bootstrapConfig *pbmesh.BootstrapConfig) backoff.Operation { + + return func() error { + req := &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ProxyId: c.flagProxyName, + Namespace: c.consul.Namespace, + Partition: c.consul.Partition, + } + res, err := client.GetEnvoyBootstrapParams(context.Background(), req) + if err != nil { + c.logger.Error("Unable to get bootstrap parameters", "error", err) + return err + } + if res.GetBootstrapConfig() != nil { + *bootstrapConfig = *res.GetBootstrapConfig() + } + return nil + } +} + +// This below implementation is loosely based on +// 
https://github.com/hashicorp/consul/blob/fe2d41ddad9ba2b8ff86cbdebbd8f05855b1523c/command/connect/redirecttraffic/redirect_traffic.go#L136. + +func (c *Command) applyTrafficRedirectionRules(config *pbmesh.BootstrapConfig) error { + + err := json.Unmarshal([]byte(c.flagRedirectTrafficConfig), &c.iptablesConfig) + if err != nil { + return err + } + if c.iptablesProvider != nil { + c.iptablesConfig.IptablesProvider = c.iptablesProvider + } + + // TODO: provide dynamic updates to the c.iptablesConfig.ProxyOutboundPort + // We currently don't have a V2 endpoint that can gather the fully synthesized ProxyConfiguration. + // We need this to dynamically set c.iptablesConfig.ProxyOutboundPort with the outbound port configuration from + // pbmesh.DynamicConfiguration.TransparentProxy.OutboundListenerPort. + // We would either need to grab another resource that has this information rendered in it, or add + // pbmesh.DynamicConfiguration to the GetBootstrapParameters rpc. + // Right now this is an edge case because the mesh webhook configured the flagRedirectTrafficConfig with the default + // 15001 port. + + // TODO: provide dyanmic updates to the c.iptablesConfig.ProxyInboundPort + // This is the `mesh` port in the workload resource. + // Right now this will always be the default port (20000) + + if config.StatsBindAddr != "" { + _, port, err := net.SplitHostPort(config.StatsBindAddr) + if err != nil { + return fmt.Errorf("failed parsing host and port from StatsBindAddr: %s", err) + } + + c.iptablesConfig.ExcludeInboundPorts = append(c.iptablesConfig.ExcludeInboundPorts, port) + } + + // Configure any relevant information from the proxy service + err = iptables.Setup(c.iptablesConfig) + if err != nil { + return err + } + c.logger.Info("Successfully applied traffic redirection rules") + return nil +} + +const synopsis = "Inject mesh init command." +const help = ` +Usage: consul-k8s-control-plane mesh-init [options] + + Bootstraps mesh-injected pod components. + Uses V2 Consul Catalog APIs. + Not intended for stand-alone use. +` diff --git a/control-plane/subcommand/mesh-init/command_ent_test.go b/control-plane/subcommand/mesh-init/command_ent_test.go new file mode 100644 index 0000000000..ad3ea8c87d --- /dev/null +++ b/control-plane/subcommand/mesh-init/command_ent_test.go @@ -0,0 +1,118 @@ +// Copyright (c) HashiCorp, Inc. 
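For orientation on the Run flow above: bootstrap parameters are polled with a constant one-second backoff capped at maxPollingAttempts (default 120), so a Workload that never appears causes mesh-init to give up after roughly two minutes, which is the "(120s)" noted on defaultMaxPollingRetries. When the returned BootstrapConfig carries a StatsBindAddr, only its port is added to ExcludeInboundPorts, so traffic to that port bypasses the inbound redirect rules. A small standalone sketch of that port handling, with a hypothetical address borrowed from the test fixture earlier in this patch:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Hypothetical value mirroring the BootstrapConfig fixture in dataplane_client_test.go.
	statsBindAddr := "127.0.0.2:1234"

	var excludeInboundPorts []string
	if statsBindAddr != "" {
		// Only the port matters; the host portion is discarded.
		_, port, err := net.SplitHostPort(statsBindAddr)
		if err != nil {
			panic(fmt.Errorf("failed parsing host and port from StatsBindAddr: %w", err))
		}
		excludeInboundPorts = append(excludeInboundPorts, port)
	}

	fmt.Println(excludeInboundPorts) // [1234], the stats port left out of iptables inbound redirection
}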
+// SPDX-License-Identifier: MPL-2.0 + +//go:build enterprise + +package meshinit + +import ( + "context" + "strconv" + "testing" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" +) + +func TestRun_WithNamespaces(t *testing.T) { + t.Parallel() + cases := []struct { + name string + consulNamespace string + consulPartition string + }{ + { + name: "default ns, default partition", + consulNamespace: constants.DefaultConsulNS, + consulPartition: constants.DefaultConsulPartition, + }, + { + name: "non-default ns, default partition", + consulNamespace: "bar", + consulPartition: constants.DefaultConsulPartition, + }, + { + name: "non-default ns, non-default partition", + consulNamespace: "bar", + consulPartition: "baz", + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + + var serverCfg *testutil.TestServerConfig + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + serverCfg = c + }) + resourceClient, err := consul.NewResourceServiceClient(testClient.Watcher) + require.NoError(t, err) + + _, err = EnsurePartitionExists(testClient.APIClient, c.consulPartition) + require.NoError(t, err) + + partitionedCfg := testClient.Cfg.APIClientConfig + partitionedCfg.Partition = c.consulPartition + + partitionedClient, err := api.NewClient(partitionedCfg) + require.NoError(t, err) + + _, err = namespaces.EnsureExists(partitionedClient, c.consulNamespace, "") + require.NoError(t, err) + + // Register Consul workload. + loadResource(t, resourceClient, getWorkloadID(testPodName, c.consulNamespace, c.consulPartition), getWorkload(), nil) + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + maxPollingAttempts: 5, + } + // We build the consul-addr because normally it's defined by the init container setting + // CONSUL_HTTP_ADDR when it processes the command template. + flags := []string{"-proxy-name", testPodName, + "-addresses", "127.0.0.1", + "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), + "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-namespace", c.consulNamespace, + "-partition", c.consulPartition, + } + + // Run the command. + code := cmd.Run(flags) + require.Equal(t, 0, code, ui.ErrorWriter.String()) + }) + } +} + +// EnsurePartitionExists ensures a Consul partition exists. +// Boolean return value indicates if the partition was created by this call. +// This is borrowed from namespaces.EnsureExists +func EnsurePartitionExists(client *api.Client, name string) (bool, error) { + if name == constants.DefaultConsulPartition { + return false, nil + } + // Check if the Consul namespace exists. 
+	partitionInfo, _, err := client.Partitions().Read(context.Background(), name, nil)
+	if err != nil {
+		return false, err
+	}
+	if partitionInfo != nil {
+		return false, nil
+	}
+
+	consulPartition := api.Partition{
+		Name:        name,
+		Description: "Auto-generated by consul-k8s",
+	}
+
+	_, _, err = client.Partitions().Create(context.Background(), &consulPartition, nil)
+	return true, err
+}
diff --git a/control-plane/subcommand/mesh-init/command_test.go b/control-plane/subcommand/mesh-init/command_test.go
new file mode 100644
index 0000000000..ec280d70ac
--- /dev/null
+++ b/control-plane/subcommand/mesh-init/command_test.go
@@ -0,0 +1,425 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package meshinit
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
+	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
+	"github.com/hashicorp/consul/proto-public/pbresource"
+	"github.com/hashicorp/consul/sdk/iptables"
+	"github.com/hashicorp/consul/sdk/testutil"
+	"github.com/mitchellh/cli"
+	"github.com/stretchr/testify/require"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/anypb"
+
+	"github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants"
+	"github.com/hashicorp/consul-k8s/control-plane/consul"
+	"github.com/hashicorp/consul-k8s/control-plane/helper/test"
+)
+
+func TestRun_FlagValidation(t *testing.T) {
+	t.Parallel()
+	cases := []struct {
+		flags  []string
+		env    string
+		expErr string
+	}{
+		{
+			flags:  []string{},
+			expErr: "-proxy-name must be set",
+		},
+		{
+			flags: []string{
+				"-proxy-name", testPodName,
+				"-log-level", "invalid",
+			},
+			expErr: "unknown log level: invalid",
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.expErr, func(t *testing.T) {
+			ui := cli.NewMockUi()
+			cmd := Command{
+				UI: ui,
+			}
+			code := cmd.Run(c.flags)
+			require.Equal(t, 1, code)
+			require.Contains(t, ui.ErrorWriter.String(), c.expErr)
+		})
+	}
+}
+
+// TestRun_MeshServices tests that the command can log in to Consul (if ACLs are enabled) using a Kubernetes
+// auth method and, using the obtained token, make a call to the dataplane GetBootstrapParams() RPC.
+func TestRun_MeshServices(t *testing.T) {
+	t.Parallel()
+
+	cases := []struct {
+		name               string
+		workload           *pbcatalog.Workload
+		proxyConfiguration *pbmesh.ProxyConfiguration
+		aclsEnabled        bool
+		expFail            bool
+	}{
+		{
+			name:     "basic workload bootstrap",
+			workload: getWorkload(),
+		},
+		{
+			name:               "workload and proxyconfiguration bootstrap",
+			workload:           getWorkload(),
+			proxyConfiguration: getProxyConfiguration(),
+		},
+		{
+			name:    "missing workload",
+			expFail: true,
+		},
+		// TODO: acls enabled
+	}
+	for _, tt := range cases {
+		t.Run(tt.name, func(t *testing.T) {
+			//tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int())
+			//t.Cleanup(func() {
+			//	_ = os.RemoveAll(tokenFile)
+			//})
+
+			// Create a test Consul server.
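+			// The test server is started with the "resource-apis" experiment enabled so that the
+			// V2 resource service used by this command is available; the watcher it returns backs
+			// the resource service client created below.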
+			var serverCfg *testutil.TestServerConfig
+			testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) {
+				c.Experiments = []string{"resource-apis"}
+				serverCfg = c
+			})
+			resourceClient, err := consul.NewResourceServiceClient(testClient.Watcher)
+			require.NoError(t, err)
+
+			loadResource(t, resourceClient, getWorkloadID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), tt.workload, nil)
+			loadResource(t, resourceClient, getProxyConfigurationID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), tt.proxyConfiguration, nil)
+
+			ui := cli.NewMockUi()
+			cmd := Command{
+				UI:                 ui,
+				maxPollingAttempts: 3,
+			}
+
+			// We build the consul-addr because normally it's defined by the init container setting
+			// CONSUL_HTTP_ADDR when it processes the command template.
+			flags := []string{"-proxy-name", testPodName,
+				"-addresses", "127.0.0.1",
+				"-http-port", strconv.Itoa(serverCfg.Ports.HTTP),
+				"-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC),
+			}
+			//if tt.aclsEnabled {
+			//	flags = append(flags, "-auth-method-name", test.AuthMethod,
+			//		"-service-account-name", tt.serviceAccountName,
+			//		"-acl-token-sink", tokenFile) //TODO: what happens if this is unspecified? We don't need this file
+			//}
+
+			// Run the command.
+			code := cmd.Run(flags)
+			if tt.expFail {
+				require.Equal(t, 1, code)
+				return
+			}
+			require.Equal(t, 0, code, ui.ErrorWriter.String())
+
+			// TODO: Can we remove the tokenFile from this workflow?
+			// consul-dataplane performs its own login using the ServiceAccount bearer token.
+			//if tt.aclsEnabled {
+			//	// Validate the ACL token was written.
+			//	tokenData, err := os.ReadFile(tokenFile)
+			//	require.NoError(t, err)
+			//	require.NotEmpty(t, tokenData)
+			//
+			//	// Check that the token has the metadata with pod name and pod namespace.
+			//	consulClient, err = api.NewClient(&api.Config{Address: server.HTTPAddr, Token: string(tokenData)})
+			//	require.NoError(t, err)
+			//	token, _, err := consulClient.ACL().TokenReadSelf(nil)
+			//	require.NoError(t, err)
+			//	require.Equal(t, "token created via login: {\"pod\":\"default-ns/counting-pod\"}", token.Description)
+			//}
+		})
+	}
+}
+
+// TestRun_RetryServicePolling runs the command without registering the Consul workload
+// for 2 seconds and then asserts that the command exits successfully.
+func TestRun_RetryServicePolling(t *testing.T) {
+	t.Parallel()
+
+	// Start Consul server.
+	var serverCfg *testutil.TestServerConfig
+	testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) {
+		c.Experiments = []string{"resource-apis"}
+		serverCfg = c
+	})
+	resourceClient, err := consul.NewResourceServiceClient(testClient.Watcher)
+	require.NoError(t, err)
+
+	// Start the workload registration in a goroutine and delay it so that it runs
+	// after cmd.Run() starts.
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		// Wait a moment; this ensures that we are already in the retry logic.
+		time.Sleep(time.Second * 2)
+		// Register the test workload.
+		loadResource(t, resourceClient, getWorkloadID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), getWorkload(), nil)
+	}()
+
+	ui := cli.NewMockUi()
+	cmd := Command{
+		UI:                 ui,
+		maxPollingAttempts: 10,
+	}
+	flags := []string{
+		"-proxy-name", testPodName,
+		"-addresses", "127.0.0.1",
+		"-http-port", strconv.Itoa(serverCfg.Ports.HTTP),
+		"-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC),
+	}
+	code := cmd.Run(flags)
+	wg.Wait()
+	require.Equal(t, 0, code)
+}
+
+func TestRun_TrafficRedirection(t *testing.T) {
+	cases := map[string]struct {
+		registerProxyConfiguration bool
+		expIptablesParamsFunc      func(actual iptables.Config) error
+	}{
+		"no proxyConfiguration provided": {
+			expIptablesParamsFunc: func(actual iptables.Config) error {
+				if len(actual.ExcludeInboundPorts) != 0 {
+					return fmt.Errorf("ExcludeInboundPorts in iptables.Config was %v, but should be empty", actual.ExcludeInboundPorts)
+				}
+				if actual.ProxyInboundPort != 20000 {
+					return fmt.Errorf("ProxyInboundPort in iptables.Config was %d, but should be 20000", actual.ProxyInboundPort)
+				}
+				if actual.ProxyOutboundPort != 15001 {
+					return fmt.Errorf("ProxyOutboundPort in iptables.Config was %d, but should be 15001", actual.ProxyOutboundPort)
+				}
+				return nil
+			},
+		},
+		"stats bind port is provided in proxyConfiguration": {
+			registerProxyConfiguration: true,
+			expIptablesParamsFunc: func(actual iptables.Config) error {
+				if len(actual.ExcludeInboundPorts) != 1 || actual.ExcludeInboundPorts[0] != "9090" {
+					return fmt.Errorf("ExcludeInboundPorts in iptables.Config was %v, but should be [9090]", actual.ExcludeInboundPorts)
+				}
+				if actual.ProxyInboundPort != 20000 {
+					return fmt.Errorf("ProxyInboundPort in iptables.Config was %d, but should be 20000", actual.ProxyInboundPort)
+				}
+				if actual.ProxyOutboundPort != 15001 {
+					return fmt.Errorf("ProxyOutboundPort in iptables.Config was %d, but should be 15001", actual.ProxyOutboundPort)
+				}
+				return nil
+			},
+		},
+	}
+
+	for name, c := range cases {
+		t.Run(name, func(t *testing.T) {
+
+			// Start Consul server.
+			var serverCfg *testutil.TestServerConfig
+			testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) {
+				c.Experiments = []string{"resource-apis"}
+				serverCfg = c
+			})
+			resourceClient, err := consul.NewResourceServiceClient(testClient.Watcher)
+			require.NoError(t, err)
+
+			// Optionally register a ProxyConfiguration resource carrying the additional proxy configuration.
+			if c.registerProxyConfiguration {
+				loadResource(t, resourceClient, getProxyConfigurationID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), getProxyConfiguration(), nil)
+			}
+
+			// Register Consul workload.
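+			// (The workload itself does not change the ports asserted above: per the TODOs in
+			// command.go, ProxyInboundPort and ProxyOutboundPort currently come from the
+			// -redirect-traffic-config JSON built below rather than from the workload or
+			// ProxyConfiguration.)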
+			loadResource(t, resourceClient, getWorkloadID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), getWorkload(), nil)
+
+			iptablesProvider := &fakeIptablesProvider{}
+			iptablesCfg := iptables.Config{
+				ProxyUserID:       "5995",
+				ProxyInboundPort:  20000,
+				ProxyOutboundPort: 15001,
+			}
+
+			ui := cli.NewMockUi()
+			cmd := Command{
+				UI:                 ui,
+				maxPollingAttempts: 3,
+				iptablesProvider:   iptablesProvider,
+			}
+			iptablesCfgJSON, err := json.Marshal(iptablesCfg)
+			require.NoError(t, err)
+			flags := []string{
+				"-proxy-name", testPodName,
+				"-addresses", "127.0.0.1",
+				"-http-port", strconv.Itoa(serverCfg.Ports.HTTP),
+				"-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC),
+				"-redirect-traffic-config", string(iptablesCfgJSON),
+			}
+			code := cmd.Run(flags)
+			require.Equal(t, 0, code, ui.ErrorWriter.String())
+			require.Truef(t, iptablesProvider.applyCalled, "redirect traffic rules were not applied")
+			if c.expIptablesParamsFunc != nil {
+				errMsg := c.expIptablesParamsFunc(cmd.iptablesConfig)
+				require.NoError(t, errMsg)
+			}
+		})
+	}
+}
+
+const (
+	testPodName = "foo"
+)
+
+type fakeIptablesProvider struct {
+	applyCalled bool
+	rules       []string
+}
+
+func loadResource(t *testing.T, client pbresource.ResourceServiceClient, id *pbresource.ID, proto proto.Message, owner *pbresource.ID) {
+	if id == nil || !proto.ProtoReflect().IsValid() {
+		return
+	}
+
+	data, err := anypb.New(proto)
+	require.NoError(t, err)
+
+	resource := &pbresource.Resource{
+		Id:    id,
+		Data:  data,
+		Owner: owner,
+	}
+
+	req := &pbresource.WriteRequest{Resource: resource}
+	_, err = client.Write(context.Background(), req)
+	require.NoError(t, err)
+	test.ResourceHasPersisted(t, client, id)
+}
+
+func getWorkloadID(name, namespace, partition string) *pbresource.ID {
+	return &pbresource.ID{
+		Name: name,
+		Type: &pbresource.Type{
+			Group:        "catalog",
+			GroupVersion: "v1alpha1",
+			Kind:         "Workload",
+		},
+		Tenancy: &pbresource.Tenancy{
+			Partition: partition,
+			Namespace: namespace,
+
+			// Because we are explicitly defining NS/partition, this will not default and must be explicit.
+			// At a future point, this will move out of the Tenancy block.
+			PeerName: constants.DefaultConsulPeer,
+		},
+	}
+}
+
+// getWorkload creates a Workload that matches the pod from createPod,
+// assuming that metrics, telemetry, and overwrite probes are enabled separately.
+func getWorkload() *pbcatalog.Workload {
+	return &pbcatalog.Workload{
+		Addresses: []*pbcatalog.WorkloadAddress{
+			{Host: "10.0.0.1", Ports: []string{"public", "admin", "mesh"}},
+		},
+		Ports: map[string]*pbcatalog.WorkloadPort{
+			"public": {
+				Port:     80,
+				Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED,
+			},
+			"admin": {
+				Port:     8080,
+				Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED,
+			},
+			"mesh": {
+				Port:     constants.ProxyDefaultInboundPort,
+				Protocol: pbcatalog.Protocol_PROTOCOL_MESH,
+			},
+		},
+		NodeName: "k8s-node-0",
+		Identity: testPodName,
+	}
+}
+
+func getProxyConfigurationID(name, namespace, partition string) *pbresource.ID {
+	return &pbresource.ID{
+		Name: name,
+		Type: &pbresource.Type{
+			Group:        "mesh",
+			GroupVersion: "v1alpha1",
+			Kind:         "ProxyConfiguration",
+		},
+		Tenancy: &pbresource.Tenancy{
+			Partition: partition,
+			Namespace: namespace,
+
+			// Because we are explicitly defining NS/partition, this will not default and must be explicit.
+			// At a future point, this will move out of the Tenancy block.
+			PeerName: constants.DefaultConsulPeer,
+		},
+	}
+}
+
+// getProxyConfiguration creates a ProxyConfiguration that matches the workload from getWorkload.
+func getProxyConfiguration() *pbmesh.ProxyConfiguration {
+	return &pbmesh.ProxyConfiguration{
+		Workloads: &pbcatalog.WorkloadSelector{
+			Names: []string{testPodName},
+		},
+		DynamicConfig: &pbmesh.DynamicConfig{
+			Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
+			ExposeConfig: &pbmesh.ExposeConfig{
+				ExposePaths: []*pbmesh.ExposePath{
+					{
+						ListenerPort:  20400,
+						LocalPathPort: 2001,
+						Path:          "/livez",
+					},
+					{
+						ListenerPort:  20300,
+						LocalPathPort: 2000,
+						Path:          "/readyz",
+					},
+					{
+						ListenerPort:  20500,
+						LocalPathPort: 2002,
+						Path:          "/startupz",
+					},
+				},
+			},
+		},
+		BootstrapConfig: &pbmesh.BootstrapConfig{
+			StatsBindAddr:      "0.0.0.0:9090",
+			PrometheusBindAddr: "0.0.0.0:21234", // This gets added to the iptables exclude list directly in the webhook.
+		},
+	}
+}
+
+func (f *fakeIptablesProvider) AddRule(_ string, args ...string) {
+	f.rules = append(f.rules, strings.Join(args, " "))
+}
+
+func (f *fakeIptablesProvider) ApplyRules() error {
+	f.applyCalled = true
+	return nil
+}
+
+func (f *fakeIptablesProvider) Rules() []string {
+	return f.rules
+}
diff --git a/control-plane/subcommand/partition-init/command.go b/control-plane/subcommand/partition-init/command.go
index 0684f8b1bb..0aa8cdc724 100644
--- a/control-plane/subcommand/partition-init/command.go
+++ b/control-plane/subcommand/partition-init/command.go
@@ -31,8 +31,6 @@ type Command struct {
 	flagLogJSON bool
 	flagTimeout time.Duration
 
-	flagResourceAPIs bool // Use V2 APIs
-
 	// ctx is cancelled when the command timeout is reached.
 	ctx context.Context
 	retryDuration time.Duration
@@ -54,8 +52,6 @@ func (c *Command) init() {
 		"\"debug\", \"info\", \"warn\", and \"error\".")
 	c.flags.BoolVar(&c.flagLogJSON, "log-json", false,
 		"Enable or disable JSON output format for logging.")
-	c.flags.BoolVar(&c.flagResourceAPIs, "enable-resource-apis", false,
-		"Enable of disable V2 Resource APIs.")
 
 	c.consul = &flags.ConsulFlags{}
 	flags.Merge(c.flags, c.consul.Flags())
@@ -177,11 +173,6 @@ func (c *Command) validateFlags() error {
 		return errors.New("-api-timeout must be set to a value greater than 0")
 	}
 
-	// TODO(dans) this needs to be replaced when the partition workflow is available.
-	if c.flagResourceAPIs {
-		return errors.New("partition-init is not implemented when the -enable-resource-apis flag is set for V2 Resource APIs")
-	}
-
 	return nil
 }
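
For illustration only (not part of the patch above): a minimal sketch of how the -redirect-traffic-config payload consumed by mesh-init can be assembled. It mirrors the iptables.Config used in TestRun_TrafficRedirection; the ProxyUserID and port values shown are the defaults assumed there (20000 inbound, 15001 outbound), not values mandated by the command.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/consul/sdk/iptables"
)

func main() {
	// Mirror the iptables.Config the webhook/tests hand to mesh-init. 20000 and 15001 are
	// the default inbound (`mesh` port) and outbound listener ports referenced in command.go.
	cfg := iptables.Config{
		ProxyUserID:       "5995",
		ProxyInboundPort:  20000,
		ProxyOutboundPort: 15001,
	}

	cfgJSON, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}

	// mesh-init unmarshals this JSON back into iptables.Config in applyTrafficRedirectionRules
	// and then calls iptables.Setup with it.
	fmt.Printf("consul-k8s-control-plane mesh-init -proxy-name <pod-name> -redirect-traffic-config '%s'\n", string(cfgJSON))
}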