From ae1bb29da31693207696eb27f6aaef2c39750291 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Mon, 5 Jun 2023 15:24:10 -0400 Subject: [PATCH 01/19] Reorder Container order with Deployments that use OAuth Proxy - Allows for easier debugging --- .../internal/apiserver/deployment.yaml.tmpl | 96 +++++++++---------- .../mlpipelines-ui/deployment.yaml.tmpl | 92 +++++++++--------- .../created/apiserver_deployment.yaml | 94 +++++++++--------- .../created/apiserver_deployment.yaml | 94 +++++++++--------- .../created/mlpipelines-ui_deployment.yaml | 94 +++++++++--------- .../created/apiserver_deployment.yaml | 94 +++++++++--------- .../created/apiserver_deployment.yaml | 94 +++++++++--------- .../created/mlpipelines-ui_deployment.yaml | 94 +++++++++--------- 8 files changed, 376 insertions(+), 376 deletions(-) diff --git a/config/internal/apiserver/deployment.yaml.tmpl b/config/internal/apiserver/deployment.yaml.tmpl index fb667ace3..0a18293ec 100644 --- a/config/internal/apiserver/deployment.yaml.tmpl +++ b/config/internal/apiserver/deployment.yaml.tmpl @@ -18,54 +18,6 @@ spec: component: data-science-pipelines spec: containers: - {{ if .APIServer.EnableRoute }} - - name: oauth-proxy - args: - - --https-address=:8443 - - --provider=openshift - - --openshift-service-account=ds-pipeline-{{.Name}} - - --upstream=http://localhost:8888 - - --tls-cert=/etc/tls/private/tls.crt - - --tls-key=/etc/tls/private/tls.key - - --cookie-secret=SECRET - - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-{{.Name}}","namespace":"{{.Namespace}}"}}' - - '--openshift-sar={"namespace":"{{.Namespace}}","resource":"routes","resourceName":"ds-pipeline-{{.Name}}","verb":"get","resourceAPIGroup":"route.openshift.io"}' - - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' - image: {{.OAuthProxy}} - ports: - - containerPort: 8443 - name: oauth - livenessProbe: - httpGet: - path: /oauth/healthz - port: oauth - scheme: HTTPS - 
initialDelaySeconds: 30 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /oauth/healthz - port: oauth - scheme: HTTPS - initialDelaySeconds: 5 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 100m - memory: 256Mi - volumeMounts: - - mountPath: /etc/tls/private - name: proxy-tls - {{ end }} - env: - name: POD_NAMESPACE value: "{{.Namespace}}" @@ -198,6 +150,54 @@ spec: - name: sample-pipeline mountPath: /samples/ {{ end }} + {{ if .APIServer.EnableRoute }} + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-{{.Name}} + - --upstream=http://localhost:8888 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-{{.Name}}","namespace":"{{.Namespace}}"}}' + - '--openshift-sar={"namespace":"{{.Namespace}}","resource":"routes","resourceName":"ds-pipeline-{{.Name}}","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: {{.OAuthProxy}} + ports: + - containerPort: 8443 + name: oauth + livenessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + {{ end }} serviceAccountName: ds-pipeline-{{.Name}} volumes: - name: proxy-tls diff 
--git a/config/internal/mlpipelines-ui/deployment.yaml.tmpl b/config/internal/mlpipelines-ui/deployment.yaml.tmpl index 25a48e43d..aa9deebc9 100644 --- a/config/internal/mlpipelines-ui/deployment.yaml.tmpl +++ b/config/internal/mlpipelines-ui/deployment.yaml.tmpl @@ -20,52 +20,6 @@ spec: component: data-science-pipelines spec: containers: - - name: oauth-proxy - args: - - --https-address=:8443 - - --provider=openshift - - --openshift-service-account=ds-pipeline-ui-{{.Name}} - - --upstream=http://localhost:3000 - - --tls-cert=/etc/tls/private/tls.crt - - --tls-key=/etc/tls/private/tls.key - - --cookie-secret=SECRET - - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-ui-{{.Name}}","namespace":"{{.Namespace}}"}}' - - '--openshift-sar={"namespace":"{{.Namespace}}","resource":"routes","resourceName":"ds-pipeline-ui-{{.Name}}","verb":"get","resourceAPIGroup":"route.openshift.io"}' - - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' - image: {{.OAuthProxy}} - ports: - - containerPort: 8443 - name: https - livenessProbe: - httpGet: - path: /oauth/healthz - port: 8443 - scheme: HTTPS - initialDelaySeconds: 30 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /oauth/healthz - port: 8443 - scheme: HTTPS - initialDelaySeconds: 5 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 100m - memory: 256Mi - volumeMounts: - - mountPath: /etc/tls/private - name: proxy-tls - env: - name: VIEWER_TENSORBOARD_POD_TEMPLATE_SPEC_PATH value: /etc/config/viewer-pod-template.json @@ -143,6 +97,52 @@ spec: - mountPath: /etc/config name: config-volume readOnly: true + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-ui-{{.Name}} + - --upstream=http://localhost:3000 + - 
--tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-ui-{{.Name}}","namespace":"{{.Namespace}}"}}' + - '--openshift-sar={"namespace":"{{.Namespace}}","resource":"routes","resourceName":"ds-pipeline-ui-{{.Name}}","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: {{.OAuthProxy}} + ports: + - containerPort: 8443 + name: https + livenessProbe: + httpGet: + path: /oauth/healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls serviceAccountName: ds-pipeline-ui-{{.Name}} volumes: - configMap: diff --git a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml index 9b5d040ff..3db41f0a8 100644 --- a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml @@ -18,53 +18,6 @@ spec: component: data-science-pipelines spec: containers: - - name: oauth-proxy - args: - - --https-address=:8443 - - --provider=openshift - - --openshift-service-account=ds-pipeline-testdsp0 - - --upstream=http://localhost:8888 - - --tls-cert=/etc/tls/private/tls.crt - - --tls-key=/etc/tls/private/tls.key - - --cookie-secret=SECRET - - '--openshift-delegate-urls={"/": 
{"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp0","namespace":"default"}}' - - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp0","verb":"get","resourceAPIGroup":"route.openshift.io"}' - - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' - image: oauth-proxy:test0 - ports: - - containerPort: 8443 - name: oauth - protocol: TCP - livenessProbe: - httpGet: - path: /oauth/healthz - port: oauth - scheme: HTTPS - initialDelaySeconds: 30 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /oauth/healthz - port: oauth - scheme: HTTPS - initialDelaySeconds: 5 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 100m - memory: 256Mi - volumeMounts: - - mountPath: /etc/tls/private - name: proxy-tls - env: - name: POD_NAMESPACE value: "default" @@ -185,6 +138,53 @@ spec: subPath: sample_config.json - mountPath: /samples/ name: sample-pipeline + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-testdsp0 + - --upstream=http://localhost:8888 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp0","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp0","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: oauth-proxy:test0 + ports: + - containerPort: 8443 + name: oauth + protocol: TCP + livenessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + 
successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls volumes: - name: proxy-tls secret: diff --git a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml index a41a8fd9d..0ae552568 100644 --- a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml @@ -18,53 +18,6 @@ spec: component: data-science-pipelines spec: containers: - - name: oauth-proxy - args: - - --https-address=:8443 - - --provider=openshift - - --openshift-service-account=ds-pipeline-testdsp2 - - --upstream=http://localhost:8888 - - --tls-cert=/etc/tls/private/tls.crt - - --tls-key=/etc/tls/private/tls.key - - --cookie-secret=SECRET - - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp2","namespace":"default"}}' - - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp2","verb":"get","resourceAPIGroup":"route.openshift.io"}' - - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' - image: oauth-proxy:test2 - ports: - - containerPort: 8443 - name: oauth - protocol: TCP - livenessProbe: - httpGet: - path: /oauth/healthz - port: oauth - scheme: HTTPS - initialDelaySeconds: 30 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /oauth/healthz - port: oauth - scheme: HTTPS - initialDelaySeconds: 5 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - 
failureThreshold: 3 - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 100m - memory: 256Mi - volumeMounts: - - mountPath: /etc/tls/private - name: proxy-tls - env: - name: POD_NAMESPACE value: "default" @@ -185,6 +138,53 @@ spec: subPath: sample_config.json - mountPath: /samples/ name: sample-pipeline + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-testdsp2 + - --upstream=http://localhost:8888 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp2","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp2","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: oauth-proxy:test2 + ports: + - containerPort: 8443 + name: oauth + protocol: TCP + livenessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls volumes: - name: proxy-tls secret: diff --git a/controllers/testdata/declarative/case_2/expected/created/mlpipelines-ui_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/mlpipelines-ui_deployment.yaml index 931b9ae5d..f7060c384 100644 --- a/controllers/testdata/declarative/case_2/expected/created/mlpipelines-ui_deployment.yaml +++ 
b/controllers/testdata/declarative/case_2/expected/created/mlpipelines-ui_deployment.yaml @@ -20,53 +20,6 @@ spec: component: data-science-pipelines spec: containers: - - name: oauth-proxy - args: - - --https-address=:8443 - - --provider=openshift - - --openshift-service-account=ds-pipeline-ui-testdsp2 - - --upstream=http://localhost:3000 - - --tls-cert=/etc/tls/private/tls.crt - - --tls-key=/etc/tls/private/tls.key - - --cookie-secret=SECRET - - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-ui-testdsp2","namespace":"default"}}' - - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-ui-testdsp2","verb":"get","resourceAPIGroup":"route.openshift.io"}' - - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' - image: oauth-proxy:test2 - ports: - - containerPort: 8443 - name: https - protocol: TCP - livenessProbe: - httpGet: - path: /oauth/healthz - port: 8443 - scheme: HTTPS - initialDelaySeconds: 30 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /oauth/healthz - port: 8443 - scheme: HTTPS - initialDelaySeconds: 5 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 100m - memory: 256Mi - volumeMounts: - - mountPath: /etc/tls/private - name: proxy-tls - env: - name: VIEWER_TENSORBOARD_POD_TEMPLATE_SPEC_PATH value: /etc/config/viewer-pod-template.json @@ -134,6 +87,53 @@ spec: - mountPath: /etc/config name: config-volume readOnly: true + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-ui-testdsp2 + - --upstream=http://localhost:3000 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": 
{"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-ui-testdsp2","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-ui-testdsp2","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: oauth-proxy:test2 + ports: + - containerPort: 8443 + name: https + protocol: TCP + livenessProbe: + httpGet: + path: /oauth/healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls serviceAccountName: ds-pipeline-ui-testdsp2 volumes: - configMap: diff --git a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml index e6c1cac4b..accfd5e7b 100644 --- a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml @@ -18,53 +18,6 @@ spec: component: data-science-pipelines spec: containers: - - name: oauth-proxy - args: - - --https-address=:8443 - - --provider=openshift - - --openshift-service-account=ds-pipeline-testdsp3 - - --upstream=http://localhost:8888 - - --tls-cert=/etc/tls/private/tls.crt - - --tls-key=/etc/tls/private/tls.key - - --cookie-secret=SECRET - - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp3","namespace":"default"}}' - - 
'--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp3","verb":"get","resourceAPIGroup":"route.openshift.io"}' - - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' - image: oauth-proxy:test3 - ports: - - containerPort: 8443 - name: oauth - protocol: TCP - livenessProbe: - httpGet: - path: /oauth/healthz - port: oauth - scheme: HTTPS - initialDelaySeconds: 30 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /oauth/healthz - port: oauth - scheme: HTTPS - initialDelaySeconds: 5 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 100m - memory: 256Mi - volumeMounts: - - mountPath: /etc/tls/private - name: proxy-tls - env: - name: POD_NAMESPACE value: "default" @@ -179,6 +132,53 @@ spec: limits: cpu: 500m memory: 1Gi + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-testdsp3 + - --upstream=http://localhost:8888 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp3","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp3","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: oauth-proxy:test3 + ports: + - containerPort: 8443 + name: oauth + protocol: TCP + livenessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + 
periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls volumes: - name: proxy-tls secret: diff --git a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml index fc3388e95..dd80d9d21 100644 --- a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml @@ -18,53 +18,6 @@ spec: component: data-science-pipelines spec: containers: - - name: oauth-proxy - args: - - --https-address=:8443 - - --provider=openshift - - --openshift-service-account=ds-pipeline-testdsp4 - - --upstream=http://localhost:8888 - - --tls-cert=/etc/tls/private/tls.crt - - --tls-key=/etc/tls/private/tls.key - - --cookie-secret=SECRET - - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp4","namespace":"default"}}' - - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp4","verb":"get","resourceAPIGroup":"route.openshift.io"}' - - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' - image: oauth-proxy:test4 - ports: - - containerPort: 8443 - name: oauth - protocol: TCP - livenessProbe: - httpGet: - path: /oauth/healthz - port: oauth - scheme: HTTPS - initialDelaySeconds: 30 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /oauth/healthz - port: oauth - scheme: HTTPS - initialDelaySeconds: 5 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 100m - memory: 256Mi - volumeMounts: - - mountPath: /etc/tls/private - name: proxy-tls - 
env: - name: POD_NAMESPACE value: "default" @@ -179,6 +132,53 @@ spec: limits: cpu: 2522m memory: 5Gi + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-testdsp4 + - --upstream=http://localhost:8888 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp4","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp4","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: oauth-proxy:test4 + ports: + - containerPort: 8443 + name: oauth + protocol: TCP + livenessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls volumes: - name: proxy-tls secret: diff --git a/controllers/testdata/declarative/case_4/expected/created/mlpipelines-ui_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/mlpipelines-ui_deployment.yaml index 315c24601..721891e96 100644 --- a/controllers/testdata/declarative/case_4/expected/created/mlpipelines-ui_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/mlpipelines-ui_deployment.yaml @@ -20,53 +20,6 @@ spec: component: data-science-pipelines spec: containers: - - name: oauth-proxy - args: - - --https-address=:8443 - - --provider=openshift - - 
--openshift-service-account=ds-pipeline-ui-testdsp4 - - --upstream=http://localhost:3000 - - --tls-cert=/etc/tls/private/tls.crt - - --tls-key=/etc/tls/private/tls.key - - --cookie-secret=SECRET - - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-ui-testdsp4","namespace":"default"}}' - - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-ui-testdsp4","verb":"get","resourceAPIGroup":"route.openshift.io"}' - - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' - image: oauth-proxy:test4 - ports: - - containerPort: 8443 - name: https - protocol: TCP - livenessProbe: - httpGet: - path: /oauth/healthz - port: 8443 - scheme: HTTPS - initialDelaySeconds: 30 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /oauth/healthz - port: 8443 - scheme: HTTPS - initialDelaySeconds: 5 - timeoutSeconds: 1 - periodSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 100m - memory: 256Mi - volumeMounts: - - mountPath: /etc/tls/private - name: proxy-tls - env: - name: VIEWER_TENSORBOARD_POD_TEMPLATE_SPEC_PATH value: /etc/config/viewer-pod-template.json @@ -134,6 +87,53 @@ spec: - mountPath: /etc/config name: config-volume readOnly: true + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-ui-testdsp4 + - --upstream=http://localhost:3000 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-ui-testdsp4","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-ui-testdsp4","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - 
--skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: oauth-proxy:test4 + ports: + - containerPort: 8443 + name: https + protocol: TCP + livenessProbe: + httpGet: + path: /oauth/healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls serviceAccountName: ds-pipeline-ui-testdsp4 volumes: - configMap: From aec768a35e4e8d572763d9c6141e81e5baaf02cd Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Fri, 26 May 2023 19:23:55 -0400 Subject: [PATCH 02/19] Add Optional ML-Metadata Component --- api/v1alpha1/dspipeline_types.go | 32 ++++ api/v1alpha1/zz_generated.deepcopy.go | 95 ++++++++++++ config/base/kustomization.yaml | 21 +++ config/base/params.env | 3 + config/configmaps/files/config.yaml | 3 + ...b.io_datasciencepipelinesapplications.yaml | 145 ++++++++++++++++++ .../metadata-envoy.deployment.yaml.tmpl | 61 ++++++++ .../metadata-envoy.service.yaml.tmpl | 17 ++ .../metadata-grpc.deployment.yaml.tmpl | 82 ++++++++++ .../metadata-grpc.service.yaml.tmpl | 17 ++ .../metadata-grpc.serviceaccount.yaml.tmpl | 8 + .../metadata-writer.deployment.yaml.tmpl | 72 +++++++++ .../metadata-writer.role.yaml.tmpl | 48 ++++++ .../metadata-writer.rolebinding.yaml.tmpl | 15 ++ .../metadata-writer.serviceaccount.yaml.tmpl | 8 + .../mlpipelines-ui/deployment.yaml.tmpl | 4 + config/manager/manager.yaml | 6 + controllers/config/defaults.go | 8 + controllers/dspipeline_controller.go | 11 +- controllers/dspipeline_params.go | 51 +++++- controllers/mlmd.go | 51 ++++++ .../created/mlpipelines-ui_deployment.yaml | 4 + .../created/mlpipelines-ui_deployment.yaml 
| 4 + kfdef/kfdef.yaml | 6 + 24 files changed, 768 insertions(+), 4 deletions(-) create mode 100644 config/internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl create mode 100644 config/internal/ml-metadata/metadata-envoy.service.yaml.tmpl create mode 100644 config/internal/ml-metadata/metadata-grpc.deployment.yaml.tmpl create mode 100644 config/internal/ml-metadata/metadata-grpc.service.yaml.tmpl create mode 100644 config/internal/ml-metadata/metadata-grpc.serviceaccount.yaml.tmpl create mode 100644 config/internal/ml-metadata/metadata-writer.deployment.yaml.tmpl create mode 100644 config/internal/ml-metadata/metadata-writer.role.yaml.tmpl create mode 100644 config/internal/ml-metadata/metadata-writer.rolebinding.yaml.tmpl create mode 100644 config/internal/ml-metadata/metadata-writer.serviceaccount.yaml.tmpl create mode 100644 controllers/mlmd.go diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index 89b0b59a4..4bdfde796 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -35,6 +35,9 @@ type DSPASpec struct { *MlPipelineUI `json:"mlpipelineUI"` // +kubebuilder:validation:Required *ObjectStorage `json:"objectStorage"` + // +kubebuilder:validation:Optional + // +kubebuilder:default:={deploy: false} + *MLMD `json:"mlmd"` } type APIServer struct { @@ -164,6 +167,35 @@ type Minio struct { Image string `json:"image"` } +type MLMD struct { + // +kubebuilder:default:=false + // +kubebuilder:validation:Optional + Deploy bool `json:"deploy"` + *Envoy `json:"envoy,omitempty"` + *GRPC `json:"grpc,omitempty"` + *Writer `json:"writer,omitempty"` +} + +type Envoy struct { + Resources *ResourceRequirements `json:"resources,omitempty"` + // +kubebuilder:validation:Required + Image string `json:"image"` +} + +type GRPC struct { + Resources *ResourceRequirements `json:"resources,omitempty"` + // +kubebuilder:validation:Required + Image string `json:"image"` + // +kubebuilder:validation:Optional + Port string 
`json:"port"` +} + +type Writer struct { + Resources *ResourceRequirements `json:"resources,omitempty"` + // +kubebuilder:validation:Required + Image string `json:"image"` +} + // ResourceRequirements structures compute resource requirements. // Replaces ResourceRequirements from corev1 which also includes optional storage field. // We handle storage field separately, and should not include it as a subfield for Resources. diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 8d32f5029..2326c7f27 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -99,6 +99,11 @@ func (in *DSPASpec) DeepCopyInto(out *DSPASpec) { *out = new(ObjectStorage) (*in).DeepCopyInto(*out) } + if in.MLMD != nil { + in, out := &in.MLMD, &out.MLMD + *out = new(MLMD) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSPASpec. @@ -217,6 +222,26 @@ func (in *Database) DeepCopy() *Database { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Envoy) DeepCopyInto(out *Envoy) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Envoy. +func (in *Envoy) DeepCopy() *Envoy { + if in == nil { + return nil + } + out := new(Envoy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalDB) DeepCopyInto(out *ExternalDB) { *out = *in @@ -257,6 +282,56 @@ func (in *ExternalStorage) DeepCopy() *ExternalStorage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GRPC) DeepCopyInto(out *GRPC) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPC. +func (in *GRPC) DeepCopy() *GRPC { + if in == nil { + return nil + } + out := new(GRPC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MLMD) DeepCopyInto(out *MLMD) { + *out = *in + if in.Envoy != nil { + in, out := &in.Envoy, &out.Envoy + *out = new(Envoy) + (*in).DeepCopyInto(*out) + } + if in.GRPC != nil { + in, out := &in.GRPC, &out.GRPC + *out = new(GRPC) + (*in).DeepCopyInto(*out) + } + if in.Writer != nil { + in, out := &in.Writer, &out.Writer + *out = new(Writer) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MLMD. +func (in *MLMD) DeepCopy() *MLMD { + if in == nil { + return nil + } + out := new(MLMD) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MariaDB) DeepCopyInto(out *MariaDB) { *out = *in @@ -465,3 +540,23 @@ func (in *SecretKeyValue) DeepCopy() *SecretKeyValue { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Writer) DeepCopyInto(out *Writer) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Writer. 
+func (in *Writer) DeepCopy() *Writer { + if in == nil { + return nil + } + out := new(Writer) + in.DeepCopyInto(out) + return out +} diff --git a/config/base/kustomization.yaml b/config/base/kustomization.yaml index a2de720b0..5ca9aafac 100644 --- a/config/base/kustomization.yaml +++ b/config/base/kustomization.yaml @@ -71,6 +71,27 @@ vars: apiVersion: v1 fieldref: fieldpath: data.IMAGES_MARIADB + - name: IMAGES_MLMDENVOY + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGES_MLMDENVOY + - name: IMAGES_MLMDGRPC + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGES_MLMDGRPC + - name: IMAGES_MLMDWRITER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGES_MLMDWRITER - name: IMAGES_DSPO objref: kind: ConfigMap diff --git a/config/base/params.env b/config/base/params.env index c0509360a..5605f25a6 100644 --- a/config/base/params.env +++ b/config/base/params.env @@ -7,3 +7,6 @@ IMAGES_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1-188 IMAGES_DSPO=quay.io/opendatahub/data-science-pipelines-operator:main IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy:v4.12.0 +IMAGES_MLMDENVOY=quay.io/opendatahub/ds-pipelines-metadata-envoy:1.7.0 +IMAGES_MLMDGRPC=quay.io/opendatahub/ds-pipelines-metadata-grpc:1.0.0 +IMAGES_MLMDWRITER=quay.io/opendatahub/ds-pipelines-metadata-writer:1.1.0 diff --git a/config/configmaps/files/config.yaml b/config/configmaps/files/config.yaml index 104a1fabc..477512b54 100644 --- a/config/configmaps/files/config.yaml +++ b/config/configmaps/files/config.yaml @@ -7,3 +7,6 @@ Images: Cache: $(IMAGES_CACHE) MoveResultsImage: $(IMAGES_MOVERESULTSIMAGE) MariaDB: $(IMAGES_MARIADB) + MlmdEnvoy: $(IMAGES_MLMDENVOY) + MlmdGRPC: $(IMAGES_MLMDGRPC) + MlmdWriter: $(IMAGES_MLMDWRITER) diff --git 
a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index e11642bf2..b8d4db52f 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -235,6 +235,151 @@ spec: type: string type: object type: object + mlmd: + default: + deploy: false + properties: + deploy: + default: false + type: boolean + envoy: + properties: + image: + type: string + resources: + description: ResourceRequirements structures compute resource + requirements. Replaces ResourceRequirements from corev1 + which also includes optional storage field. We handle storage + field separately, and should not include it as a subfield + for Resources. + properties: + limits: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + required: + - image + type: object + grpc: + properties: + image: + type: string + port: + 
type: string + resources: + description: ResourceRequirements structures compute resource + requirements. Replaces ResourceRequirements from corev1 + which also includes optional storage field. We handle storage + field separately, and should not include it as a subfield + for Resources. + properties: + limits: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + required: + - image + type: object + writer: + properties: + image: + type: string + resources: + description: ResourceRequirements structures compute resource + requirements. Replaces ResourceRequirements from corev1 + which also includes optional storage field. We handle storage + field separately, and should not include it as a subfield + for Resources. 
+ properties: + limits: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + required: + - image + type: object + type: object mlpipelineUI: properties: configMap: diff --git a/config/internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl b/config/internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl new file mode 100644 index 000000000..e109b767a --- /dev/null +++ b/config/internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-metadata-envoy-{{.Name}} + namespace: {{.Namespace}} + labels: + app: ds-pipeline-metadata-envoy-{{.Name}} + component: data-science-pipelines +spec: + replicas: 1 + selector: + matchLabels: + app: ds-pipeline-metadata-envoy-{{.Name}} + component: data-science-pipelines + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + labels: + app: ds-pipeline-metadata-envoy-{{.Name}} + component: data-science-pipelines + spec: + containers: + - image: {{.MLMD.Envoy.Image}} + name: container + ports: + - containerPort: 9090 + name: md-envoy + - containerPort: 9901 + name: 
envoy-admin + livenessProbe: + initialDelaySeconds: 30 + periodSeconds: 5 + tcpSocket: + port: md-envoy + timeoutSeconds: 2 + readinessProbe: + initialDelaySeconds: 3 + periodSeconds: 5 + tcpSocket: + port: md-envoy + timeoutSeconds: 2 + resources: + {{ if .MLMD.Envoy.Resources.Requests }} + requests: + {{ if .MLMD.Envoy.Resources.Requests.CPU }} + cpu: {{.MLMD.Envoy.Resources.Requests.CPU}} + {{ end }} + {{ if .MLMD.Envoy.Resources.Requests.Memory }} + memory: {{.MLMD.Envoy.Resources.Requests.Memory}} + {{ end }} + {{ end }} + {{ if .MLMD.Envoy.Resources.Limits }} + limits: + {{ if .MLMD.Envoy.Resources.Limits.CPU }} + cpu: {{.MLMD.Envoy.Resources.Limits.CPU}} + {{ end }} + {{ if .MLMD.Envoy.Resources.Limits.Memory }} + memory: {{.MLMD.Envoy.Resources.Limits.Memory}} + {{ end }} + {{ end }} diff --git a/config/internal/ml-metadata/metadata-envoy.service.yaml.tmpl b/config/internal/ml-metadata/metadata-envoy.service.yaml.tmpl new file mode 100644 index 000000000..3813d89c1 --- /dev/null +++ b/config/internal/ml-metadata/metadata-envoy.service.yaml.tmpl @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: ds-pipeline-metadata-envoy-{{.Name}} + component: data-science-pipelines + name: ds-pipeline-metadata-envoy-{{.Name}} + namespace: {{.Namespace}} +spec: + ports: + - name: md-envoy + port: 9090 + protocol: TCP + selector: + app: ds-pipeline-metadata-envoy-{{.Name}} + component: data-science-pipelines + type: ClusterIP diff --git a/config/internal/ml-metadata/metadata-grpc.deployment.yaml.tmpl b/config/internal/ml-metadata/metadata-grpc.deployment.yaml.tmpl new file mode 100644 index 000000000..e64702005 --- /dev/null +++ b/config/internal/ml-metadata/metadata-grpc.deployment.yaml.tmpl @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-metadata-grpc-{{.Name}} + namespace: {{.Namespace}} + labels: + app: ds-pipeline-metadata-grpc-{{.Name}} + component: data-science-pipelines +spec: + replicas: 1 + selector: + 
matchLabels: + app: ds-pipeline-metadata-grpc-{{.Name}} + component: data-science-pipelines + template: + metadata: + labels: + app: ds-pipeline-metadata-grpc-{{.Name}} + component: data-science-pipelines + spec: + containers: + - args: + - --grpc_port={{.MLMD.GRPC.Port}} + - --mysql_config_database=$(MYSQL_DATABASE) + - --mysql_config_host=$(MYSQL_HOST) + - --mysql_config_port=$(MYSQL_PORT) + - --mysql_config_user=$(DBCONFIG_USER) + - --mysql_config_password=$(DBCONFIG_PASSWORD) + - --enable_database_upgrade=true + command: + - /bin/metadata_store_server + env: + - name: DBCONFIG_USER + value: "{{.DBConnection.Username}}" + - name: DBCONFIG_PASSWORD + valueFrom: + secretKeyRef: + key: "{{.DBConnection.CredentialsSecret.Key}}" + name: "{{.DBConnection.CredentialsSecret.Name}}" + - name: MYSQL_DATABASE + value: "{{.DBConnection.DBName}}" + - name: MYSQL_HOST + value: "{{.DBConnection.Host}}" + - name: MYSQL_PORT + value: "{{.DBConnection.Port}}" + image: {{.MLMD.GRPC.Image}} + name: container + ports: + - containerPort: {{.MLMD.GRPC.Port}} + name: grpc-api + livenessProbe: + initialDelaySeconds: 30 + periodSeconds: 5 + tcpSocket: + port: grpc-api + timeoutSeconds: 2 + readinessProbe: + initialDelaySeconds: 3 + periodSeconds: 5 + tcpSocket: + port: grpc-api + timeoutSeconds: 2 + resources: + {{ if .MLMD.GRPC.Resources.Requests }} + requests: + {{ if .MLMD.GRPC.Resources.Requests.CPU }} + cpu: {{.MLMD.GRPC.Resources.Requests.CPU}} + {{ end }} + {{ if .MLMD.GRPC.Resources.Requests.Memory }} + memory: {{.MLMD.GRPC.Resources.Requests.Memory}} + {{ end }} + {{ end }} + {{ if .MLMD.GRPC.Resources.Limits }} + limits: + {{ if .MLMD.GRPC.Resources.Limits.CPU }} + cpu: {{.MLMD.GRPC.Resources.Limits.CPU}} + {{ end }} + {{ if .MLMD.GRPC.Resources.Limits.Memory }} + memory: {{.MLMD.GRPC.Resources.Limits.Memory}} + {{ end }} + {{ end }} + serviceAccountName: ds-pipeline-metadata-grpc-{{.Name}} diff --git a/config/internal/ml-metadata/metadata-grpc.service.yaml.tmpl 
b/config/internal/ml-metadata/metadata-grpc.service.yaml.tmpl new file mode 100644 index 000000000..608ebe627 --- /dev/null +++ b/config/internal/ml-metadata/metadata-grpc.service.yaml.tmpl @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: ds-pipeline-metadata-grpc-{{.Name}} + namespace: {{.Namespace}} + labels: + app: ds-pipeline-metadata-grpc-{{.Name}} + component: data-science-pipelines +spec: + ports: + - name: grpc-api + port: {{.MLMD.GRPC.Port}} + protocol: TCP + selector: + app: ds-pipeline-metadata-grpc-{{.Name}} + component: data-science-pipelines + type: ClusterIP diff --git a/config/internal/ml-metadata/metadata-grpc.serviceaccount.yaml.tmpl b/config/internal/ml-metadata/metadata-grpc.serviceaccount.yaml.tmpl new file mode 100644 index 000000000..2af741fe7 --- /dev/null +++ b/config/internal/ml-metadata/metadata-grpc.serviceaccount.yaml.tmpl @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ds-pipeline-metadata-grpc-{{.Name}} + namespace: {{.Namespace}} + labels: + app: ds-pipeline-metadata-grpc-{{.Name}} + component: data-science-pipelines diff --git a/config/internal/ml-metadata/metadata-writer.deployment.yaml.tmpl b/config/internal/ml-metadata/metadata-writer.deployment.yaml.tmpl new file mode 100644 index 000000000..d6ff143cd --- /dev/null +++ b/config/internal/ml-metadata/metadata-writer.deployment.yaml.tmpl @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-metadata-writer-{{.Name}} + namespace: {{.Namespace}} + labels: + app: ds-pipeline-metadata-writer-{{.Name}} + component: data-science-pipelines +spec: + replicas: 1 + selector: + matchLabels: + app: ds-pipeline-metadata-writer-{{.Name}} + component: data-science-pipelines + template: + metadata: + labels: + app: ds-pipeline-metadata-writer-{{.Name}} + component: data-science-pipelines + spec: + containers: + - env: + - name: NAMESPACE_TO_WATCH + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: 
PIPELINE_RUNTIME + value: tekton + - name: ARCHIVE_LOGS + value: "{{.APIServer.ArchiveLogs}}" + - name: METADATA_GRPC_SERVICE_SERVICE_HOST + value: "ds-pipeline-metadata-grpc-{{.Name}}" + - name: METADATA_GRPC_SERVICE_SERVICE_PORT + value: "{{.MLMD.GRPC.Port}}" + image: "{{.MLMD.Writer.Image}}" + name: main + livenessProbe: + exec: + command: + - pidof + - python3 + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 2 + readinessProbe: + exec: + command: + - pidof + - python3 + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + {{ if .MLMD.Writer.Resources.Requests }} + requests: + {{ if .MLMD.Writer.Resources.Requests.CPU }} + cpu: {{.MLMD.Writer.Resources.Requests.CPU}} + {{ end }} + {{ if .MLMD.Writer.Resources.Requests.Memory }} + memory: {{.MLMD.Writer.Resources.Requests.Memory}} + {{ end }} + {{ end }} + {{ if .MLMD.Writer.Resources.Limits }} + limits: + {{ if .MLMD.Writer.Resources.Limits.CPU }} + cpu: {{.MLMD.Writer.Resources.Limits.CPU}} + {{ end }} + {{ if .MLMD.Writer.Resources.Limits.Memory }} + memory: {{.MLMD.Writer.Resources.Limits.Memory}} + {{ end }} + {{ end }} + serviceAccountName: ds-pipeline-metadata-writer-{{.Name}} diff --git a/config/internal/ml-metadata/metadata-writer.role.yaml.tmpl b/config/internal/ml-metadata/metadata-writer.role.yaml.tmpl new file mode 100644 index 000000000..05becbf37 --- /dev/null +++ b/config/internal/ml-metadata/metadata-writer.role.yaml.tmpl @@ -0,0 +1,48 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: ds-pipeline-metadata-writer-{{.Name}} + component: data-science-pipelines + name: ds-pipeline-metadata-writer-{{.Name}} + namespace: {{.Namespace}} + +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - apiGroups: + - argoproj.io + resources: + - workflows + verbs: + - get + - list + - watch + - update + - patch + - 
apiGroups: + - tekton.dev + resources: + - pipelineruns + - taskruns + - conditions + verbs: + - get + - list + - watch + - update + - patch diff --git a/config/internal/ml-metadata/metadata-writer.rolebinding.yaml.tmpl b/config/internal/ml-metadata/metadata-writer.rolebinding.yaml.tmpl new file mode 100644 index 000000000..1a96fd356 --- /dev/null +++ b/config/internal/ml-metadata/metadata-writer.rolebinding.yaml.tmpl @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: ds-pipeline-metadata-writer-{{.Name}} + component: data-science-pipelines + name: ds-pipeline-metadata-writer-{{.Name}} + namespace: {{.Namespace}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ds-pipeline-metadata-writer-{{.Name}} +subjects: + - kind: ServiceAccount + name: ds-pipeline-metadata-writer-{{.Name}} diff --git a/config/internal/ml-metadata/metadata-writer.serviceaccount.yaml.tmpl b/config/internal/ml-metadata/metadata-writer.serviceaccount.yaml.tmpl new file mode 100644 index 000000000..f46131828 --- /dev/null +++ b/config/internal/ml-metadata/metadata-writer.serviceaccount.yaml.tmpl @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ds-pipeline-metadata-writer-{{.Name}} + namespace: {{.Namespace}} + labels: + app: ds-pipeline-metadata-writer-{{.Name}} + component: data-science-pipelines diff --git a/config/internal/mlpipelines-ui/deployment.yaml.tmpl b/config/internal/mlpipelines-ui/deployment.yaml.tmpl index aa9deebc9..c4f49b727 100644 --- a/config/internal/mlpipelines-ui/deployment.yaml.tmpl +++ b/config/internal/mlpipelines-ui/deployment.yaml.tmpl @@ -45,6 +45,10 @@ spec: value: ds-pipeline-{{.Name}} - name: ML_PIPELINE_SERVICE_PORT value: '8888' + - name: METADATA_ENVOY_SERVICE_SERVICE_HOST + value: ds-pipeline-metadata-envoy-{{.Name}} + - name: METADATA_ENVOY_SERVICE_SERVICE_PORT + value: "9090" image: {{.MlPipelineUI.Image}} imagePullPolicy: IfNotPresent livenessProbe: diff --git 
a/config/manager/manager.yaml b/config/manager/manager.yaml index 6be04fc11..546488fd0 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -56,6 +56,12 @@ spec: value: $(IMAGES_MOVERESULTSIMAGE) - name: IMAGES_MARIADB value: $(IMAGES_MARIADB) + - name: IMAGES_MLMDENVOY + value: $(IMAGES_MLMDENVOY) + - name: IMAGES_MLMDGRPC + value: $(IMAGES_MLMDGRPC) + - name: IMAGES_MLMDWRITER + value: $(IMAGES_MLMDWRITER) securityContext: allowPrivilegeEscalation: false capabilities: diff --git a/controllers/config/defaults.go b/controllers/config/defaults.go index 964e84eaa..628445ae9 100644 --- a/controllers/config/defaults.go +++ b/controllers/config/defaults.go @@ -48,6 +48,8 @@ const ( ObjectStorageSecretName = "mlpipeline-minio-artifact" // hardcoded in kfp-tekton ObjectStorageAccessKey = "accesskey" ObjectStorageSecretKey = "secretkey" + + MlmdGrpcPort = "8080" ) // DSPO Config File Paths @@ -60,6 +62,9 @@ const ( APIServerMoveResultsImagePath = "Images.MoveResultsImage" MariaDBImagePath = "Images.MariaDB" OAuthProxyImagePath = "Images.OAuthProxy" + MlmdEnvoyImagePath = "Images.MlmdEnvoy" + MlmdGRPCImagePath = "Images.MlmdGRPC" + MlmdWriterImagePath = "Images.MlmdWriter" ) const ( @@ -96,6 +101,9 @@ var ( MariaDBResourceRequirements = createResourceRequirement(resource.MustParse("300m"), resource.MustParse("800Mi"), resource.MustParse("1"), resource.MustParse("1Gi")) MinioResourceRequirements = createResourceRequirement(resource.MustParse("200m"), resource.MustParse("100Mi"), resource.MustParse("250m"), resource.MustParse("1Gi")) MlPipelineUIResourceRequirements = createResourceRequirement(resource.MustParse("100m"), resource.MustParse("256Mi"), resource.MustParse("100m"), resource.MustParse("256Mi")) + MlmdEnvoyResourceRequirements = createResourceRequirement(resource.MustParse("100m"), resource.MustParse("256Mi"), resource.MustParse("100m"), resource.MustParse("256Mi")) + MlmdGRPCResourceRequirements = 
createResourceRequirement(resource.MustParse("100m"), resource.MustParse("256Mi"), resource.MustParse("100m"), resource.MustParse("256Mi")) + MlmdWriterResourceRequirements = createResourceRequirement(resource.MustParse("100m"), resource.MustParse("256Mi"), resource.MustParse("100m"), resource.MustParse("256Mi")) ) func createResourceRequirement(RequestsCPU resource.Quantity, RequestsMemory resource.Quantity, LimitsCPU resource.Quantity, LimitsMemory resource.Quantity) dspav1alpha1.ResourceRequirements { diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 730bb7b84..90e47dd51 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -50,7 +50,7 @@ type DSPAReconciler struct { func (r *DSPAReconciler) Apply(owner mf.Owner, params *DSPAParams, template string, fns ...mf.Transformer) error { tmplManifest, err := config.Manifest(r.Client, r.TemplatesPath+template, params) if err != nil { - return fmt.Errorf("error loading template yaml: %w", err) + return fmt.Errorf("error loading template (%s) yaml: %w", template, err) } tmplManifest, err = tmplManifest.Transform( mf.InjectOwner(owner), @@ -70,7 +70,7 @@ func (r *DSPAReconciler) Apply(owner mf.Owner, params *DSPAParams, template stri func (r *DSPAReconciler) ApplyWithoutOwner(params *DSPAParams, template string, fns ...mf.Transformer) error { tmplManifest, err := config.Manifest(r.Client, r.TemplatesPath+template, params) if err != nil { - return fmt.Errorf("error loading template yaml: %w", err) + return fmt.Errorf("error loading template (%s) yaml: %w", template, err) } tmplManifest, err = tmplManifest.Transform(fns...) 
@@ -84,7 +84,7 @@ func (r *DSPAReconciler) ApplyWithoutOwner(params *DSPAParams, template string, func (r *DSPAReconciler) DeleteResource(params *DSPAParams, template string, fns ...mf.Transformer) error { tmplManifest, err := config.Manifest(r.Client, r.TemplatesPath+template, params) if err != nil { - return fmt.Errorf("error loading template yaml: %w", err) + return fmt.Errorf("error loading template (%s) yaml: %w", template, err) } tmplManifest, err = tmplManifest.Transform(fns...) @@ -278,6 +278,11 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. return ctrl.Result{}, err } + err = r.ReconcileMLMD(dspa, params) + if err != nil { + return ctrl.Result{}, err + } + log.Info("Updating CR status") // Refresh DSPA before updating err = r.Get(ctx, req.NamespacedName, dspa) diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 2854f66ce..0a35e0e14 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -46,6 +46,7 @@ type DSPAParams struct { MlPipelineUI *dspa.MlPipelineUI MariaDB *dspa.MariaDB Minio *dspa.Minio + MLMD *dspa.MLMD DBConnection ObjectStorageConnection } @@ -87,6 +88,13 @@ func (p *DSPAParams) UsingExternalStorage(dsp *dspa.DataSciencePipelinesApplicat return false } +func (p *DSPAParams) UsingMLMD(dsp *dspa.DataSciencePipelinesApplication) bool { + if dsp.Spec.MLMD != nil { + return dsp.Spec.MLMD.Deploy + } + return false +} + func passwordGen(n int) string { rand.Seed(time.Now().UnixNano()) var chars = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890") @@ -337,6 +345,41 @@ func (p *DSPAParams) SetupObjectParams(ctx context.Context, dsp *dspa.DataScienc return nil } +func (p *DSPAParams) SetupMLMD(ctx context.Context, dsp *dspa.DataSciencePipelinesApplication, client client.Client, log logr.Logger) error { + if p.MLMD != nil { + if p.MLMD.Envoy == nil { + p.MLMD.Envoy = &dspa.Envoy{ + Image: 
config.GetStringConfigWithDefault(config.MlmdEnvoyImagePath, config.DefaultImageValue), + } + } + if p.MLMD.GRPC == nil { + p.MLMD.GRPC = &dspa.GRPC{ + Image: config.GetStringConfigWithDefault(config.MlmdGRPCImagePath, config.DefaultImageValue), + } + } + if p.MLMD.Writer == nil { + p.MLMD.Writer = &dspa.Writer{ + Image: config.GetStringConfigWithDefault(config.MlmdWriterImagePath, config.DefaultImageValue), + } + } + + mlmdEnvoyImageFromConfig := config.GetStringConfigWithDefault(config.MlmdEnvoyImagePath, config.DefaultImageValue) + mlmdGRPCImageFromConfig := config.GetStringConfigWithDefault(config.MlmdGRPCImagePath, config.DefaultImageValue) + mlmdWriterImageFromConfig := config.GetStringConfigWithDefault(config.MlmdWriterImagePath, config.DefaultImageValue) + + setStringDefault(mlmdEnvoyImageFromConfig, &p.MLMD.Envoy.Image) + setStringDefault(mlmdGRPCImageFromConfig, &p.MLMD.GRPC.Image) + setStringDefault(mlmdWriterImageFromConfig, &p.MLMD.Writer.Image) + + setResourcesDefault(config.MlmdEnvoyResourceRequirements, &p.MLMD.Envoy.Resources) + setResourcesDefault(config.MlmdGRPCResourceRequirements, &p.MLMD.GRPC.Resources) + setResourcesDefault(config.MlmdWriterResourceRequirements, &p.MLMD.Writer.Resources) + + setStringDefault(config.MlmdGrpcPort, &p.MLMD.GRPC.Port) + } + return nil +} + func setStringDefault(defaultValue string, value *string) { if *value == "" { *value = defaultValue @@ -361,6 +404,7 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip p.MariaDB = dsp.Spec.MariaDB.DeepCopy() p.Minio = dsp.Spec.Minio.DeepCopy() p.OAuthProxy = config.GetStringConfigWithDefault(config.OAuthProxyImagePath, config.DefaultImageValue) + p.MLMD = dsp.Spec.MLMD.DeepCopy() // TODO: If p. 
is nil we should create defaults @@ -404,7 +448,12 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip setResourcesDefault(config.MlPipelineUIResourceRequirements, &p.MlPipelineUI.Resources) } - err := p.SetupDBParams(ctx, dsp, client, log) + err := p.SetupMLMD(ctx, dsp, client, log) + if err != nil { + return err + } + + err = p.SetupDBParams(ctx, dsp, client, log) if err != nil { return err } diff --git a/controllers/mlmd.go b/controllers/mlmd.go new file mode 100644 index 000000000..08630a7b9 --- /dev/null +++ b/controllers/mlmd.go @@ -0,0 +1,51 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" +) + +var mlmdTemplates = []string{ + "ml-metadata/metadata-envoy.deployment.yaml.tmpl", + "ml-metadata/metadata-envoy.service.yaml.tmpl", + "ml-metadata/metadata-grpc.deployment.yaml.tmpl", + "ml-metadata/metadata-grpc.service.yaml.tmpl", + "ml-metadata/metadata-grpc.serviceaccount.yaml.tmpl", + "ml-metadata/metadata-writer.deployment.yaml.tmpl", + "ml-metadata/metadata-writer.role.yaml.tmpl", + "ml-metadata/metadata-writer.rolebinding.yaml.tmpl", + "ml-metadata/metadata-writer.serviceaccount.yaml.tmpl", +} + +func (r *DSPAReconciler) ReconcileMLMD(dsp *dspav1alpha1.DataSciencePipelinesApplication, + params *DSPAParams) error { + + log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) + + if params.UsingMLMD(dsp) { + log.Info("Applying ML-Metadata (MLMD) Resources") + + for _, template := range mlmdTemplates { + err := r.Apply(dsp, params, template) + if err != nil { + return err + } + } + log.Info("Finished applying MLMD Resources") + } + return nil +} diff --git a/controllers/testdata/declarative/case_2/expected/created/mlpipelines-ui_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/mlpipelines-ui_deployment.yaml index f7060c384..06cfba857 100644 --- a/controllers/testdata/declarative/case_2/expected/created/mlpipelines-ui_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/mlpipelines-ui_deployment.yaml @@ -46,6 +46,10 @@ spec: value: ds-pipeline-testdsp2 - name: ML_PIPELINE_SERVICE_PORT value: '8888' + - name: METADATA_ENVOY_SERVICE_SERVICE_HOST + value: ds-pipeline-metadata-envoy-testdsp2 + - name: METADATA_ENVOY_SERVICE_SERVICE_PORT + value: "9090" image: frontend:test2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/controllers/testdata/declarative/case_4/expected/created/mlpipelines-ui_deployment.yaml 
b/controllers/testdata/declarative/case_4/expected/created/mlpipelines-ui_deployment.yaml index 721891e96..abc6f1f0c 100644 --- a/controllers/testdata/declarative/case_4/expected/created/mlpipelines-ui_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/mlpipelines-ui_deployment.yaml @@ -46,6 +46,10 @@ spec: value: ds-pipeline-testdsp4 - name: ML_PIPELINE_SERVICE_PORT value: '8888' + - name: METADATA_ENVOY_SERVICE_SERVICE_HOST + value: ds-pipeline-metadata-envoy-testdsp4 + - name: METADATA_ENVOY_SERVICE_SERVICE_PORT + value: "9090" image: this-frontend-image-from-cr-should-be-used:test4 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/kfdef/kfdef.yaml b/kfdef/kfdef.yaml index 1f2a10680..7bb962d86 100644 --- a/kfdef/kfdef.yaml +++ b/kfdef/kfdef.yaml @@ -24,6 +24,12 @@ spec: value: registry.redhat.io/rhel8/mariadb-103:1-188 - name: IMAGES_OAUTHPROXY value: registry.redhat.io/openshift4/ose-oauth-proxy:v4.12.0 + - name: IMAGES_MLMDENVOY + value: quay.io/opendatahub/ds-pipelines-metadata-envoy:1.7.0 + - name: IMAGES_MLMDGRPC + value: quay.io/opendatahub/ds-pipelines-metadata-grpc:1.0.0 + - name: IMAGES_MLMDWRITER + value: quay.io/opendatahub/ds-pipelines-metadata-writer:1.1.0 repoRef: name: manifests path: config From 0b3f10a34dc878eaf40b27d834db4e18919e2654 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 8 Jun 2023 16:04:57 -0400 Subject: [PATCH 03/19] Add dynamic mlmd-envoy config - Removes requirement for mlmd-grpc Service to be explicitly named `metadata-grpc-service` --- .../metadata-envoy.configmap.yaml.tmpl | 54 +++++++++++++++++++ .../metadata-envoy.deployment.yaml.tmpl | 8 +++ controllers/mlmd.go | 1 + 3 files changed, 63 insertions(+) create mode 100644 config/internal/ml-metadata/metadata-envoy.configmap.yaml.tmpl diff --git a/config/internal/ml-metadata/metadata-envoy.configmap.yaml.tmpl b/config/internal/ml-metadata/metadata-envoy.configmap.yaml.tmpl new file mode 100644 index 000000000..82ae945a6 --- /dev/null 
+++ b/config/internal/ml-metadata/metadata-envoy.configmap.yaml.tmpl @@ -0,0 +1,54 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: ds-pipeline-metadata-envoy-config-{{.Name}} + namespace: {{.Namespace}} + labels: + app: ds-pipeline-metadata-envoy-{{.Name}} + component: data-science-pipelines +data: + envoy.yaml: |- + admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: { address: 0.0.0.0, port_value: 9901 } + + static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 0.0.0.0, port_value: 9090 } + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: { prefix: "/" } + route: + cluster: metadata-cluster + max_grpc_timeout: 0s + cors: + allow_origin: + - "*" + allow_methods: GET, PUT, DELETE, POST, OPTIONS + allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,custom-header-1,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout + max_age: "1728000" + expose_headers: custom-header-1,grpc-status,grpc-message + http_filters: + - name: envoy.grpc_web + - name: envoy.cors + - name: envoy.router + clusters: + - name: metadata-cluster + connect_timeout: 30.0s + type: logical_dns + http2_protocol_options: {} + lb_policy: round_robin + hosts: [{ socket_address: { address: "ds-pipeline-metadata-grpc-{{.Name}}", port_value: {{.MLMD.GRPC.Port}} }}] diff --git a/config/internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl b/config/internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl index e109b767a..a7b1cc888 100644 --- a/config/internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl +++ b/config/internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl @@ -59,3 +59,11 @@ spec: memory: {{.MLMD.Envoy.Resources.Limits.Memory}} {{ end }} {{ end }} 
+ volumeMounts: + - mountPath: /etc/envoy.yaml + name: envoy-config + subPath: envoy.yaml + volumes: + - name: envoy-config + configMap: + name: ds-pipeline-metadata-envoy-config-{{.Name}} diff --git a/controllers/mlmd.go b/controllers/mlmd.go index 08630a7b9..1823f0131 100644 --- a/controllers/mlmd.go +++ b/controllers/mlmd.go @@ -20,6 +20,7 @@ import ( ) var mlmdTemplates = []string{ + "ml-metadata/metadata-envoy.configmap.yaml.tmpl", "ml-metadata/metadata-envoy.deployment.yaml.tmpl", "ml-metadata/metadata-envoy.service.yaml.tmpl", "ml-metadata/metadata-grpc.deployment.yaml.tmpl", From 646bbee00c493e0b96722bd27e580d415e77a9d9 Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Thu, 8 Jun 2023 15:24:53 -0400 Subject: [PATCH 04/19] Update OCP pipelines pre-req version req. --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c37dca708..9225b1562 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,9 @@ To get started you will first need to satisfy the following pre-requisites: ## Pre-requisites 1. An OpenShift cluster that is 4.9 or higher. 2. You will need to be logged into this cluster as [cluster admin] via [oc client]. -3. The OpenShift Cluster must have OpenShift Pipelines 1.7.2 or higher installed. Instructions [here][OCP Pipelines Operator]. +3. The OpenShift Cluster must have OpenShift Pipelines 1.8 or higher installed. We recommend channel pipelines-1.8 + on OCP 4.10 and pipelines-1.9 or pipelines-1.10 for OCP 4.11, 4.12 and 4.13. + Instructions [here][OCP Pipelines Operator]. 4. Based on installation type you will need one of the following: 1. For Standalone method: You will need to have [Kustomize] version 4.5+ installed 2. For ODH method: The Open Data Hub operator needs to be installed. You can install it via [OperatorHub][installodh]. @@ -260,7 +262,7 @@ see these logs after clicking this step and navigating to "Logs." 
## Using the API -> Note: By default we use kfp-tekton v1.4 for this section so you will need [kfp-tekton v1.4.x sdk installed][kfp-tekton] +> Note: By default we use kfp-tekton 1.5.x for this section so you will need [kfp-tekton v1.5.x sdk installed][kfp-tekton] > in your environment In the previous step we submitted a generated `Pipeline` yaml via the GUI. We can also submit the `Pipeline` code From c301b4ea2aa5ebd5fc12eb3516d74782bb82596a Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Mon, 12 Jun 2023 17:41:16 -0400 Subject: [PATCH 05/19] Add MLMD Unit Test --- .../testdata/declarative/case_5/config.yaml | 15 ++ .../declarative/case_5/deploy/cr.yaml | 19 ++ .../created/apiserver_deployment.yaml | 202 ++++++++++++++++++ .../created/configmap_artifact_script.yaml | 28 +++ .../expected/created/mariadb_deployment.yaml | 76 +++++++ .../created/metadata-envoy_deployment.yaml | 60 ++++++ .../created/metadata-grpc_deployment.yaml | 71 ++++++ .../created/metadata-writer_deployment.yaml | 60 ++++++ .../created/mlpipelines-ui_deployment.yaml | 150 +++++++++++++ .../created/persistence-agent_deployment.yaml | 63 ++++++ .../scheduled-workflow_deployment.yaml | 58 +++++ 11 files changed, 802 insertions(+) create mode 100644 controllers/testdata/declarative/case_5/config.yaml create mode 100644 controllers/testdata/declarative/case_5/deploy/cr.yaml create mode 100644 controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml create mode 100644 controllers/testdata/declarative/case_5/expected/created/configmap_artifact_script.yaml create mode 100644 controllers/testdata/declarative/case_5/expected/created/mariadb_deployment.yaml create mode 100644 controllers/testdata/declarative/case_5/expected/created/metadata-envoy_deployment.yaml create mode 100644 controllers/testdata/declarative/case_5/expected/created/metadata-grpc_deployment.yaml create mode 100644 controllers/testdata/declarative/case_5/expected/created/metadata-writer_deployment.yaml create 
mode 100644 controllers/testdata/declarative/case_5/expected/created/mlpipelines-ui_deployment.yaml create mode 100644 controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml create mode 100644 controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml diff --git a/controllers/testdata/declarative/case_5/config.yaml b/controllers/testdata/declarative/case_5/config.yaml new file mode 100644 index 000000000..4cb489ea3 --- /dev/null +++ b/controllers/testdata/declarative/case_5/config.yaml @@ -0,0 +1,15 @@ +# When a minimal DSPA is deployed +Images: + ApiServer: api-server:test5 + Artifact: artifact-manager:test5 + PersistentAgent: persistenceagent:test5 + ScheduledWorkflow: scheduledworkflow:test5 + Cache: ubi-minimal:test5 + MoveResultsImage: busybox:test5 + MlPipelineUI: frontend:test5 + MariaDB: mariadb:test5 + Minio: minio:test5 + OAuthProxy: oauth-proxy:test5 + MlmdEnvoy: metadata-envoy:changeme + MlmdGrpc: metadata-grpc:changeme + MlmdWriter: metadata-grpc:changeme diff --git a/controllers/testdata/declarative/case_5/deploy/cr.yaml b/controllers/testdata/declarative/case_5/deploy/cr.yaml new file mode 100644 index 000000000..98dd89e0a --- /dev/null +++ b/controllers/testdata/declarative/case_5/deploy/cr.yaml @@ -0,0 +1,19 @@ +apiVersion: datasciencepipelinesapplications.opendatahub.io/v1alpha1 +kind: DataSciencePipelinesApplication +metadata: + name: testdsp5 +spec: + objectStorage: + minio: + image: minio:test5 + mlpipelineUI: + image: frontend:test5 + mlmd: + deploy: true + envoy: + image: metadata-envoy:test5 + grpc: + image: metadata-grpc:test5 + port: "1337" + writer: + image: metadata-writer:test5 diff --git a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml new file mode 100644 index 000000000..7ecef37aa --- /dev/null +++ 
b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml @@ -0,0 +1,202 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-testdsp5 + namespace: default + labels: + app: ds-pipeline-testdsp5 + component: data-science-pipelines +spec: + selector: + matchLabels: + app: ds-pipeline-testdsp5 + component: data-science-pipelines + template: + metadata: + labels: + app: ds-pipeline-testdsp5 + component: data-science-pipelines + spec: + containers: + - env: + - name: POD_NAMESPACE + value: "default" + - name: DBCONFIG_USER + value: "mlpipeline" + - name: DBCONFIG_PASSWORD + valueFrom: + secretKeyRef: + key: "password" + name: "ds-pipeline-db-testdsp5" + - name: DBCONFIG_DBNAME + value: "mlpipeline" + - name: DBCONFIG_HOST + value: "mariadb-testdsp5.default.svc.cluster.local" + - name: DBCONFIG_PORT + value: "3306" + - name: ARTIFACT_BUCKET + value: "mlpipeline" + - name: ARTIFACT_ENDPOINT + value: "http://minio-testdsp5.default.svc.cluster.local:9000" + - name: ARTIFACT_SCRIPT + valueFrom: + configMapKeyRef: + key: "artifact_script" + name: "ds-pipeline-artifact-script-testdsp5" + - name: ARTIFACT_IMAGE + value: "artifact-manager:test5" + - name: ARCHIVE_LOGS + value: "false" + - name: TRACK_ARTIFACTS + value: "true" + - name: STRIP_EOF + value: "true" + - name: PIPELINE_RUNTIME + value: "tekton" + - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT + value: "pipeline-runner-testdsp5" + - name: INJECT_DEFAULT_SCRIPT + value: "true" + - name: APPLY_TEKTON_CUSTOM_RESOURCE + value: "true" + - name: TERMINATE_STATUS + value: "Cancelled" + - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION + value: "true" + - name: DBCONFIG_CONMAXLIFETIMESEC + value: "120" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST + value: "ds-pipeline-visualizationserver" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT + value: "8888" + - name: OBJECTSTORECONFIG_BUCKETNAME + value: "mlpipeline" + - name: OBJECTSTORECONFIG_ACCESSKEY + valueFrom: + 
secretKeyRef: + key: "accesskey" + name: "mlpipeline-minio-artifact" + - name: OBJECTSTORECONFIG_SECRETACCESSKEY + valueFrom: + secretKeyRef: + key: "secretkey" + name: "mlpipeline-minio-artifact" + - name: OBJECTSTORECONFIG_SECURE + value: "false" + - name: MINIO_SERVICE_SERVICE_HOST + value: "minio-testdsp5.default.svc.cluster.local" + - name: MINIO_SERVICE_SERVICE_PORT + value: "9000" + - name: CACHE_IMAGE + value: "ubi-minimal:test5" + - name: MOVERESULTS_IMAGE + value: "busybox:test5" + image: api-server:test5 + imagePullPolicy: Always + name: ds-pipeline-api-server + ports: + - containerPort: 8888 + name: http + protocol: TCP + - containerPort: 8887 + name: grpc + protocol: TCP + livenessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:8888/apis/v1beta1/healthz + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + readinessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:8888/apis/v1beta1/healthz + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + requests: + cpu: 250m + memory: 500Mi + limits: + cpu: 500m + memory: 1Gi + volumeMounts: + - mountPath: /config/sample_config.json + name: sample-config + subPath: sample_config.json + - mountPath: /samples/ + name: sample-pipeline + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-testdsp5 + - --upstream=http://localhost:8888 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp5","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp5","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: oauth-proxy:test5 + ports: + - 
containerPort: 8443 + name: oauth + protocol: TCP + livenessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + volumes: + - name: proxy-tls + secret: + secretName: ds-pipelines-proxy-tls-testdsp5 + defaultMode: 420 + - configMap: + defaultMode: 420 + name: sample-config-testdsp5 + name: sample-config + - configMap: + defaultMode: 420 + name: sample-pipeline-testdsp5 + name: sample-pipeline + + serviceAccountName: ds-pipeline-testdsp5 diff --git a/controllers/testdata/declarative/case_5/expected/created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_5/expected/created/configmap_artifact_script.yaml new file mode 100644 index 000000000..1220718c8 --- /dev/null +++ b/controllers/testdata/declarative/case_5/expected/created/configmap_artifact_script.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +data: + artifact_script: |- + #!/usr/bin/env sh + push_artifact() { + if [ -f "$2" ]; then + tar -cvzf $1.tgz $2 + aws s3 --endpoint http://minio-testdsp5.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + else + echo "$2 file does not exist. 
Skip artifact tracking for $1" + fi + } + push_log() { + cat /var/log/containers/$PODNAME*$NAMESPACE*step-main*.log > step-main.log + push_artifact main-log step-main.log + } + strip_eof() { + if [ -f "$2" ]; then + awk 'NF' $2 | head -c -1 > $1_temp_save && cp $1_temp_save $2 + fi + } +kind: ConfigMap +metadata: + name: ds-pipeline-artifact-script-testdsp5 + namespace: default + labels: + app: ds-pipeline-testdsp5 + component: data-science-pipelines diff --git a/controllers/testdata/declarative/case_5/expected/created/mariadb_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/mariadb_deployment.yaml new file mode 100644 index 000000000..94df4dafc --- /dev/null +++ b/controllers/testdata/declarative/case_5/expected/created/mariadb_deployment.yaml @@ -0,0 +1,76 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mariadb-testdsp5 + namespace: default + labels: + app: mariadb-testdsp5 + component: data-science-pipelines +spec: + strategy: + type: Recreate # Need this since backing PVC is ReadWriteOnce, which creates resource lock condition in default Rolling strategy + selector: + matchLabels: + app: mariadb-testdsp5 + component: data-science-pipelines + template: + metadata: + labels: + app: mariadb-testdsp5 + component: data-science-pipelines + spec: + containers: + - name: mariadb + image: mariadb:test5 + ports: + - containerPort: 3306 + protocol: TCP + readinessProbe: + exec: + command: + - /bin/sh + - "-i" + - "-c" + - >- + MYSQL_PWD=$MYSQL_PASSWORD mysql -h 127.0.0.1 -u $MYSQL_USER -D + $MYSQL_DATABASE -e 'SELECT 1' + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 3306 + timeoutSeconds: 1 + env: + - name: MYSQL_USER + value: "mlpipeline" + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + key: "password" + name: 
"ds-pipeline-db-testdsp5" + - name: MYSQL_DATABASE + value: "mlpipeline" + - name: MYSQL_ALLOW_EMPTY_PASSWORD + value: "true" + resources: + requests: + cpu: 300m + memory: 800Mi + limits: + cpu: "1" + memory: 1Gi + volumeMounts: + - name: mariadb-persistent-storage + mountPath: /var/lib/mysql + volumes: + - name: mariadb-persistent-storage + persistentVolumeClaim: + claimName: mariadb-testdsp5 diff --git a/controllers/testdata/declarative/case_5/expected/created/metadata-envoy_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/metadata-envoy_deployment.yaml new file mode 100644 index 000000000..b8dfebe7a --- /dev/null +++ b/controllers/testdata/declarative/case_5/expected/created/metadata-envoy_deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-metadata-envoy-testdsp5 + namespace: default + labels: + app: ds-pipeline-metadata-envoy-testdsp5 + component: data-science-pipelines +spec: + replicas: 1 + selector: + matchLabels: + app: ds-pipeline-metadata-envoy-testdsp5 + component: data-science-pipelines + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + labels: + app: ds-pipeline-metadata-envoy-testdsp5 + component: data-science-pipelines + spec: + containers: + - image: metadata-envoy:test5 + name: container + ports: + - containerPort: 9090 + name: md-envoy + protocol: TCP + - containerPort: 9901 + name: envoy-admin + protocol: TCP + livenessProbe: + initialDelaySeconds: 30 + periodSeconds: 5 + tcpSocket: + port: md-envoy + timeoutSeconds: 2 + readinessProbe: + initialDelaySeconds: 3 + periodSeconds: 5 + tcpSocket: + port: md-envoy + timeoutSeconds: 2 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/envoy.yaml + name: envoy-config + subPath: envoy.yaml + volumes: + - name: envoy-config + configMap: + name: ds-pipeline-metadata-envoy-config-testdsp5 + defaultMode: 420 diff --git 
a/controllers/testdata/declarative/case_5/expected/created/metadata-grpc_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/metadata-grpc_deployment.yaml new file mode 100644 index 000000000..6c202e0ff --- /dev/null +++ b/controllers/testdata/declarative/case_5/expected/created/metadata-grpc_deployment.yaml @@ -0,0 +1,71 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-metadata-grpc-testdsp5 + namespace: default + labels: + app: ds-pipeline-metadata-grpc-testdsp5 + component: data-science-pipelines +spec: + replicas: 1 + selector: + matchLabels: + app: ds-pipeline-metadata-grpc-testdsp5 + component: data-science-pipelines + template: + metadata: + labels: + app: ds-pipeline-metadata-grpc-testdsp5 + component: data-science-pipelines + spec: + containers: + - args: + - --grpc_port=1337 + - --mysql_config_database=$(MYSQL_DATABASE) + - --mysql_config_host=$(MYSQL_HOST) + - --mysql_config_port=$(MYSQL_PORT) + - --mysql_config_user=$(DBCONFIG_USER) + - --mysql_config_password=$(DBCONFIG_PASSWORD) + - --enable_database_upgrade=true + command: + - /bin/metadata_store_server + env: + - name: DBCONFIG_USER + value: "mlpipeline" + - name: DBCONFIG_PASSWORD + valueFrom: + secretKeyRef: + key: "password" + name: "ds-pipeline-db-testdsp5" + - name: MYSQL_DATABASE + value: "mlpipeline" + - name: MYSQL_HOST + value: mariadb-testdsp5.default.svc.cluster.local + - name: MYSQL_PORT + value: "3306" + image: metadata-grpc:test5 + name: container + ports: + - containerPort: 1337 + name: grpc-api + protocol: TCP + livenessProbe: + initialDelaySeconds: 30 + periodSeconds: 5 + tcpSocket: + port: grpc-api + timeoutSeconds: 2 + readinessProbe: + initialDelaySeconds: 3 + periodSeconds: 5 + tcpSocket: + port: grpc-api + timeoutSeconds: 2 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + serviceAccountName: ds-pipeline-metadata-grpc-testdsp5 diff --git 
a/controllers/testdata/declarative/case_5/expected/created/metadata-writer_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/metadata-writer_deployment.yaml new file mode 100644 index 000000000..4248db028 --- /dev/null +++ b/controllers/testdata/declarative/case_5/expected/created/metadata-writer_deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-metadata-writer-testdsp5 + namespace: default + labels: + app: ds-pipeline-metadata-writer-testdsp5 + component: data-science-pipelines +spec: + replicas: 1 + selector: + matchLabels: + app: ds-pipeline-metadata-writer-testdsp5 + component: data-science-pipelines + template: + metadata: + labels: + app: ds-pipeline-metadata-writer-testdsp5 + component: data-science-pipelines + spec: + containers: + - env: + - name: NAMESPACE_TO_WATCH + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: PIPELINE_RUNTIME + value: tekton + - name: ARCHIVE_LOGS + value: "false" + - name: METADATA_GRPC_SERVICE_SERVICE_HOST + value: "ds-pipeline-metadata-grpc-testdsp5" + - name: METADATA_GRPC_SERVICE_SERVICE_PORT + value: "1337" + image: metadata-writer:test5 + name: main + livenessProbe: + exec: + command: + - pidof + - python3 + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 2 + readinessProbe: + exec: + command: + - pidof + - python3 + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi diff --git a/controllers/testdata/declarative/case_5/expected/created/mlpipelines-ui_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/mlpipelines-ui_deployment.yaml new file mode 100644 index 000000000..d9664634f --- /dev/null +++ b/controllers/testdata/declarative/case_5/expected/created/mlpipelines-ui_deployment.yaml @@ -0,0 +1,150 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: 
ds-pipeline-ui-testdsp5 + namespace: default + labels: + app: ds-pipeline-ui-testdsp5 + component: data-science-pipelines +spec: + selector: + matchLabels: + app: ds-pipeline-ui-testdsp5 + component: data-science-pipelines + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: ds-pipeline-ui-testdsp5 + component: data-science-pipelines + spec: + containers: + - env: + - name: VIEWER_TENSORBOARD_POD_TEMPLATE_SPEC_PATH + value: /etc/config/viewer-pod-template.json + - name: MINIO_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + key: "accesskey" + name: "mlpipeline-minio-artifact" + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + key: "secretkey" + name: "mlpipeline-minio-artifact" + - name: ALLOW_CUSTOM_VISUALIZATIONS + value: "true" + - name: ARGO_ARCHIVE_LOGS + value: "true" + - name: ML_PIPELINE_SERVICE_HOST + value: ds-pipeline-testdsp5 + - name: ML_PIPELINE_SERVICE_PORT + value: '8888' + - name: METADATA_ENVOY_SERVICE_SERVICE_HOST + value: ds-pipeline-metadata-envoy-testdsp5 + - name: METADATA_ENVOY_SERVICE_SERVICE_PORT + value: "9090" + image: frontend:test5 + imagePullPolicy: IfNotPresent + livenessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:3000/apis/v1beta1/healthz + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + name: ds-pipeline-ui + ports: + - containerPort: 3000 + protocol: TCP + readinessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:3000/apis/v1beta1/healthz + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/config + name: config-volume + readOnly: true + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - 
--openshift-service-account=ds-pipeline-ui-testdsp5 + - --upstream=http://localhost:3000 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-ui-testdsp5","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-ui-testdsp5","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: oauth-proxy:test5 + ports: + - containerPort: 8443 + name: https + protocol: TCP + livenessProbe: + httpGet: + path: /oauth/healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + serviceAccountName: ds-pipeline-ui-testdsp5 + volumes: + - configMap: + name: ds-pipeline-ui-configmap-testdsp5 + defaultMode: 420 + name: config-volume + - name: proxy-tls + secret: + secretName: ds-pipelines-ui-proxy-tls-testdsp5 + defaultMode: 420 diff --git a/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml new file mode 100644 index 000000000..0d4ccd5d9 --- /dev/null +++ b/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-persistenceagent-testdsp5 + namespace: default + labels: + app: ds-pipeline-persistenceagent-testdsp5 + 
component: data-science-pipelines +spec: + selector: + matchLabels: + app: ds-pipeline-persistenceagent-testdsp5 + component: data-science-pipelines + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: ds-pipeline-persistenceagent-testdsp5 + component: data-science-pipelines + spec: + containers: + - env: + - name: NAMESPACE + value: "default" + image: persistenceagent:test5 + imagePullPolicy: IfNotPresent + name: ds-pipeline-persistenceagent + command: + - persistence_agent + - "--logtostderr=true" + - "--ttlSecondsAfterWorkflowFinish=86400" + - "--numWorker=2" + - "--mlPipelineAPIServerName=ds-pipeline-testdsp5" + - "--namespace=default" + - "--mlPipelineServiceHttpPort=8888" + - "--mlPipelineServiceGRPCPort=8887" + livenessProbe: + exec: + command: + - test + - -x + - persistence_agent + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 2 + readinessProbe: + exec: + command: + - test + - -x + - persistence_agent + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + requests: + cpu: 120m + memory: 500Mi + limits: + cpu: 250m + memory: 1Gi + serviceAccountName: ds-pipeline-persistenceagent-testdsp5 diff --git a/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml new file mode 100644 index 000000000..fb06409b0 --- /dev/null +++ b/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-scheduledworkflow-testdsp5 + namespace: default + labels: + app: ds-pipeline-scheduledworkflow-testdsp5 + component: data-science-pipelines +spec: + selector: + matchLabels: + app: ds-pipeline-scheduledworkflow-testdsp5 + component: data-science-pipelines + template: + metadata: + annotations: + 
cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: ds-pipeline-scheduledworkflow-testdsp5 + component: data-science-pipelines + spec: + containers: + - env: + - name: CRON_SCHEDULE_TIMEZONE + value: "UTC" + image: scheduledworkflow:test5 + imagePullPolicy: IfNotPresent + name: ds-pipeline-scheduledworkflow + command: + - controller + - "--logtostderr=true" + - "--namespace=testdsp5" + livenessProbe: + exec: + command: + - test + - -x + - controller + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 2 + readinessProbe: + exec: + command: + - test + - -x + - controller + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + requests: + cpu: 120m + memory: 100Mi + limits: + cpu: 250m + memory: 250Mi + serviceAccountName: ds-pipeline-scheduledworkflow-testdsp5 From 806e139aca0a347861fa3772fad48586440ce8ec Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Mon, 12 Jun 2023 17:46:29 -0400 Subject: [PATCH 06/19] Update DSPA "All Fields" CR sample --- config/samples/dspa_all_fields.yaml | 30 +++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/config/samples/dspa_all_fields.yaml b/config/samples/dspa_all_fields.yaml index ed791c8a7..e765758d8 100644 --- a/config/samples/dspa_all_fields.yaml +++ b/config/samples/dspa_all_fields.yaml @@ -128,6 +128,36 @@ spec: # secretName: somesecret-db-sample # accessKey: somekey # secretKey: somekey + mlmd: # Deploys an optional ML-Metadata Component + deploy: true + envoy: + image: quay.io/opendatahub/ds-pipelines-metadata-envoy:1.7.0 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + grpc: + image: quay.io/opendatahub/ds-pipelines-metadata-grpc:1.0.0 + port: "8080" + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + writer: + image: quay.io/opendatahub/ds-pipelines-metadata-writer:1.1.0 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi 
status: # Reports True iff: # * ApiServerReady, PersistenceAgentReady, ScheduledWorkflowReady, DatabaseReady, ObjectStorageReady report True From b1dcd59c9f40b95e495ff9ed1401df5f61c7cf50 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Mon, 12 Jun 2023 18:03:39 -0400 Subject: [PATCH 07/19] Update README with details on deploying optional components --- README.md | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9225b1562..d9eb7eae1 100644 --- a/README.md +++ b/README.md @@ -197,15 +197,83 @@ When a `DataSciencePipelinesApplication` is deployed, the following components a * APIServer * Persistence Agent * Scheduled Workflow controller -* MLPipelines UI If specified in the `DataSciencePipelinesApplication` resource, the following components may also be additionally deployed: * MariaDB * Minio +* MLPipelines UI +* MLMD (ML Metadata) To understand how these components interact with each other please refer to the upstream [Kubeflow Pipelines Architectural Overview] documentation. +## Deploying Optional Components + +### MariaDB +To deploy a standalone MariaDB metadata database (rather than providing your own database connection details), simply add a `mariaDB` item under the `spec.database` in your DSPA definition with an `deploy` key set to `true`. All other fields are defaultable/optional, see [All Fields DSPA Example](./config/samples/dspa_all_fields.yaml) for full details. Note that this component is mutually exclusive with externally-provided databases (defined by `spec.database.externalDB`). + +``` +apiVersion: datasciencepipelinesapplications.opendatahub.io/v1alpha1 +kind: DataSciencePipelinesApplication +metadata: + name: sample +spec: + ... 
+ database: + mariaDB: # mutually exclusive with externalDB + deploy: true + +``` + +### Minio +To deploy a Minio Object Storage component (rather than providing your own object storage connection details), simply add a `minio` item under the `spec.objectStorage` in your DSPA definition with an `image` key set to a valid minio component container image. All other fields are defaultable/optional, see [All Fields DSPA Example](./config/samples/dspa_all_fields.yaml) for full details. Note that this component is mutually exclusive with externally-provided object stores (defined by `spec.objectStorage.externalStorage`). + +``` +apiVersion: datasciencepipelinesapplications.opendatahub.io/v1alpha1 +kind: DataSciencePipelinesApplication +metadata: + name: sample +spec: + ... + objectStorage: + minio: # mutually exclusive with externalStorage + deploy: true + # Image field is required + image: 'quay.io/opendatahub/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance' +``` + +### ML Pipelines UI +To deploy the standalone DS Pipelines UI component, simply add a `spec.mlpipelineUI` item to your DSPA with an `image` key set to a valid ui component container image. All other fields are defaultable/optional, see [All Fields DSPA Example](./config/samples/dspa_all_fields.yaml) for full details. + +``` +apiVersion: datasciencepipelinesapplications.opendatahub.io/v1alpha1 +kind: DataSciencePipelinesApplication +metadata: + name: sample +spec: + ... + mlpipelineUI: + deploy: true + # Image field is required + image: 'quay.io/opendatahub/odh-ml-pipelines-frontend-container:beta-ui' +``` + + +### ML Metadata +To deploy the ML Metadata artifact lineage/metadata component, simply add a `spec.mlmd` item to your DSPA with `deploy` set to `true`. All other fields are defaultable/optional, see [All Fields DSPA Example](./config/samples/dspa_all_fields.yaml) for full details.
+ +``` +apiVersion: datasciencepipelinesapplications.opendatahub.io/v1alpha1 +kind: DataSciencePipelinesApplication +metadata: + name: sample +spec: + ... + mlmd: + deploy: true +``` + + # Using a DataSciencePipelinesApplication When a `DataSciencePipelinesApplication` is deployed, use the MLPipelines UI endpoint to interact with DSP, either via a GUI or via API calls. From 000bd519f1a036cc94338a6ab6dd71ab6ee2139f Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Mon, 5 Jun 2023 17:22:39 -0400 Subject: [PATCH 08/19] Add dspa label to deployments. Fix test-cases for label change. Signed-off-by: Humair Khan --- config/internal/apiserver/deployment.yaml.tmpl | 3 +++ config/internal/mariadb/deployment.yaml.tmpl | 3 +++ config/internal/minio/deployment.yaml.tmpl | 3 +++ .../internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl | 3 +++ config/internal/ml-metadata/metadata-grpc.deployment.yaml.tmpl | 3 +++ .../internal/ml-metadata/metadata-writer.deployment.yaml.tmpl | 3 +++ config/internal/mlpipelines-ui/deployment.yaml.tmpl | 3 +++ config/internal/persistence-agent/deployment.yaml.tmpl | 3 +++ config/internal/scheduled-workflow/deployment.yaml.tmpl | 3 +++ .../case_0/expected/created/apiserver_deployment.yaml | 3 +++ .../case_0/expected/created/mariadb_deployment.yaml | 3 +++ .../case_0/expected/created/persistence-agent_deployment.yaml | 3 +++ .../case_0/expected/created/scheduled-workflow_deployment.yaml | 3 +++ .../case_2/expected/created/apiserver_deployment.yaml | 3 +++ .../case_2/expected/created/mariadb_deployment.yaml | 3 +++ .../declarative/case_2/expected/created/minio_deployment.yaml | 3 +++ .../case_2/expected/created/mlpipelines-ui_deployment.yaml | 3 +++ .../case_2/expected/created/persistence-agent_deployment.yaml | 3 +++ .../case_2/expected/created/scheduled-workflow_deployment.yaml | 3 +++ .../case_3/expected/created/apiserver_deployment.yaml | 3 +++ .../case_4/expected/created/apiserver_deployment.yaml | 3 +++ 
.../case_4/expected/created/mariadb_deployment.yaml | 3 +++ .../declarative/case_4/expected/created/minio_deployment.yaml | 3 +++ .../case_4/expected/created/mlpipelines-ui_deployment.yaml | 3 +++ .../case_4/expected/created/persistence-agent_deployment.yaml | 3 +++ .../case_4/expected/created/scheduled-workflow_deployment.yaml | 3 +++ .../case_5/expected/created/apiserver_deployment.yaml | 3 +++ .../case_5/expected/created/mariadb_deployment.yaml | 3 +++ .../case_5/expected/created/metadata-envoy_deployment.yaml | 3 +++ .../case_5/expected/created/metadata-grpc_deployment.yaml | 3 +++ .../case_5/expected/created/metadata-writer_deployment.yaml | 3 +++ .../case_5/expected/created/mlpipelines-ui_deployment.yaml | 3 +++ .../case_5/expected/created/persistence-agent_deployment.yaml | 3 +++ .../case_5/expected/created/scheduled-workflow_deployment.yaml | 3 +++ 34 files changed, 102 insertions(+) diff --git a/config/internal/apiserver/deployment.yaml.tmpl b/config/internal/apiserver/deployment.yaml.tmpl index 0a18293ec..2e42d702d 100644 --- a/config/internal/apiserver/deployment.yaml.tmpl +++ b/config/internal/apiserver/deployment.yaml.tmpl @@ -6,16 +6,19 @@ metadata: labels: app: ds-pipeline-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: selector: matchLabels: app: ds-pipeline-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} template: metadata: labels: app: ds-pipeline-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: containers: - env: diff --git a/config/internal/mariadb/deployment.yaml.tmpl b/config/internal/mariadb/deployment.yaml.tmpl index 0ae31f1b4..88a9c1c57 100644 --- a/config/internal/mariadb/deployment.yaml.tmpl +++ b/config/internal/mariadb/deployment.yaml.tmpl @@ -7,6 +7,7 @@ metadata: labels: app: mariadb-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: strategy: # Need this since backing PVC is ReadWriteOnce, @@ -17,11 +18,13 @@ spec: matchLabels: app: mariadb-{{.Name}} component: 
data-science-pipelines + dspa: {{.Name}} template: metadata: labels: app: mariadb-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: containers: - name: mariadb diff --git a/config/internal/minio/deployment.yaml.tmpl b/config/internal/minio/deployment.yaml.tmpl index 7c88621eb..84a1c2415 100644 --- a/config/internal/minio/deployment.yaml.tmpl +++ b/config/internal/minio/deployment.yaml.tmpl @@ -6,11 +6,13 @@ metadata: labels: app: minio-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: selector: matchLabels: app: minio-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} strategy: type: Recreate template: @@ -18,6 +20,7 @@ spec: labels: app: minio-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: containers: - args: diff --git a/config/internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl b/config/internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl index a7b1cc888..2ff91c078 100644 --- a/config/internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl +++ b/config/internal/ml-metadata/metadata-envoy.deployment.yaml.tmpl @@ -6,12 +6,14 @@ metadata: labels: app: ds-pipeline-metadata-envoy-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: replicas: 1 selector: matchLabels: app: ds-pipeline-metadata-envoy-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} template: metadata: annotations: @@ -19,6 +21,7 @@ spec: labels: app: ds-pipeline-metadata-envoy-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: containers: - image: {{.MLMD.Envoy.Image}} diff --git a/config/internal/ml-metadata/metadata-grpc.deployment.yaml.tmpl b/config/internal/ml-metadata/metadata-grpc.deployment.yaml.tmpl index e64702005..543d03b65 100644 --- a/config/internal/ml-metadata/metadata-grpc.deployment.yaml.tmpl +++ b/config/internal/ml-metadata/metadata-grpc.deployment.yaml.tmpl @@ -6,17 +6,20 @@ metadata: labels: app: ds-pipeline-metadata-grpc-{{.Name}} component: 
data-science-pipelines + dspa: {{.Name}} spec: replicas: 1 selector: matchLabels: app: ds-pipeline-metadata-grpc-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} template: metadata: labels: app: ds-pipeline-metadata-grpc-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: containers: - args: diff --git a/config/internal/ml-metadata/metadata-writer.deployment.yaml.tmpl b/config/internal/ml-metadata/metadata-writer.deployment.yaml.tmpl index d6ff143cd..39068eaf2 100644 --- a/config/internal/ml-metadata/metadata-writer.deployment.yaml.tmpl +++ b/config/internal/ml-metadata/metadata-writer.deployment.yaml.tmpl @@ -6,17 +6,20 @@ metadata: labels: app: ds-pipeline-metadata-writer-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: replicas: 1 selector: matchLabels: app: ds-pipeline-metadata-writer-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} template: metadata: labels: app: ds-pipeline-metadata-writer-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: containers: - env: diff --git a/config/internal/mlpipelines-ui/deployment.yaml.tmpl b/config/internal/mlpipelines-ui/deployment.yaml.tmpl index c4f49b727..b85feebcd 100644 --- a/config/internal/mlpipelines-ui/deployment.yaml.tmpl +++ b/config/internal/mlpipelines-ui/deployment.yaml.tmpl @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-ui-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: selector: matchLabels: app: ds-pipeline-ui-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-ui-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: containers: - env: diff --git a/config/internal/persistence-agent/deployment.yaml.tmpl b/config/internal/persistence-agent/deployment.yaml.tmpl index 2fd0d8ca9..1c160ec59 100644 --- a/config/internal/persistence-agent/deployment.yaml.tmpl +++ 
b/config/internal/persistence-agent/deployment.yaml.tmpl @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-persistenceagent-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: selector: matchLabels: app: ds-pipeline-persistenceagent-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-persistenceagent-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: containers: - env: diff --git a/config/internal/scheduled-workflow/deployment.yaml.tmpl b/config/internal/scheduled-workflow/deployment.yaml.tmpl index ce2f5bf32..c5a5da5b5 100644 --- a/config/internal/scheduled-workflow/deployment.yaml.tmpl +++ b/config/internal/scheduled-workflow/deployment.yaml.tmpl @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-scheduledworkflow-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: selector: matchLabels: app: ds-pipeline-scheduledworkflow-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-scheduledworkflow-{{.Name}} component: data-science-pipelines + dspa: {{.Name}} spec: containers: - env: diff --git a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml index 3db41f0a8..fa277a796 100644 --- a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml @@ -6,16 +6,19 @@ metadata: labels: app: ds-pipeline-testdsp0 component: data-science-pipelines + dspa: testdsp0 spec: selector: matchLabels: app: ds-pipeline-testdsp0 component: data-science-pipelines + dspa: testdsp0 template: metadata: labels: app: ds-pipeline-testdsp0 component: data-science-pipelines + dspa: testdsp0 spec: containers: - env: diff --git 
a/controllers/testdata/declarative/case_0/expected/created/mariadb_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/mariadb_deployment.yaml index 92682abda..20aad1f79 100644 --- a/controllers/testdata/declarative/case_0/expected/created/mariadb_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/mariadb_deployment.yaml @@ -7,6 +7,7 @@ metadata: labels: app: mariadb-testdsp0 component: data-science-pipelines + dspa: testdsp0 spec: strategy: type: Recreate # Need this since backing PVC is ReadWriteOnce, which creates resource lock condition in default Rolling strategy @@ -14,11 +15,13 @@ spec: matchLabels: app: mariadb-testdsp0 component: data-science-pipelines + dspa: testdsp0 template: metadata: labels: app: mariadb-testdsp0 component: data-science-pipelines + dspa: testdsp0 spec: containers: - name: mariadb diff --git a/controllers/testdata/declarative/case_0/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/persistence-agent_deployment.yaml index 3cda498b4..bf0171dc3 100644 --- a/controllers/testdata/declarative/case_0/expected/created/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/persistence-agent_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-persistenceagent-testdsp0 component: data-science-pipelines + dspa: testdsp0 spec: selector: matchLabels: app: ds-pipeline-persistenceagent-testdsp0 component: data-science-pipelines + dspa: testdsp0 template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-persistenceagent-testdsp0 component: data-science-pipelines + dspa: testdsp0 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_0/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/scheduled-workflow_deployment.yaml index 5885adbbf..d069ca8fb 100644 --- 
a/controllers/testdata/declarative/case_0/expected/created/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/scheduled-workflow_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-scheduledworkflow-testdsp0 component: data-science-pipelines + dspa: testdsp0 spec: selector: matchLabels: app: ds-pipeline-scheduledworkflow-testdsp0 component: data-science-pipelines + dspa: testdsp0 template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-scheduledworkflow-testdsp0 component: data-science-pipelines + dspa: testdsp0 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml index 0ae552568..5c1263828 100644 --- a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml @@ -6,16 +6,19 @@ metadata: labels: app: ds-pipeline-testdsp2 component: data-science-pipelines + dspa: testdsp2 spec: selector: matchLabels: app: ds-pipeline-testdsp2 component: data-science-pipelines + dspa: testdsp2 template: metadata: labels: app: ds-pipeline-testdsp2 component: data-science-pipelines + dspa: testdsp2 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_2/expected/created/mariadb_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/mariadb_deployment.yaml index 9076adf13..d122f60d7 100644 --- a/controllers/testdata/declarative/case_2/expected/created/mariadb_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/mariadb_deployment.yaml @@ -7,6 +7,7 @@ metadata: labels: app: mariadb-testdsp2 component: data-science-pipelines + dspa: testdsp2 spec: strategy: type: Recreate # Need this since backing PVC is ReadWriteOnce, which creates resource lock condition in default 
Rolling strategy @@ -14,11 +15,13 @@ spec: matchLabels: app: mariadb-testdsp2 component: data-science-pipelines + dspa: testdsp2 template: metadata: labels: app: mariadb-testdsp2 component: data-science-pipelines + dspa: testdsp2 spec: containers: - name: mariadb diff --git a/controllers/testdata/declarative/case_2/expected/created/minio_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/minio_deployment.yaml index 65b55da19..8bf87f744 100644 --- a/controllers/testdata/declarative/case_2/expected/created/minio_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/minio_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: minio-testdsp2 component: data-science-pipelines + dspa: testdsp2 spec: selector: matchLabels: app: minio-testdsp2 component: data-science-pipelines + dspa: testdsp2 strategy: type: Recreate template: @@ -18,6 +20,7 @@ spec: labels: app: minio-testdsp2 component: data-science-pipelines + dspa: testdsp2 spec: containers: - args: diff --git a/controllers/testdata/declarative/case_2/expected/created/mlpipelines-ui_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/mlpipelines-ui_deployment.yaml index 06cfba857..4430c6509 100644 --- a/controllers/testdata/declarative/case_2/expected/created/mlpipelines-ui_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/mlpipelines-ui_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-ui-testdsp2 component: data-science-pipelines + dspa: testdsp2 spec: selector: matchLabels: app: ds-pipeline-ui-testdsp2 component: data-science-pipelines + dspa: testdsp2 template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-ui-testdsp2 component: data-science-pipelines + dspa: testdsp2 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_2/expected/created/persistence-agent_deployment.yaml 
b/controllers/testdata/declarative/case_2/expected/created/persistence-agent_deployment.yaml index 9c5a77ad7..8342eace3 100644 --- a/controllers/testdata/declarative/case_2/expected/created/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/persistence-agent_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-persistenceagent-testdsp2 component: data-science-pipelines + dspa: testdsp2 spec: selector: matchLabels: app: ds-pipeline-persistenceagent-testdsp2 component: data-science-pipelines + dspa: testdsp2 template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-persistenceagent-testdsp2 component: data-science-pipelines + dspa: testdsp2 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_2/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/scheduled-workflow_deployment.yaml index 1f889d32b..f912bc2f7 100644 --- a/controllers/testdata/declarative/case_2/expected/created/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/scheduled-workflow_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-scheduledworkflow-testdsp2 component: data-science-pipelines + dspa: testdsp2 spec: selector: matchLabels: app: ds-pipeline-scheduledworkflow-testdsp2 component: data-science-pipelines + dspa: testdsp2 template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-scheduledworkflow-testdsp2 component: data-science-pipelines + dspa: testdsp2 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml index accfd5e7b..0b617788d 100644 --- a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml +++ 
b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml @@ -6,16 +6,19 @@ metadata: labels: app: ds-pipeline-testdsp3 component: data-science-pipelines + dspa: testdsp3 spec: selector: matchLabels: app: ds-pipeline-testdsp3 component: data-science-pipelines + dspa: testdsp3 template: metadata: labels: app: ds-pipeline-testdsp3 component: data-science-pipelines + dspa: testdsp3 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml index dd80d9d21..94524294c 100644 --- a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml @@ -6,16 +6,19 @@ metadata: labels: app: ds-pipeline-testdsp4 component: data-science-pipelines + dspa: testdsp4 spec: selector: matchLabels: app: ds-pipeline-testdsp4 component: data-science-pipelines + dspa: testdsp4 template: metadata: labels: app: ds-pipeline-testdsp4 component: data-science-pipelines + dspa: testdsp4 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_4/expected/created/mariadb_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/mariadb_deployment.yaml index 54df48ea6..3130c29a4 100644 --- a/controllers/testdata/declarative/case_4/expected/created/mariadb_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/mariadb_deployment.yaml @@ -7,6 +7,7 @@ metadata: labels: app: mariadb-testdsp4 component: data-science-pipelines + dspa: testdsp4 spec: strategy: type: Recreate # Need this since backing PVC is ReadWriteOnce, which creates resource lock condition in default Rolling strategy @@ -14,11 +15,13 @@ spec: matchLabels: app: mariadb-testdsp4 component: data-science-pipelines + dspa: testdsp4 template: metadata: labels: app: mariadb-testdsp4 component: 
data-science-pipelines + dspa: testdsp4 spec: containers: - name: mariadb diff --git a/controllers/testdata/declarative/case_4/expected/created/minio_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/minio_deployment.yaml index 5a672b605..df5699578 100644 --- a/controllers/testdata/declarative/case_4/expected/created/minio_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/minio_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: minio-testdsp4 component: data-science-pipelines + dspa: testdsp4 spec: selector: matchLabels: app: minio-testdsp4 component: data-science-pipelines + dspa: testdsp4 strategy: type: Recreate template: @@ -18,6 +20,7 @@ spec: labels: app: minio-testdsp4 component: data-science-pipelines + dspa: testdsp4 spec: containers: - args: diff --git a/controllers/testdata/declarative/case_4/expected/created/mlpipelines-ui_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/mlpipelines-ui_deployment.yaml index abc6f1f0c..131b3cca3 100644 --- a/controllers/testdata/declarative/case_4/expected/created/mlpipelines-ui_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/mlpipelines-ui_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-ui-testdsp4 component: data-science-pipelines + dspa: testdsp4 spec: selector: matchLabels: app: ds-pipeline-ui-testdsp4 component: data-science-pipelines + dspa: testdsp4 template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-ui-testdsp4 component: data-science-pipelines + dspa: testdsp4 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_4/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/persistence-agent_deployment.yaml index 08fee7bd3..da750bb99 100644 --- a/controllers/testdata/declarative/case_4/expected/created/persistence-agent_deployment.yaml +++ 
b/controllers/testdata/declarative/case_4/expected/created/persistence-agent_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-persistenceagent-testdsp4 component: data-science-pipelines + dspa: testdsp4 spec: selector: matchLabels: app: ds-pipeline-persistenceagent-testdsp4 component: data-science-pipelines + dspa: testdsp4 template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-persistenceagent-testdsp4 component: data-science-pipelines + dspa: testdsp4 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_4/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/scheduled-workflow_deployment.yaml index 2e3d9c80b..0d7e88db6 100644 --- a/controllers/testdata/declarative/case_4/expected/created/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/scheduled-workflow_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-scheduledworkflow-testdsp4 component: data-science-pipelines + dspa: testdsp4 spec: selector: matchLabels: app: ds-pipeline-scheduledworkflow-testdsp4 component: data-science-pipelines + dspa: testdsp4 template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-scheduledworkflow-testdsp4 component: data-science-pipelines + dspa: testdsp4 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml index 7ecef37aa..92f6ac5b9 100644 --- a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml @@ -6,16 +6,19 @@ metadata: labels: app: ds-pipeline-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: selector: matchLabels: app: ds-pipeline-testdsp5 component: data-science-pipelines + 
dspa: testdsp5 template: metadata: labels: app: ds-pipeline-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_5/expected/created/mariadb_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/mariadb_deployment.yaml index 94df4dafc..95276f7f3 100644 --- a/controllers/testdata/declarative/case_5/expected/created/mariadb_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/mariadb_deployment.yaml @@ -7,6 +7,7 @@ metadata: labels: app: mariadb-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: strategy: type: Recreate # Need this since backing PVC is ReadWriteOnce, which creates resource lock condition in default Rolling strategy @@ -14,11 +15,13 @@ spec: matchLabels: app: mariadb-testdsp5 component: data-science-pipelines + dspa: testdsp5 template: metadata: labels: app: mariadb-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: containers: - name: mariadb diff --git a/controllers/testdata/declarative/case_5/expected/created/metadata-envoy_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/metadata-envoy_deployment.yaml index b8dfebe7a..d11375128 100644 --- a/controllers/testdata/declarative/case_5/expected/created/metadata-envoy_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/metadata-envoy_deployment.yaml @@ -6,12 +6,14 @@ metadata: labels: app: ds-pipeline-metadata-envoy-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: replicas: 1 selector: matchLabels: app: ds-pipeline-metadata-envoy-testdsp5 component: data-science-pipelines + dspa: testdsp5 template: metadata: annotations: @@ -19,6 +21,7 @@ spec: labels: app: ds-pipeline-metadata-envoy-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: containers: - image: metadata-envoy:test5 diff --git 
a/controllers/testdata/declarative/case_5/expected/created/metadata-grpc_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/metadata-grpc_deployment.yaml index 6c202e0ff..48b8e3959 100644 --- a/controllers/testdata/declarative/case_5/expected/created/metadata-grpc_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/metadata-grpc_deployment.yaml @@ -6,17 +6,20 @@ metadata: labels: app: ds-pipeline-metadata-grpc-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: replicas: 1 selector: matchLabels: app: ds-pipeline-metadata-grpc-testdsp5 component: data-science-pipelines + dspa: testdsp5 template: metadata: labels: app: ds-pipeline-metadata-grpc-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: containers: - args: diff --git a/controllers/testdata/declarative/case_5/expected/created/metadata-writer_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/metadata-writer_deployment.yaml index 4248db028..908cf42cb 100644 --- a/controllers/testdata/declarative/case_5/expected/created/metadata-writer_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/metadata-writer_deployment.yaml @@ -6,17 +6,20 @@ metadata: labels: app: ds-pipeline-metadata-writer-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: replicas: 1 selector: matchLabels: app: ds-pipeline-metadata-writer-testdsp5 component: data-science-pipelines + dspa: testdsp5 template: metadata: labels: app: ds-pipeline-metadata-writer-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_5/expected/created/mlpipelines-ui_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/mlpipelines-ui_deployment.yaml index d9664634f..da6900631 100644 --- a/controllers/testdata/declarative/case_5/expected/created/mlpipelines-ui_deployment.yaml +++ 
b/controllers/testdata/declarative/case_5/expected/created/mlpipelines-ui_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-ui-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: selector: matchLabels: app: ds-pipeline-ui-testdsp5 component: data-science-pipelines + dspa: testdsp5 template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-ui-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml index 0d4ccd5d9..3255d1281 100644 --- a/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-persistenceagent-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: selector: matchLabels: app: ds-pipeline-persistenceagent-testdsp5 component: data-science-pipelines + dspa: testdsp5 template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-persistenceagent-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: containers: - env: diff --git a/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml index fb06409b0..d03c4daf8 100644 --- a/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml @@ -6,11 +6,13 @@ metadata: labels: app: ds-pipeline-scheduledworkflow-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: selector: matchLabels: app: ds-pipeline-scheduledworkflow-testdsp5 component: 
data-science-pipelines + dspa: testdsp5 template: metadata: annotations: @@ -18,6 +20,7 @@ spec: labels: app: ds-pipeline-scheduledworkflow-testdsp5 component: data-science-pipelines + dspa: testdsp5 spec: containers: - env: From edad960812c9f729b552a0aa860ca2e33d05f7b0 Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Mon, 29 May 2023 12:50:11 -0400 Subject: [PATCH 09/19] Move status generation to separate procedure. Currently status generation does not need to be intertwined within the reconcile procedure and can be generated near the end of the loop all together before publishing metrics. As such this change de couples the generation logic into it's own procedure. Signed-off-by: Humair Khan --- controllers/dspipeline_controller.go | 105 +++++++++++++++++---------- 1 file changed, 67 insertions(+), 38 deletions(-) diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 90e47dd51..23d380050 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -19,7 +19,6 @@ package controllers import ( "context" "fmt" - "github.com/go-logr/logr" mf "github.com/manifestival/manifestival" dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" @@ -119,7 +118,12 @@ func (r *DSPAReconciler) buildCondition(conditionType string, dspa *dspav1alpha1 return condition } -func (r *DSPAReconciler) isDeploymentAvailable(ctx context.Context, dspa *dspav1alpha1.DataSciencePipelinesApplication, name string) bool { +// isDeploymentInCondition evaluates if condition with "name" is in condition of type "conditionType". +// this procedure is valid only for conditions with bool status type, for conditions of non bool type +// results are undefined. 
+func (r *DSPAReconciler) isDeploymentInCondition(ctx context.Context, + dspa *dspav1alpha1.DataSciencePipelinesApplication, name string, + conditionType appsv1.DeploymentConditionType) (bool, appsv1.DeploymentCondition) { found := &appsv1.Deployment{} // Every Deployment in DSPA is the name followed by the DSPA CR name @@ -128,15 +132,15 @@ func (r *DSPAReconciler) isDeploymentAvailable(ctx context.Context, dspa *dspav1 err := r.Get(ctx, types.NamespacedName{Name: component, Namespace: dspa.Namespace}, found) if err == nil { if found.Spec.Replicas != nil && *found.Spec.Replicas == 0 { - return false + return false, appsv1.DeploymentCondition{} } for _, s := range found.Status.Conditions { - if s.Type == "Available" && s.Status == corev1.ConditionTrue { - return true + if s.Type == conditionType && s.Status == corev1.ConditionTrue { + return true, s } } } - return false + return false, appsv1.DeploymentCondition{} } //+kubebuilder:rbac:groups=datasciencepipelinesapplications.opendatahub.io,resources=datasciencepipelinesapplications,verbs=get;list;watch;create;update;patch;delete @@ -222,9 +226,6 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. return ctrl.Result{}, nil } - // Initialize conditions - var conditions []metav1.Condition - err = r.ReconcileDatabase(ctx, dspa, params) if err != nil { return ctrl.Result{}, err @@ -240,39 +241,21 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. 
return ctrl.Result{}, err } - apiServerReady := r.buildCondition(config.APIServerReady, dspa, config.MinimumReplicasAvailable) err = r.ReconcileAPIServer(ctx, dspa, params) if err != nil { return ctrl.Result{}, err } - if r.isDeploymentAvailable(ctx, dspa, "ds-pipeline") { - apiServerReady.Status = metav1.ConditionTrue - } - conditions = append(conditions, apiServerReady) - - persistenceAgentReady := r.buildCondition(config.PersistenceAgentReady, dspa, config.MinimumReplicasAvailable) err = r.ReconcilePersistenceAgent(dspa, params) if err != nil { return ctrl.Result{}, err } - if r.isDeploymentAvailable(ctx, dspa, "ds-pipeline-persistenceagent") { - persistenceAgentReady.Status = metav1.ConditionTrue - } - conditions = append(conditions, persistenceAgentReady) - - scheduledWorkflowReady := r.buildCondition(config.ScheduledWorkflowReady, dspa, config.MinimumReplicasAvailable) err = r.ReconcileScheduledWorkflow(dspa, params) if err != nil { return ctrl.Result{}, err } - if r.isDeploymentAvailable(ctx, dspa, "ds-pipeline-scheduledworkflow") { - scheduledWorkflowReady.Status = metav1.ConditionTrue - } - conditions = append(conditions, scheduledWorkflowReady) - err = r.ReconcileUI(dspa, params) if err != nil { return ctrl.Result{}, err @@ -291,6 +274,62 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. 
return ctrl.Result{}, err } + conditions := r.GenerateStatus(ctx, dspa) + dspa.Status.Conditions = conditions + + // Update Status + err = r.Status().Update(ctx, dspa) + if err != nil { + log.Info(err.Error()) + return ctrl.Result{}, err + } + + r.PublishMetrics( + dspa, + GetConditionByType(config.APIServerReady, conditions), + GetConditionByType(config.PersistenceAgentReady, conditions), + GetConditionByType(config.ScheduledWorkflowReady, conditions), + GetConditionByType(config.CrReady, conditions), + ) + + return ctrl.Result{}, nil +} + +// GetConditionByType returns condition of type T if it exists in conditions, otherwise +// return empty condition struct. +func GetConditionByType(t string, conditions []metav1.Condition) metav1.Condition { + for _, c := range conditions { + if c.Type == t { + return c + } + } + return metav1.Condition{} +} + +func (r *DSPAReconciler) GenerateStatus(ctx context.Context, dspa *dspav1alpha1.DataSciencePipelinesApplication) []metav1.Condition { + var conditions []metav1.Condition + + apiServerReady := r.buildCondition(config.APIServerReady, dspa, config.MinimumReplicasAvailable) + deploymentAvailable, _ := r.isDeploymentInCondition(ctx, dspa, "ds-pipeline", appsv1.DeploymentAvailable) + if deploymentAvailable { + apiServerReady.Status = metav1.ConditionTrue + } + conditions = append(conditions, apiServerReady) + + persistenceAgentReady := r.buildCondition(config.PersistenceAgentReady, dspa, config.MinimumReplicasAvailable) + deploymentAvailable, _ = r.isDeploymentInCondition(ctx, dspa, "ds-pipeline-persistenceagent", appsv1.DeploymentAvailable) + if deploymentAvailable { + persistenceAgentReady.Status = metav1.ConditionTrue + } + conditions = append(conditions, persistenceAgentReady) + + scheduledWorkflowReady := r.buildCondition(config.ScheduledWorkflowReady, dspa, config.MinimumReplicasAvailable) + deploymentAvailable, _ = r.isDeploymentInCondition(ctx, dspa, "ds-pipeline-scheduledworkflow", appsv1.DeploymentAvailable) + if 
deploymentAvailable { + scheduledWorkflowReady.Status = metav1.ConditionTrue + } + conditions = append(conditions, scheduledWorkflowReady) + crReady := r.buildCondition(config.CrReady, dspa, config.MinimumReplicasAvailable) crReady.Type = config.CrReady @@ -309,18 +348,8 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. conditions[i].LastTransitionTime = condition.LastTransitionTime } } - dspa.Status.Conditions = conditions - // Update Status - err = r.Status().Update(ctx, dspa) - if err != nil { - log.Info(err.Error()) - return ctrl.Result{}, err - } - - r.PublishMetrics(dspa, apiServerReady, persistenceAgentReady, scheduledWorkflowReady, crReady) - - return ctrl.Result{}, nil + return conditions } func (r *DSPAReconciler) PublishMetrics(dspa *dspav1alpha1.DataSciencePipelinesApplication, From b54a86179c88a407f5053458eebab41c27c02f25 Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Tue, 30 May 2023 08:15:08 -0400 Subject: [PATCH 10/19] Add failure status condidions. 
Signed-off-by: Humair Khan --- controllers/config/defaults.go | 19 ++- controllers/dspipeline_controller.go | 227 ++++++++++++++++++++++----- 2 files changed, 199 insertions(+), 47 deletions(-) diff --git a/controllers/config/defaults.go b/controllers/config/defaults.go index 628445ae9..fdf841b9d 100644 --- a/controllers/config/defaults.go +++ b/controllers/config/defaults.go @@ -67,12 +67,23 @@ const ( MlmdWriterImagePath = "Images.MlmdWriter" ) +// DSPA Status Condition Types +const ( + APIServerReady = "APIServerReady" + PersistenceAgentReady = "PersistenceAgentReady" + ScheduledWorkflowReady = "ScheduledWorkflowReady" + CrReady = "Ready" +) + +// DSPA Ready Status Condition Reasons +// As per k8s api convention: Reason is intended +// to be used in concise output, such as one-line +// kubectl get output, and in summarizing +// occurrences of causes const ( - APIServerReady = "APIServerReady" - PersistenceAgentReady = "PersistenceAgentReady" - ScheduledWorkflowReady = "ScheduledWorkflowReady" - CrReady = "Ready" MinimumReplicasAvailable = "MinimumReplicasAvailable" + FailingToDeploy = "FailingToDeploy" + Deploying = "Deploying" ) // Any required Configmap paths can be added here, diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 23d380050..5770ee04f 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -34,6 +34,9 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" ) const finalizerName = "datasciencepipelinesapplications.opendatahub.io/finalizer" @@ -118,29 +121,14 @@ func (r *DSPAReconciler) buildCondition(conditionType string, dspa *dspav1alpha1 return condition } -// isDeploymentInCondition evaluates if condition with "name" is in 
condition of type "conditionType". -// this procedure is valid only for conditions with bool status type, for conditions of non bool type -// results are undefined. -func (r *DSPAReconciler) isDeploymentInCondition(ctx context.Context, - dspa *dspav1alpha1.DataSciencePipelinesApplication, name string, - conditionType appsv1.DeploymentConditionType) (bool, appsv1.DeploymentCondition) { - found := &appsv1.Deployment{} - - // Every Deployment in DSPA is the name followed by the DSPA CR name - component := name + "-" + dspa.Name - - err := r.Get(ctx, types.NamespacedName{Name: component, Namespace: dspa.Namespace}, found) - if err == nil { - if found.Spec.Replicas != nil && *found.Spec.Replicas == 0 { - return false, appsv1.DeploymentCondition{} - } - for _, s := range found.Status.Conditions { - if s.Type == conditionType && s.Status == corev1.ConditionTrue { - return true, s - } +func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition { + for i := range status.Conditions { + c := status.Conditions[i] + if c.Type == condType { + return &c } } - return false, appsv1.DeploymentCondition{} + return nil } //+kubebuilder:rbac:groups=datasciencepipelinesapplications.opendatahub.io,resources=datasciencepipelinesapplications,verbs=get;list;watch;create;update;patch;delete @@ -274,7 +262,11 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. 
return ctrl.Result{}, err } - conditions := r.GenerateStatus(ctx, dspa) + err, conditions := r.GenerateStatus(ctx, dspa) + if err != nil { + log.Info(err.Error()) + return ctrl.Result{}, err + } dspa.Status.Conditions = conditions // Update Status @@ -306,40 +298,162 @@ func GetConditionByType(t string, conditions []metav1.Condition) metav1.Conditio return metav1.Condition{} } -func (r *DSPAReconciler) GenerateStatus(ctx context.Context, dspa *dspav1alpha1.DataSciencePipelinesApplication) []metav1.Condition { - var conditions []metav1.Condition +// isDeploymentInCondition evaluates if condition with "name" is in condition of type "conditionType". +// this procedure is valid only for conditions with bool status type, for conditions of non bool type +// results are undefined. +func (r *DSPAReconciler) handleReadyCondition( + ctx context.Context, + dspa *dspav1alpha1.DataSciencePipelinesApplication, + name string, + condition string, +) (error, metav1.Condition) { + readyCondition := r.buildCondition(condition, dspa, config.MinimumReplicasAvailable) + deployment := &appsv1.Deployment{} - apiServerReady := r.buildCondition(config.APIServerReady, dspa, config.MinimumReplicasAvailable) - deploymentAvailable, _ := r.isDeploymentInCondition(ctx, dspa, "ds-pipeline", appsv1.DeploymentAvailable) - if deploymentAvailable { - apiServerReady.Status = metav1.ConditionTrue + // Every Deployment in DSPA is the name followed by the DSPA CR name + component := name + "-" + dspa.Name + + err := r.Get(ctx, types.NamespacedName{Name: component, Namespace: dspa.Namespace}, deployment) + if err != nil { + return err, metav1.Condition{} + } + + // First check if deployment is scaled down, if it is, component is deemed not ready + if deployment.Spec.Replicas != nil && *deployment.Spec.Replicas == 0 { + readyCondition.Reason = config.MinimumReplicasAvailable + readyCondition.Status = metav1.ConditionFalse + readyCondition.Message = fmt.Sprintf("Deployment for component \"%s\" is scaled 
down.", component) + return nil, readyCondition + } + + // At this point component is not minimally available, possible scenarios: + // 1. Component deployment has encountered errors + // 2. Component is still deploying + // We check for (1), and if no errors are found we presume (2) + + progressingCond := GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing) + availableCond := GetDeploymentCondition(deployment.Status, appsv1.DeploymentAvailable) + replicaFailureCond := GetDeploymentCondition(deployment.Status, appsv1.DeploymentReplicaFailure) + + if availableCond != nil && availableCond.Status == corev1.ConditionTrue { + // If this DSPA component is minimally available, we are done. + readyCondition.Reason = config.MinimumReplicasAvailable + readyCondition.Status = metav1.ConditionTrue + readyCondition.Message = fmt.Sprintf("Component [%s] is minimally available.", component) + return nil, readyCondition + } + + // There are two possible reasons for progress failing, deadline and replica create error: + // https://github.com/kubernetes/kubernetes/blob/release-1.27/pkg/controller/deployment/util/deployment_util.go#L69 + // We check for both to investigate potential issues during deployment + if progressingCond != nil && progressingCond.Status == corev1.ConditionFalse && + (progressingCond.Reason == "ProgressDeadlineExceeded" || progressingCond.Reason == "ReplicaSetCreateError") { + readyCondition.Reason = config.FailingToDeploy + readyCondition.Status = metav1.ConditionFalse + readyCondition.Message = fmt.Sprintf("Component [%s] has failed to progress. Reason: [%s]. "+ + "Message: [%s]", component, progressingCond.Reason, progressingCond.Message) + return nil, readyCondition + } + + if replicaFailureCond != nil && replicaFailureCond.Status == corev1.ConditionTrue { + readyCondition.Reason = config.FailingToDeploy + readyCondition.Status = metav1.ConditionFalse + readyCondition.Message = fmt.Sprintf("Component's replica [%s] has failed to create. 
Reason: [%s]. "+ + "Message: [%s]", component, replicaFailureCond.Reason, replicaFailureCond.Message) + return nil, readyCondition + } + + // Search through the pods associated with this deployment + // if a failed pod is encountered, report Ready=false with failure + // message + podList := &corev1.PodList{} + opts := []client.ListOption{ + client.MatchingLabels(deployment.Spec.Selector.MatchLabels), + } + err = r.Client.List(ctx, podList, opts...) + if err != nil { + return err, metav1.Condition{} + } + + hasPodFailures := false + podFailureMessage := "" + // We loop through all pods within this deployment and inspect their statuses for failures + // Any failure detected in any pod results in FailingToDeploy status + for _, p := range podList.Items { + if p.Status.Phase == corev1.PodFailed { + hasPodFailures = true + podFailureMessage += fmt.Sprintf("Pod named [%s] that is associated with this component [%s] "+ + "is in failed phase.", p.Name, component) + } + // We loop through the containers in each pod, as in some cases the Pod can be in pending state + // but an individual container may be failing due to runtime errors. + for _, c := range p.Status.ContainerStatuses { + if c.State.Waiting != nil && c.State.Waiting.Reason == "CrashLoopBackOff" { + readyCondition.Reason = config.FailingToDeploy + readyCondition.Status = metav1.ConditionFalse + // We concatenate messages from all failing containers. + readyCondition.Message = fmt.Sprintf("Component [%s] is in CrashLoopBackOff. 
"+ + "Message from pod: [%s]", component, c.State.Waiting.Message) + return nil, readyCondition + } + } } - conditions = append(conditions, apiServerReady) - persistenceAgentReady := r.buildCondition(config.PersistenceAgentReady, dspa, config.MinimumReplicasAvailable) - deploymentAvailable, _ = r.isDeploymentInCondition(ctx, dspa, "ds-pipeline-persistenceagent", appsv1.DeploymentAvailable) - if deploymentAvailable { - persistenceAgentReady.Status = metav1.ConditionTrue + if hasPodFailures { + readyCondition.Status = metav1.ConditionFalse + readyCondition.Reason = config.FailingToDeploy + readyCondition.Message = podFailureMessage + return nil, readyCondition } - conditions = append(conditions, persistenceAgentReady) - scheduledWorkflowReady := r.buildCondition(config.ScheduledWorkflowReady, dspa, config.MinimumReplicasAvailable) - deploymentAvailable, _ = r.isDeploymentInCondition(ctx, dspa, "ds-pipeline-scheduledworkflow", appsv1.DeploymentAvailable) - if deploymentAvailable { - scheduledWorkflowReady.Status = metav1.ConditionTrue + // No errors encountered, assume deployment is progressing successfully + // If this DSPA component is minimally available, we are done. 
+ readyCondition.Reason = config.Deploying + readyCondition.Status = metav1.ConditionFalse + readyCondition.Message = fmt.Sprintf("Component [%s] is deploying.", component) + return nil, readyCondition + +} + +func (r *DSPAReconciler) GenerateStatus(ctx context.Context, dspa *dspav1alpha1.DataSciencePipelinesApplication) (error, []metav1.Condition) { + + err, apiServerReady := r.handleReadyCondition(ctx, dspa, "ds-pipeline", config.APIServerReady) + if err != nil { + return err, []metav1.Condition{} + } + err, persistenceAgentReady := r.handleReadyCondition(ctx, dspa, "ds-pipeline-persistenceagent", config.PersistenceAgentReady) + if err != nil { + return err, []metav1.Condition{} + } + err, scheduledWorkflowReady := r.handleReadyCondition(ctx, dspa, "ds-pipeline-scheduledworkflow", config.ScheduledWorkflowReady) + if err != nil { + return err, []metav1.Condition{} } + var conditions []metav1.Condition + conditions = append(conditions, apiServerReady) + conditions = append(conditions, persistenceAgentReady) conditions = append(conditions, scheduledWorkflowReady) + // Compute Ready Logic for the CR crReady := r.buildCondition(config.CrReady, dspa, config.MinimumReplicasAvailable) crReady.Type = config.CrReady - // Compute Ready Logic for the CR - if (apiServerReady.Status == metav1.ConditionTrue) && - (persistenceAgentReady.Status == metav1.ConditionTrue) && - (scheduledWorkflowReady.Status == metav1.ConditionTrue) { + componentConditions := []metav1.Condition{apiServerReady, persistenceAgentReady, scheduledWorkflowReady} + allReady := true + failureMessages := "" + for _, c := range componentConditions { + if c.Status == metav1.ConditionFalse { + allReady = false + failureMessages += fmt.Sprintf("%s \n", c.Message) + } + } + + if allReady { crReady.Status = metav1.ConditionTrue + crReady.Message = "All components are ready." 
} else { crReady.Status = metav1.ConditionFalse + crReady.Message = failureMessages } conditions = append(conditions, crReady) @@ -349,7 +463,7 @@ func (r *DSPAReconciler) GenerateStatus(ctx context.Context, dspa *dspav1alpha1. } } - return conditions + return nil, conditions } func (r *DSPAReconciler) PublishMetrics(dspa *dspav1alpha1.DataSciencePipelinesApplication, @@ -403,6 +517,33 @@ func (r *DSPAReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&rbacv1.Role{}). Owns(&rbacv1.RoleBinding{}). Owns(&routev1.Route{}). + // Watch for Pods belonging to DSPA + Watches(&source.Kind{Type: &corev1.Pod{}}, + handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { + log := r.Log.WithValues("namespace", o.GetNamespace()) + + component, hasComponentLabel := o.GetLabels()["component"] + + if !hasComponentLabel || (component != "data-science-pipelines") { + return []reconcile.Request{} + } + + dspaName, hasDSPALabel := o.GetLabels()["dspa"] + if !hasDSPALabel { + msg := fmt.Sprintf("Pod with data-science-pipelines label encountered, but is missing dspa "+ + "label, could not reconcile on [Pod: %s] ", o.GetName()) + log.V(1).Info(msg) + return []reconcile.Request{} + } + + log.V(1).Info(fmt.Sprintf("Reconcile event triggered by [Pod: %s] ", o.GetName())) + namespacedName := types.NamespacedName{ + Name: dspaName, + Namespace: o.GetNamespace(), + } + reconcileRequests := append([]reconcile.Request{}, reconcile.Request{NamespacedName: namespacedName}) + return reconcileRequests + })). // TODO: Add watcher for ui cluster rbac since it has no owner Complete(r) } From becc65c87b81322d7d38b2b9b4e873a1ad70dab1 Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Wed, 7 Jun 2023 13:31:40 -0400 Subject: [PATCH 11/19] Correct logged secret name when secret DNE. 
Signed-off-by: Humair Khan --- controllers/dspipeline_params.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 0a35e0e14..bac9946b3 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -320,8 +320,8 @@ func (p *DSPAParams) SetupObjectParams(ctx context.Context, dsp *dspa.DataScienc p.ObjectStorageConnection.SecretAccessKey = base64.StdEncoding.EncodeToString([]byte(generatedPass)) createNewSecret = true } else { - log.Error(err, fmt.Sprintf("Storage secret %s was specified in CR but does not exist.", - p.ObjectStorageConnection.CredentialsSecret.SecretName)) + log.Error(err, fmt.Sprintf("Storage secret [%s] was specified in CR but does not exist.", + credsSecretName)) return err } } else if err != nil { From 74305c592ad1e620b7a1de9b79708fb34daf645a Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Wed, 7 Jun 2023 13:31:59 -0400 Subject: [PATCH 12/19] Move utility functions to utility package. 
Signed-off-by: Humair Khan --- controllers/dspipeline_controller.go | 36 ++++++----------------- controllers/util/util.go | 43 ++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 28 deletions(-) create mode 100644 controllers/util/util.go diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 5770ee04f..14f9869f7 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -23,6 +23,7 @@ import ( mf "github.com/manifestival/manifestival" dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" "github.com/opendatahub-io/data-science-pipelines-operator/controllers/config" + "github.com/opendatahub-io/data-science-pipelines-operator/controllers/util" routev1 "github.com/openshift/api/route/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -121,16 +122,6 @@ func (r *DSPAReconciler) buildCondition(conditionType string, dspa *dspav1alpha1 return condition } -func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition { - for i := range status.Conditions { - c := status.Conditions[i] - if c.Type == condType { - return &c - } - } - return nil -} - //+kubebuilder:rbac:groups=datasciencepipelinesapplications.opendatahub.io,resources=datasciencepipelinesapplications,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=datasciencepipelinesapplications.opendatahub.io,resources=datasciencepipelinesapplications/status,verbs=get;update;patch //+kubebuilder:rbac:groups=datasciencepipelinesapplications.opendatahub.io,resources=datasciencepipelinesapplications/finalizers,verbs=update @@ -278,26 +269,15 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. 
r.PublishMetrics( dspa, - GetConditionByType(config.APIServerReady, conditions), - GetConditionByType(config.PersistenceAgentReady, conditions), - GetConditionByType(config.ScheduledWorkflowReady, conditions), - GetConditionByType(config.CrReady, conditions), + util.GetConditionByType(config.APIServerReady, conditions), + util.GetConditionByType(config.PersistenceAgentReady, conditions), + util.GetConditionByType(config.ScheduledWorkflowReady, conditions), + util.GetConditionByType(config.CrReady, conditions), ) return ctrl.Result{}, nil } -// GetConditionByType returns condition of type T if it exists in conditions, otherwise -// return empty condition struct. -func GetConditionByType(t string, conditions []metav1.Condition) metav1.Condition { - for _, c := range conditions { - if c.Type == t { - return c - } - } - return metav1.Condition{} -} - // isDeploymentInCondition evaluates if condition with "name" is in condition of type "conditionType". // this procedure is valid only for conditions with bool status type, for conditions of non bool type // results are undefined. @@ -331,9 +311,9 @@ func (r *DSPAReconciler) handleReadyCondition( // 2. Component is still deploying // We check for (1), and if no errors are found we presume (2) - progressingCond := GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing) - availableCond := GetDeploymentCondition(deployment.Status, appsv1.DeploymentAvailable) - replicaFailureCond := GetDeploymentCondition(deployment.Status, appsv1.DeploymentReplicaFailure) + progressingCond := util.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing) + availableCond := util.GetDeploymentCondition(deployment.Status, appsv1.DeploymentAvailable) + replicaFailureCond := util.GetDeploymentCondition(deployment.Status, appsv1.DeploymentReplicaFailure) if availableCond != nil && availableCond.Status == corev1.ConditionTrue { // If this DSPA component is minimally available, we are done. 
diff --git a/controllers/util/util.go b/controllers/util/util.go new file mode 100644 index 000000000..57e2b4216 --- /dev/null +++ b/controllers/util/util.go @@ -0,0 +1,43 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetConditionByType returns condition of type T if it exists in conditions, otherwise +// return empty condition struct. +func GetConditionByType(t string, conditions []metav1.Condition) metav1.Condition { + for _, c := range conditions { + if c.Type == t { + return c + } + } + return metav1.Condition{} +} + +func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition { + for i := range status.Conditions { + c := status.Conditions[i] + if c.Type == condType { + return &c + } + } + return nil +} From a2f47286ba1e4a5433cf3097a9223829e9e7e920 Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Wed, 14 Jun 2023 10:29:02 -0400 Subject: [PATCH 13/19] Code cleanup and conventional fixes. 
Signed-off-by: Humair Khan --- controllers/dspipeline_controller.go | 40 ++++++++++++++-------------- controllers/util/util.go | 9 +++---- 2 files changed, 24 insertions(+), 25 deletions(-) diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 14f9869f7..e790037b2 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -253,7 +253,7 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. return ctrl.Result{}, err } - err, conditions := r.GenerateStatus(ctx, dspa) + conditions, err := r.GenerateStatus(ctx, dspa) if err != nil { log.Info(err.Error()) return ctrl.Result{}, err @@ -278,7 +278,7 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. return ctrl.Result{}, nil } -// isDeploymentInCondition evaluates if condition with "name" is in condition of type "conditionType". +// handleReadyCondition evaluates if condition with "name" is in condition of type "conditionType". // this procedure is valid only for conditions with bool status type, for conditions of non bool type // results are undefined. 
func (r *DSPAReconciler) handleReadyCondition( @@ -286,7 +286,7 @@ func (r *DSPAReconciler) handleReadyCondition( dspa *dspav1alpha1.DataSciencePipelinesApplication, name string, condition string, -) (error, metav1.Condition) { +) (metav1.Condition, error) { readyCondition := r.buildCondition(condition, dspa, config.MinimumReplicasAvailable) deployment := &appsv1.Deployment{} @@ -295,7 +295,7 @@ func (r *DSPAReconciler) handleReadyCondition( err := r.Get(ctx, types.NamespacedName{Name: component, Namespace: dspa.Namespace}, deployment) if err != nil { - return err, metav1.Condition{} + return metav1.Condition{}, err } // First check if deployment is scaled down, if it is, component is deemed not ready @@ -303,7 +303,7 @@ func (r *DSPAReconciler) handleReadyCondition( readyCondition.Reason = config.MinimumReplicasAvailable readyCondition.Status = metav1.ConditionFalse readyCondition.Message = fmt.Sprintf("Deployment for component \"%s\" is scaled down.", component) - return nil, readyCondition + return readyCondition, nil } // At this point component is not minimally available, possible scenarios: @@ -320,7 +320,7 @@ func (r *DSPAReconciler) handleReadyCondition( readyCondition.Reason = config.MinimumReplicasAvailable readyCondition.Status = metav1.ConditionTrue readyCondition.Message = fmt.Sprintf("Component [%s] is minimally available.", component) - return nil, readyCondition + return readyCondition, nil } // There are two possible reasons for progress failing, deadline and replica create error: @@ -332,7 +332,7 @@ func (r *DSPAReconciler) handleReadyCondition( readyCondition.Status = metav1.ConditionFalse readyCondition.Message = fmt.Sprintf("Component [%s] has failed to progress. Reason: [%s]. 
"+ "Message: [%s]", component, progressingCond.Reason, progressingCond.Message) - return nil, readyCondition + return readyCondition, nil } if replicaFailureCond != nil && replicaFailureCond.Status == corev1.ConditionTrue { @@ -340,7 +340,7 @@ func (r *DSPAReconciler) handleReadyCondition( readyCondition.Status = metav1.ConditionFalse readyCondition.Message = fmt.Sprintf("Component's replica [%s] has failed to create. Reason: [%s]. "+ "Message: [%s]", component, replicaFailureCond.Reason, replicaFailureCond.Message) - return nil, readyCondition + return readyCondition, nil } // Search through the pods associated with this deployment @@ -352,7 +352,7 @@ func (r *DSPAReconciler) handleReadyCondition( } err = r.Client.List(ctx, podList, opts...) if err != nil { - return err, metav1.Condition{} + return metav1.Condition{}, err } hasPodFailures := false @@ -374,7 +374,7 @@ func (r *DSPAReconciler) handleReadyCondition( // We concatenate messages from all failing containers. readyCondition.Message = fmt.Sprintf("Component [%s] is in CrashLoopBackOff. 
"+ "Message from pod: [%s]", component, c.State.Waiting.Message) - return nil, readyCondition + return readyCondition, nil } } } @@ -383,7 +383,7 @@ func (r *DSPAReconciler) handleReadyCondition( readyCondition.Status = metav1.ConditionFalse readyCondition.Reason = config.FailingToDeploy readyCondition.Message = podFailureMessage - return nil, readyCondition + return readyCondition, nil } // No errors encountered, assume deployment is progressing successfully @@ -391,23 +391,23 @@ func (r *DSPAReconciler) handleReadyCondition( readyCondition.Reason = config.Deploying readyCondition.Status = metav1.ConditionFalse readyCondition.Message = fmt.Sprintf("Component [%s] is deploying.", component) - return nil, readyCondition + return readyCondition, nil } -func (r *DSPAReconciler) GenerateStatus(ctx context.Context, dspa *dspav1alpha1.DataSciencePipelinesApplication) (error, []metav1.Condition) { +func (r *DSPAReconciler) GenerateStatus(ctx context.Context, dspa *dspav1alpha1.DataSciencePipelinesApplication) ([]metav1.Condition, error) { - err, apiServerReady := r.handleReadyCondition(ctx, dspa, "ds-pipeline", config.APIServerReady) + apiServerReady, err := r.handleReadyCondition(ctx, dspa, "ds-pipeline", config.APIServerReady) if err != nil { - return err, []metav1.Condition{} + return []metav1.Condition{}, err } - err, persistenceAgentReady := r.handleReadyCondition(ctx, dspa, "ds-pipeline-persistenceagent", config.PersistenceAgentReady) + persistenceAgentReady, err := r.handleReadyCondition(ctx, dspa, "ds-pipeline-persistenceagent", config.PersistenceAgentReady) if err != nil { - return err, []metav1.Condition{} + return []metav1.Condition{}, err } - err, scheduledWorkflowReady := r.handleReadyCondition(ctx, dspa, "ds-pipeline-scheduledworkflow", config.ScheduledWorkflowReady) + scheduledWorkflowReady, err := r.handleReadyCondition(ctx, dspa, "ds-pipeline-scheduledworkflow", config.ScheduledWorkflowReady) if err != nil { - return err, []metav1.Condition{} + return 
[]metav1.Condition{}, err } var conditions []metav1.Condition conditions = append(conditions, apiServerReady) @@ -443,7 +443,7 @@ func (r *DSPAReconciler) GenerateStatus(ctx context.Context, dspa *dspav1alpha1. } } - return nil, conditions + return conditions, nil } func (r *DSPAReconciler) PublishMetrics(dspa *dspav1alpha1.DataSciencePipelinesApplication, diff --git a/controllers/util/util.go b/controllers/util/util.go index 57e2b4216..7aef462ee 100644 --- a/controllers/util/util.go +++ b/controllers/util/util.go @@ -21,11 +21,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// GetConditionByType returns condition of type T if it exists in conditions, otherwise +// GetConditionByType returns condition of type condType if it exists in conditions, otherwise // return empty condition struct. -func GetConditionByType(t string, conditions []metav1.Condition) metav1.Condition { +func GetConditionByType(condType string, conditions []metav1.Condition) metav1.Condition { for _, c := range conditions { - if c.Type == t { + if c.Type == condType { return c } } @@ -33,8 +33,7 @@ func GetConditionByType(t string, conditions []metav1.Condition) metav1.Conditio } func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition { - for i := range status.Conditions { - c := status.Conditions[i] + for _, c := range status.Conditions { if c.Type == condType { return &c } From a2f30b4c2c932284070c82bdde140806b910d0bb Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Fri, 16 Jun 2023 13:44:44 -0400 Subject: [PATCH 14/19] Add v0.2.2 tags. 
Signed-off-by: Humair Khan --- config/base/params.env | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/config/base/params.env b/config/base/params.env index 5605f25a6..c68bd0030 100644 --- a/config/base/params.env +++ b/config/base/params.env @@ -1,12 +1,12 @@ -IMAGES_APISERVER=quay.io/opendatahub/ds-pipelines-api-server:main-0e8a011 -IMAGES_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:main-0e8a011 -IMAGES_PERSISTENTAGENT=quay.io/opendatahub/ds-pipelines-persistenceagent:main-0e8a011 -IMAGES_SCHEDULEDWORKFLOW=quay.io/opendatahub/ds-pipelines-scheduledworkflow:main-0e8a011 +IMAGES_APISERVER=quay.io/opendatahub/ds-pipelines-api-server:v0.2.2 +IMAGES_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:v0.2.2 +IMAGES_PERSISTENTAGENT=quay.io/opendatahub/ds-pipelines-persistenceagent:v0.2.2 +IMAGES_SCHEDULEDWORKFLOW=quay.io/opendatahub/ds-pipelines-scheduledworkflow:v0.2.2 IMAGES_CACHE=registry.access.redhat.com/ubi8/ubi-minimal IMAGES_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1-188 -IMAGES_DSPO=quay.io/opendatahub/data-science-pipelines-operator:main +IMAGES_DSPO=quay.io/opendatahub/data-science-pipelines-operator:v0.2.2 IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy:v4.12.0 -IMAGES_MLMDENVOY=quay.io/opendatahub/ds-pipelines-metadata-envoy:1.7.0 -IMAGES_MLMDGRPC=quay.io/opendatahub/ds-pipelines-metadata-grpc:1.0.0 -IMAGES_MLMDWRITER=quay.io/opendatahub/ds-pipelines-metadata-writer:1.1.0 +IMAGES_MLMDENVOY=quay.io/opendatahub/ds-pipelines-metadata-envoy:v0.2.2 +IMAGES_MLMDGRPC=quay.io/opendatahub/ds-pipelines-metadata-grpc:v0.2.2 +IMAGES_MLMDWRITER=quay.io/opendatahub/ds-pipelines-metadata-writer:v0.2.2 From fa94936a72d3b6d81e9fc46257140aac5e85dd72 Mon Sep 17 00:00:00 2001 From: ddalvi Date: Fri, 16 Jun 2023 19:05:27 -0400 Subject: [PATCH 15/19] Revert "Revert "Update DSP Operator labels"" This reverts commit 
90524c44bd510e2e58c8b73689bdd1eeea76f335. --- config/manager/manager-service.yaml | 5 ++--- config/manager/manager.yaml | 12 +++--------- config/prometheus/monitor.yaml | 2 +- config/rbac/leader_election_role.yaml | 6 +----- config/rbac/leader_election_role_binding.yaml | 6 +----- config/rbac/role_binding.yaml | 6 +----- config/rbac/service_account.yaml | 6 +----- 7 files changed, 10 insertions(+), 33 deletions(-) diff --git a/config/manager/manager-service.yaml b/config/manager/manager-service.yaml index adcd9dc05..31811b0ae 100644 --- a/config/manager/manager-service.yaml +++ b/config/manager/manager-service.yaml @@ -3,11 +3,10 @@ kind: Service metadata: name: service labels: - control-plane: controller-manager - app.kubernetes.io/created-by: data-science-pipelines-operator + app.kubernetes.io/name: data-science-pipelines-operator spec: ports: - name: metrics port: 8080 selector: - control-plane: controller-manager + app.kubernetes.io/name: data-science-pipelines-operator diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 546488fd0..1f1bc9a76 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -4,24 +4,18 @@ metadata: name: controller-manager namespace: datasciencepipelinesapplications-controller labels: - control-plane: controller-manager - app.kubernetes.io/name: deployment - app.kubernetes.io/instance: controller-manager - app.kubernetes.io/component: manager - app.kubernetes.io/created-by: data-science-pipelines-operator - app.kubernetes.io/part-of: data-science-pipelines-operator + app.kubernetes.io/name: data-science-pipelines-operator spec: selector: matchLabels: - control-plane: controller-manager + app.kubernetes.io/name: data-science-pipelines-operator replicas: 1 template: metadata: annotations: kubectl.kubernetes.io/default-container: manager labels: - control-plane: controller-manager - app.kubernetes.io/created-by: data-science-pipelines-operator + app.kubernetes.io/name: 
data-science-pipelines-operator spec: securityContext: runAsNonRoot: true diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml index c742ba1fd..a4ab6f10e 100644 --- a/config/prometheus/monitor.yaml +++ b/config/prometheus/monitor.yaml @@ -9,4 +9,4 @@ spec: port: metrics selector: matchLabels: - control-plane: controller-manager + app.kubernetes.io/name: data-science-pipelines-operator diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml index 4e306bfee..9ab71b546 100644 --- a/config/rbac/leader_election_role.yaml +++ b/config/rbac/leader_election_role.yaml @@ -3,11 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: - app.kubernetes.io/name: role - app.kubernetes.io/instance: leader-election-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: data-science-pipelines-operator - app.kubernetes.io/part-of: data-science-pipelines-operator + app.kubernetes.io/name: data-science-pipelines-operator name: leader-election-role rules: - apiGroups: diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml index aeca9af60..48370ea06 100644 --- a/config/rbac/leader_election_role_binding.yaml +++ b/config/rbac/leader_election_role_binding.yaml @@ -2,11 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - app.kubernetes.io/name: rolebinding - app.kubernetes.io/instance: leader-election-rolebinding - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: data-science-pipelines-operator - app.kubernetes.io/part-of: data-science-pipelines-operator + app.kubernetes.io/name: data-science-pipelines-operator name: leader-election-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index 654b18363..eac347dd3 100644 --- a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -2,11 +2,7 @@ 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - app.kubernetes.io/name: clusterrolebinding - app.kubernetes.io/instance: manager-rolebinding - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: data-science-pipelines-operator - app.kubernetes.io/part-of: data-science-pipelines-operator + app.kubernetes.io/name: data-science-pipelines-operator name: manager-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml index f5d121810..eabf91acd 100644 --- a/config/rbac/service_account.yaml +++ b/config/rbac/service_account.yaml @@ -2,10 +2,6 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - app.kubernetes.io/name: serviceaccount - app.kubernetes.io/instance: controller-manager - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: data-science-pipelines-operator - app.kubernetes.io/part-of: data-science-pipelines-operator + app.kubernetes.io/name: data-science-pipelines-operator name: controller-manager namespace: datasciencepipelinesapplications-controller From 2620f5eb8aeab798f524804fef75c22991f0f8f6 Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Mon, 19 Jun 2023 09:15:22 +0200 Subject: [PATCH 16/19] Fix: update template name to be more unique --- controllers/config/templating.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/config/templating.go b/controllers/config/templating.go index 951ac8b9b..853fd6b5f 100644 --- a/controllers/config/templating.go +++ b/controllers/config/templating.go @@ -51,7 +51,7 @@ func templateSource(r io.Reader, context interface{}) mf.Source { if err != nil { panic(err) } - t, err := template.New("foo").Parse(string(b)) + t, err := template.New("manifestTemplateDSP").Parse(string(b)) if err != nil { panic(err) } From 7e98f6512f0d4a78bbbf452706d28ddbf06e7d0c Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Thu, 22 Jun 2023 16:45:07 -0400 Subject: [PATCH 
17/19] Give pipeline runner access to mcad/ray res. Signed-off-by: Humair Khan --- .../apiserver/role_pipeline-runner.yaml.tmpl | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/config/internal/apiserver/role_pipeline-runner.yaml.tmpl b/config/internal/apiserver/role_pipeline-runner.yaml.tmpl index eff729aca..45aa929ec 100644 --- a/config/internal/apiserver/role_pipeline-runner.yaml.tmpl +++ b/config/internal/apiserver/role_pipeline-runner.yaml.tmpl @@ -98,3 +98,25 @@ rules: - update - patch - delete + - apiGroups: + - mcad.ibm.com + resources: + - appwrappers + verbs: + - create + - get + - list + - patch + - delete + - apiGroups: + - ray.io + resources: + - rayclusters + - rayjobs + - rayservices + verbs: + - create + - get + - list + - patch + - delete From 33549b3e4dd7f72f02c7639ffaa6f17bd060a238 Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Thu, 22 Jun 2023 18:03:09 -0400 Subject: [PATCH 18/19] Provide dspo with rbac needed to hand out mcad/ray perms. Signed-off-by: Humair Khan --- config/rbac/role.yaml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index b4f743dfb..4ec9ef0ee 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -229,3 +229,25 @@ rules: - '*' verbs: - '*' +- apiGroups: + - mcad.ibm.com + resources: + - appwrappers + verbs: + - create + - get + - list + - patch + - delete +- apiGroups: + - ray.io + resources: + - rayclusters + - rayjobs + - rayservices + verbs: + - create + - get + - list + - patch + - delete From 4d7c39fc2e77c3be2eca306660bcb7297cb800e1 Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Thu, 22 Jun 2023 18:05:26 -0400 Subject: [PATCH 19/19] Update image configs to 1.0.0 Signed-off-by: Humair Khan --- config/base/params.env | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/config/base/params.env b/config/base/params.env index c68bd0030..aded367d6 100644 --- a/config/base/params.env +++ 
b/config/base/params.env @@ -1,12 +1,12 @@ -IMAGES_APISERVER=quay.io/opendatahub/ds-pipelines-api-server:v0.2.2 -IMAGES_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:v0.2.2 -IMAGES_PERSISTENTAGENT=quay.io/opendatahub/ds-pipelines-persistenceagent:v0.2.2 -IMAGES_SCHEDULEDWORKFLOW=quay.io/opendatahub/ds-pipelines-scheduledworkflow:v0.2.2 +IMAGES_APISERVER=quay.io/opendatahub/ds-pipelines-api-server:v1.0.0 +IMAGES_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:v1.0.0 +IMAGES_PERSISTENTAGENT=quay.io/opendatahub/ds-pipelines-persistenceagent:v1.0.0 +IMAGES_SCHEDULEDWORKFLOW=quay.io/opendatahub/ds-pipelines-scheduledworkflow:v1.0.0 IMAGES_CACHE=registry.access.redhat.com/ubi8/ubi-minimal IMAGES_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1-188 -IMAGES_DSPO=quay.io/opendatahub/data-science-pipelines-operator:v0.2.2 +IMAGES_DSPO=quay.io/opendatahub/data-science-pipelines-operator:v1.0.0 IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy:v4.12.0 -IMAGES_MLMDENVOY=quay.io/opendatahub/ds-pipelines-metadata-envoy:v0.2.2 -IMAGES_MLMDGRPC=quay.io/opendatahub/ds-pipelines-metadata-grpc:v0.2.2 -IMAGES_MLMDWRITER=quay.io/opendatahub/ds-pipelines-metadata-writer:v0.2.2 +IMAGES_MLMDENVOY=quay.io/opendatahub/ds-pipelines-metadata-envoy:v1.0.0 +IMAGES_MLMDGRPC=quay.io/opendatahub/ds-pipelines-metadata-grpc:v1.0.0 +IMAGES_MLMDWRITER=quay.io/opendatahub/ds-pipelines-metadata-writer:v1.0.0