diff --git a/integration/init/factorio/expected/.ship/state.json b/integration/init/factorio/expected/.ship/state.json index e3729dae2..4bfbf8cdb 100644 --- a/integration/init/factorio/expected/.ship/state.json +++ b/integration/init/factorio/expected/.ship/state.json @@ -1,7 +1,6 @@ { "v1": { "config": {}, - "helmValues": "# Factorio image version\n# ref: https://quay.io/repository/games_on_k8s/factorio?tab=tags\nimage: quay.io/games_on_k8s/factorio\nimageTag: \"0.14.22\"\n\n# Configure resource requests and limits\n# ref: http://kubernetes.io/docs/user-guide/compute-resources/\nresources:\n requests:\n memory: 512Mi\n cpu: 500m\n\n# Most of these map to environment variables. See docker-factorio for details:\n# https://github.com/games-on-k8s/docker-factorio/blob/master/README.md#environment-variable-reference\nfactorioServer:\n name: Kubernetes Server\n description: Factorio running on Kubernetes\n port: 34197\n # Lock this server down with a password.\n # password: change.me\n maxPlayers: 255\n # Publishes this server in the server browser if true.\n # You'll want to set Factorio.User below if true, as it becomes required.\n isPublic: false\n verifyIdentity: false\n # Allows or disallows console commands. Must be one of: `true`, `false`, or `admins-only`.\n allowCommands: admins-only\n # Pause the server when nobody is connected?\n noAutoPause: \"false\"\n # You'll want to change this to NodePort if you are on AWS.\n serviceType: LoadBalancer\n\n autosave:\n # Auto-save interval in minutes.\n interval: 2\n slots: 3\n\n rcon:\n enabled: false\n port: 27015\n # Empty value here enables an auto-generated password.\n password: \"\"\n serviceType: LoadBalancer\n\nfactorio:\n # Your factorio.com User/pass is needed if factorioServer.IsPublic is true.\n user:\n username: your.username\n password: your.password\n\npersistence:\n ## factorio data Persistent Volume Storage Class\n ## If defined, storageClassName: \u003cstorageClass\u003e\n ## If set to \"-\", storageClassName: \"\", which disables dynamic provisioning\n ## If undefined (the default) or set to null, no storageClassName spec is\n ## set, choosing the default provisioner. (gp2 on AWS, standard on\n ## GKE, AWS \u0026 OpenStack)\n ##\n # storageClass: \"-\"\n savedGames:\n # Set this to false if you don't care to persist saved games between restarts.\n enabled: true\n size: 1Gi\n mods:\n enabled: false\n size: 128Mi\n", "releaseName": "factorio", "helmValuesDefaults": "# Factorio image version\n# ref: https://quay.io/repository/games_on_k8s/factorio?tab=tags\nimage: quay.io/games_on_k8s/factorio\nimageTag: \"0.14.22\"\n\n# Configure resource requests and limits\n# ref: http://kubernetes.io/docs/user-guide/compute-resources/\nresources:\n requests:\n memory: 512Mi\n cpu: 500m\n\n# Most of these map to environment variables. See docker-factorio for details:\n# https://github.com/games-on-k8s/docker-factorio/blob/master/README.md#environment-variable-reference\nfactorioServer:\n name: Kubernetes Server\n description: Factorio running on Kubernetes\n port: 34197\n # Lock this server down with a password.\n # password: change.me\n maxPlayers: 255\n # Publishes this server in the server browser if true.\n # You'll want to set Factorio.User below if true, as it becomes required.\n isPublic: false\n verifyIdentity: false\n # Allows or disallows console commands. 
Must be one of: `true`, `false`, or `admins-only`.\n allowCommands: admins-only\n # Pause the server when nobody is connected?\n noAutoPause: \"false\"\n # You'll want to change this to NodePort if you are on AWS.\n serviceType: LoadBalancer\n\n autosave:\n # Auto-save interval in minutes.\n interval: 2\n slots: 3\n\n rcon:\n enabled: false\n port: 27015\n # Empty value here enables an auto-generated password.\n password: \"\"\n serviceType: LoadBalancer\n\nfactorio:\n # Your factorio.com User/pass is needed if factorioServer.IsPublic is true.\n user:\n username: your.username\n password: your.password\n\npersistence:\n ## factorio data Persistent Volume Storage Class\n ## If defined, storageClassName: \u003cstorageClass\u003e\n ## If set to \"-\", storageClassName: \"\", which disables dynamic provisioning\n ## If undefined (the default) or set to null, no storageClassName spec is\n ## set, choosing the default provisioner. (gp2 on AWS, standard on\n ## GKE, AWS \u0026 OpenStack)\n ##\n # storageClass: \"-\"\n savedGames:\n # Set this to false if you don't care to persist saved games between restarts.\n enabled: true\n size: 1Gi\n mods:\n enabled: false\n size: 128Mi\n", "upstream": "https://github.com/helm/charts/tree/ffb84f85a861e765caade879491a75a6dd3091a5/stable/factorio", diff --git a/integration/init/git-root-directory/expected/.ship/state.json b/integration/init/git-root-directory/expected/.ship/state.json index 83620ae6c..9fa97a86f 100644 --- a/integration/init/git-root-directory/expected/.ship/state.json +++ b/integration/init/git-root-directory/expected/.ship/state.json @@ -1,7 +1,6 @@ { "v1": { "config": {}, - "helmValues": "replicaCount: 1\nimage:\n repository: nginx\n tag: stable\n\n", "releaseName": "values-update", "helmValuesDefaults": "replicaCount: 1\nimage:\n repository: nginx\n tag: stable\n\n", "upstream": "https://github.com/replicatedhq/test-chart-root-dir/tree/507feecae588c958ebe82bcf701b8be63f34ac9b/", diff --git a/integration/init/grafana-with-values/expected/.ship/state.json b/integration/init/grafana-with-values/expected/.ship/state.json index 20cc7ab98..d693b883f 100644 --- a/integration/init/grafana-with-values/expected/.ship/state.json +++ b/integration/init/grafana-with-values/expected/.ship/state.json @@ -1,7 +1,7 @@ { "v1": { "config": {}, - "helmValues": "rbac:\n create: true\n pspEnabled: true\nserviceAccount:\n create: true\n name: null\nreplicas: 1\ndeploymentStrategy: RollingUpdate\nreadinessProbe:\n httpGet:\n path: /api/health\n port: 3000\nlivenessProbe:\n httpGet:\n path: /api/health\n port: 3000\n initialDelaySeconds: 60\n timeoutSeconds: 30\n failureThreshold: 10\nimage:\n repository: grafana/grafana\n tag: 5.3.4\n pullPolicy: IfNotPresent\nsecurityContext:\n runAsUser: 472\n fsGroup: 472\ndownloadDashboardsImage:\n repository: appropriate/curl\n tag: latest\n pullPolicy: IfNotPresent\nservice:\n type: ClusterIP\n port: 80\n annotations: {}\n labels: {}\ningress:\n enabled: false\n annotations: {}\n labels: {}\n path: /\n hosts:\n - chart-example.local\n tls: []\nresources: {}\nnodeSelector: {}\ntolerations: []\naffinity: {}\npersistence:\n enabled: true\n storageClassName: default\n accessModes:\n - ReadWriteOnce\n size: 111Gi\nadminUser: admin\nenv: {}\nenvFromSecret: \"\"\nextraSecretMounts: []\nplugins: []\ndatasources: {}\ndashboardProviders: {}\ndashboards: {}\ndashboardsConfigMaps: {}\ngrafana.ini:\n paths:\n data: /var/lib/grafana/data\n logs: /var/log/grafana\n plugins: /var/lib/grafana/plugins\n provisioning: 
/etc/grafana/provisioning\n analytics:\n check_for_updates: true\n log:\n mode: console\n grafana_net:\n url: https://grafana.net\nldap:\n existingSecret: \"\"\n config: \"\"\nsmtp:\n existingSecret: \"\"\nsidecar:\n image: kiwigrid/k8s-sidecar:0.0.6\n imagePullPolicy: IfNotPresent\n resources: null\n dashboards:\n enabled: false\n label: grafana_dashboard\n folder: /tmp/dashboards\n searchNamespace: null\n datasources:\n enabled: false\n label: grafana_datasource\n searchNamespace: null\nadminPassword: strongpassword\n", + "helmValues": "rbac:\n create: true\n pspEnabled: true\nserviceAccount:\n create: true\n name:\n\nreplicas: 1\n\ndeploymentStrategy: RollingUpdate\n\nreadinessProbe:\n httpGet:\n path: /api/health\n port: 3000\n\nlivenessProbe:\n httpGet:\n path: /api/health\n port: 3000\n initialDelaySeconds: 60\n timeoutSeconds: 30\n failureThreshold: 10\n\nimage:\n repository: grafana/grafana\n tag: 5.3.4\n pullPolicy: IfNotPresent\n\n ## Optionally specify an array of imagePullSecrets.\n ## Secrets must be manually created in the namespace.\n ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n ##\n # pullSecrets:\n # - myRegistrKeySecretName\n\nsecurityContext:\n runAsUser: 472\n fsGroup: 472\n\n## Assign a PriorityClassName to pods if set\n# priorityClassName:\n\ndownloadDashboardsImage:\n repository: appropriate/curl\n tag: latest\n pullPolicy: IfNotPresent\n\n## Pod Annotations\n# podAnnotations: {}\n\n## Deployment annotations\n# annotations: {}\n\n## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service).\n## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.\n## ref: http://kubernetes.io/docs/user-guide/services/\n##\nservice:\n type: ClusterIP\n port: 80\n annotations: {}\n labels: {}\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n labels: {}\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n# limits:\n# cpu: 100m\n# memory: 128Mi\n# requests:\n# cpu: 100m\n# memory: 128Mi\n\n## Node labels for pod assignment\n## ref: https://kubernetes.io/docs/user-guide/node-selection/\n#\nnodeSelector: {}\n\n## Tolerations for pod assignment\n## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\n##\ntolerations: []\n\n## Affinity for pod assignment\n## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity\n##\naffinity: {}\n\n## Enable persistence using Persistent Volume Claims\n## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/\n##\npersistence:\n enabled: true\n storageClassName: default\n accessModes:\n - ReadWriteOnce\n size: 111Gi\n # annotations: {}\n # subPath: \"\"\n # existingClaim:\n\nadminUser: admin\nadminPassword: strongpassword\n\n## Use an alternate scheduler, e.g. \"stork\".\n## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/\n##\n# schedulerName:\n\n## Extra environment variables that will be pass onto deployment pods\nenv: {}\n\n## The name of a secret in the same kubernetes namespace which contain values to be added to the environment\n## This can be useful for auth tokens, etc\nenvFromSecret: \"\"\n\n## Additional grafana server secret mounts\n# Defines additional mounts with secrets. 
Secrets must be manually created in the namespace.\nextraSecretMounts: []\n # - name: secret-files\n # mountPath: /etc/secrets\n # secretName: grafana-secret-files\n # readOnly: true\n\n## Pass the plugins you want installed as a list.\n##\nplugins: []\n # - digrich-bubblechart-panel\n # - grafana-clock-panel\n\n## Configure grafana datasources\n## ref: http://docs.grafana.org/administration/provisioning/#datasources\n##\ndatasources: {}\n# datasources.yaml:\n# apiVersion: 1\n# datasources:\n# - name: Prometheus\n# type: prometheus\n# url: http://prometheus-prometheus-server\n# access: proxy\n# isDefault: true\n\n## Configure grafana dashboard providers\n## ref: http://docs.grafana.org/administration/provisioning/#dashboards\n##\n## `path` must be /var/lib/grafana/dashboards/\u003cprovider_name\u003e\n##\ndashboardProviders: {}\n# dashboardproviders.yaml:\n# apiVersion: 1\n# providers:\n# - name: 'default'\n# orgId: 1\n# folder: ''\n# type: file\n# disableDeletion: false\n# editable: true\n# options:\n# path: /var/lib/grafana/dashboards/default\n\n## Configure grafana dashboard to import\n## NOTE: To use dashboards you must also enable/configure dashboardProviders\n## ref: https://grafana.com/dashboards\n##\n## dashboards per provider, use provider name as key.\n##\ndashboards: {}\n# default:\n# some-dashboard:\n# json: |\n# $RAW_JSON\n# prometheus-stats:\n# gnetId: 2\n# revision: 2\n# datasource: Prometheus\n# local-dashboard:\n# url: https://example.com/repository/test.json\n\n## Reference to external ConfigMap per provider. Use provider name as key and ConfiMap name as value.\n## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.\n## ConfigMap data example:\n##\n## data:\n## example-dashboard.json: |\n## RAW_JSON\n##\ndashboardsConfigMaps: {}\n# default: \"\"\n\n## Grafana's primary configuration\n## NOTE: values in map will be converted to ini format\n## ref: http://docs.grafana.org/installation/configuration/\n##\ngrafana.ini:\n paths:\n data: /var/lib/grafana/data\n logs: /var/log/grafana\n plugins: /var/lib/grafana/plugins\n provisioning: /etc/grafana/provisioning\n analytics:\n check_for_updates: true\n log:\n mode: console\n grafana_net:\n url: https://grafana.net\n## LDAP Authentication can be enabled with the following values on grafana.ini\n## NOTE: Grafana will fail to start if the value for ldap.toml is invalid\n # auth.ldap:\n # enabled: true\n # allow_sign_up: true\n # config_file: /etc/grafana/ldap.toml\n\n## Grafana's LDAP configuration\n## Templated by the template in _helpers.tpl\n## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled\n## ref: http://docs.grafana.org/installation/configuration/#auth-ldap\n## ref: http://docs.grafana.org/installation/ldap/#configuration\nldap:\n # `existingSecret` is a reference to an existing secret containing the ldap configuration\n # for Grafana in a key `ldap-toml`.\n existingSecret: \"\"\n # `config` is the content of `ldap.toml` that will be stored in the created secret\n config: \"\"\n # config: |-\n # verbose_logging = true\n\n # [[servers]]\n # host = \"my-ldap-server\"\n # port = 636\n # use_ssl = true\n # start_tls = false\n # ssl_skip_verify = false\n # bind_dn = \"uid=%s,ou=users,dc=myorg,dc=com\"\n\n## Grafana's SMTP configuration\n## NOTE: To enable, grafana.ini must be configured with smtp.enabled\n## ref: http://docs.grafana.org/installation/configuration/#smtp\nsmtp:\n # `existingSecret` is a reference to an existing secret containing the smtp 
configuration\n # for Grafana in keys `user` and `password`.\n existingSecret: \"\"\n\n## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders\n## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards\nsidecar:\n image: kiwigrid/k8s-sidecar:0.0.6\n imagePullPolicy: IfNotPresent\n resources:\n# limits:\n# cpu: 100m\n# memory: 100Mi\n# requests:\n# cpu: 50m\n# memory: 50Mi\n dashboards:\n enabled: false\n # label that the configmaps with dashboards are marked with\n label: grafana_dashboard\n # folder in the pod that should hold the collected dashboards\n folder: /tmp/dashboards\n # If specified, the sidecar will search for dashboard config-maps inside this namespace.\n # Otherwise the namespace in which the sidecar is running will be used.\n # It's also possible to specify ALL to search in all namespaces\n searchNamespace: null\n datasources:\n enabled: false\n # label that the configmaps with datasources are marked with\n label: grafana_datasource\n # If specified, the sidecar will search for datasource config-maps inside this namespace.\n # Otherwise the namespace in which the sidecar is running will be used.\n # It's also possible to specify ALL to search in all namespaces\n searchNamespace: null\n", "releaseName": "grafana", "helmValuesDefaults": "rbac:\n create: true\n pspEnabled: true\nserviceAccount:\n create: true\n name:\n\nreplicas: 1\n\ndeploymentStrategy: RollingUpdate\n\nreadinessProbe:\n httpGet:\n path: /api/health\n port: 3000\n\nlivenessProbe:\n httpGet:\n path: /api/health\n port: 3000\n initialDelaySeconds: 60\n timeoutSeconds: 30\n failureThreshold: 10\n\nimage:\n repository: grafana/grafana\n tag: 5.3.4\n pullPolicy: IfNotPresent\n\n ## Optionally specify an array of imagePullSecrets.\n ## Secrets must be manually created in the namespace.\n ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n ##\n # pullSecrets:\n # - myRegistrKeySecretName\n\nsecurityContext:\n runAsUser: 472\n fsGroup: 472\n\n## Assign a PriorityClassName to pods if set\n# priorityClassName:\n\ndownloadDashboardsImage:\n repository: appropriate/curl\n tag: latest\n pullPolicy: IfNotPresent\n\n## Pod Annotations\n# podAnnotations: {}\n\n## Deployment annotations\n# annotations: {}\n\n## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service).\n## or access it from within the cluster (ClusterIP service). 
Set the service type and the port to serve it.\n## ref: http://kubernetes.io/docs/user-guide/services/\n##\nservice:\n type: ClusterIP\n port: 80\n annotations: {}\n labels: {}\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n labels: {}\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n# limits:\n# cpu: 100m\n# memory: 128Mi\n# requests:\n# cpu: 100m\n# memory: 128Mi\n\n## Node labels for pod assignment\n## ref: https://kubernetes.io/docs/user-guide/node-selection/\n#\nnodeSelector: {}\n\n## Tolerations for pod assignment\n## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\n##\ntolerations: []\n\n## Affinity for pod assignment\n## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity\n##\naffinity: {}\n\n## Enable persistence using Persistent Volume Claims\n## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/\n##\npersistence:\n enabled: false\n # storageClassName: default\n # accessModes:\n # - ReadWriteOnce\n # size: 10Gi\n # annotations: {}\n # subPath: \"\"\n # existingClaim:\n\nadminUser: admin\n# adminPassword: strongpassword\n\n## Use an alternate scheduler, e.g. \"stork\".\n## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/\n##\n# schedulerName:\n\n## Extra environment variables that will be pass onto deployment pods\nenv: {}\n\n## The name of a secret in the same kubernetes namespace which contain values to be added to the environment\n## This can be useful for auth tokens, etc\nenvFromSecret: \"\"\n\n## Additional grafana server secret mounts\n# Defines additional mounts with secrets. Secrets must be manually created in the namespace.\nextraSecretMounts: []\n # - name: secret-files\n # mountPath: /etc/secrets\n # secretName: grafana-secret-files\n # readOnly: true\n\n## Pass the plugins you want installed as a list.\n##\nplugins: []\n # - digrich-bubblechart-panel\n # - grafana-clock-panel\n\n## Configure grafana datasources\n## ref: http://docs.grafana.org/administration/provisioning/#datasources\n##\ndatasources: {}\n# datasources.yaml:\n# apiVersion: 1\n# datasources:\n# - name: Prometheus\n# type: prometheus\n# url: http://prometheus-prometheus-server\n# access: proxy\n# isDefault: true\n\n## Configure grafana dashboard providers\n## ref: http://docs.grafana.org/administration/provisioning/#dashboards\n##\n## `path` must be /var/lib/grafana/dashboards/\u003cprovider_name\u003e\n##\ndashboardProviders: {}\n# dashboardproviders.yaml:\n# apiVersion: 1\n# providers:\n# - name: 'default'\n# orgId: 1\n# folder: ''\n# type: file\n# disableDeletion: false\n# editable: true\n# options:\n# path: /var/lib/grafana/dashboards/default\n\n## Configure grafana dashboard to import\n## NOTE: To use dashboards you must also enable/configure dashboardProviders\n## ref: https://grafana.com/dashboards\n##\n## dashboards per provider, use provider name as key.\n##\ndashboards: {}\n# default:\n# some-dashboard:\n# json: |\n# $RAW_JSON\n# prometheus-stats:\n# gnetId: 2\n# revision: 2\n# datasource: Prometheus\n# local-dashboard:\n# url: https://example.com/repository/test.json\n\n## Reference to external ConfigMap per provider. 
Use provider name as key and ConfiMap name as value.\n## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.\n## ConfigMap data example:\n##\n## data:\n## example-dashboard.json: |\n## RAW_JSON\n##\ndashboardsConfigMaps: {}\n# default: \"\"\n\n## Grafana's primary configuration\n## NOTE: values in map will be converted to ini format\n## ref: http://docs.grafana.org/installation/configuration/\n##\ngrafana.ini:\n paths:\n data: /var/lib/grafana/data\n logs: /var/log/grafana\n plugins: /var/lib/grafana/plugins\n provisioning: /etc/grafana/provisioning\n analytics:\n check_for_updates: true\n log:\n mode: console\n grafana_net:\n url: https://grafana.net\n## LDAP Authentication can be enabled with the following values on grafana.ini\n## NOTE: Grafana will fail to start if the value for ldap.toml is invalid\n # auth.ldap:\n # enabled: true\n # allow_sign_up: true\n # config_file: /etc/grafana/ldap.toml\n\n## Grafana's LDAP configuration\n## Templated by the template in _helpers.tpl\n## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled\n## ref: http://docs.grafana.org/installation/configuration/#auth-ldap\n## ref: http://docs.grafana.org/installation/ldap/#configuration\nldap:\n # `existingSecret` is a reference to an existing secret containing the ldap configuration\n # for Grafana in a key `ldap-toml`.\n existingSecret: \"\"\n # `config` is the content of `ldap.toml` that will be stored in the created secret\n config: \"\"\n # config: |-\n # verbose_logging = true\n\n # [[servers]]\n # host = \"my-ldap-server\"\n # port = 636\n # use_ssl = true\n # start_tls = false\n # ssl_skip_verify = false\n # bind_dn = \"uid=%s,ou=users,dc=myorg,dc=com\"\n\n## Grafana's SMTP configuration\n## NOTE: To enable, grafana.ini must be configured with smtp.enabled\n## ref: http://docs.grafana.org/installation/configuration/#smtp\nsmtp:\n # `existingSecret` is a reference to an existing secret containing the smtp configuration\n # for Grafana in keys `user` and `password`.\n existingSecret: \"\"\n\n## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders\n## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards\nsidecar:\n image: kiwigrid/k8s-sidecar:0.0.6\n imagePullPolicy: IfNotPresent\n resources:\n# limits:\n# cpu: 100m\n# memory: 100Mi\n# requests:\n# cpu: 50m\n# memory: 50Mi\n dashboards:\n enabled: false\n # label that the configmaps with dashboards are marked with\n label: grafana_dashboard\n # folder in the pod that should hold the collected dashboards\n folder: /tmp/dashboards\n # If specified, the sidecar will search for dashboard config-maps inside this namespace.\n # Otherwise the namespace in which the sidecar is running will be used.\n # It's also possible to specify ALL to search in all namespaces\n searchNamespace: null\n datasources:\n enabled: false\n # label that the configmaps with datasources are marked with\n label: grafana_datasource\n # If specified, the sidecar will search for datasource config-maps inside this namespace.\n # Otherwise the namespace in which the sidecar is running will be used.\n # It's also possible to specify ALL to search in all namespaces\n searchNamespace: null\n", "upstream": "https://github.com/helm/charts/tree/353ba5ef6467fd64035b7d5446df426f86d60153/stable/grafana", diff --git a/integration/init/istio-1.0.3/expected/.ship/state.json 
b/integration/init/istio-1.0.3/expected/.ship/state.json index 544c2b692..b0bc2df60 100644 --- a/integration/init/istio-1.0.3/expected/.ship/state.json +++ b/integration/init/istio-1.0.3/expected/.ship/state.json @@ -1,7 +1,6 @@ { "v1": { "config": {}, - "helmValues": "# Common settings.\nglobal:\n # Default hub for Istio images.\n # Releases are published to docker hub under 'istio' project.\n # Daily builds from prow are on gcr.io, and nightly builds from circle on docker.io/istionightly\n hub: gcr.io/istio-release\n\n # Default tag for Istio images.\n tag: release-1.0-latest-daily\n\n # Gateway used for legacy k8s Ingress resources. By default it is\n # using 'istio:ingress', to match 0.8 config. It requires that\n # ingress.enabled is set to true. You can also set it\n # to ingressgateway, or any other gateway you define in the 'gateway'\n # section.\n k8sIngressSelector: ingress\n\n # k8sIngressHttps will add port 443 on the ingress and ingressgateway.\n # It REQUIRES that the certificates are installed in the\n # expected secrets - enabling this option without certificates\n # will result in LDS rejection and the ingress will not work.\n k8sIngressHttps: false\n\n proxy:\n image: proxyv2\n\n # Resources for the sidecar.\n resources:\n requests:\n cpu: 10m\n # memory: 128Mi\n # limits:\n # cpu: 100m\n # memory: 128Mi\n\n # Controls number of Proxy worker threads.\n # If set to 0 (default), then start worker thread for each CPU thread/core.\n concurrency: 0\n\n # Configures the access log for each sidecar. Setting it to an empty string will\n # disable access log for sidecar.\n accessLogFile: \"/dev/stdout\"\n\n #If set to true, istio-proxy container will have privileged securityContext\n privileged: false\n\n # If set, newly injected sidecars will have core dumps enabled. Core dumps will always be written to the same\n # file to prevent storage filling up indefinitely. Add a timestamp option to core_pattern to keep all cores:\n # e.g. sysctl -w kernel.core_pattern=/var/lib/istio/core.%e.%p.%t\n enableCoreDump: false\n\n # Default port for Pilot agent health checks. 
A value of 0 will disable health checking.\n # statusPort: 15020\n statusPort: 0\n\n # The initial delay for readiness probes in seconds.\n readinessInitialDelaySeconds: 1\n\n # The period between readiness probes.\n readinessPeriodSeconds: 2\n\n # The number of successive failed probes before indicating readiness failure.\n readinessFailureThreshold: 30\n\n # istio egress capture whitelist\n # https://istio.io/docs/tasks/traffic-management/egress.html#calling-external-services-directly\n # example: includeIPRanges: \"172.30.0.0/16,172.20.0.0/16\"\n # would only capture egress traffic on those two IP Ranges, all other outbound traffic would\n # be allowed by the sidecar\n includeIPRanges: \"*\"\n excludeIPRanges: \"\"\n\n # istio ingress capture whitelist\n # examples:\n # Redirect no inbound traffic to Envoy: --includeInboundPorts=\"\"\n # Redirect all inbound traffic to Envoy: --includeInboundPorts=\"*\"\n # Redirect only selected ports: --includeInboundPorts=\"80,8080\"\n includeInboundPorts: \"*\"\n excludeInboundPorts: \"\"\n\n # This controls the 'policy' in the sidecar injector.\n autoInject: enabled\n\n # Sets the destination Statsd in envoy (the value of the \"--statsdUdpAddress\" proxy argument\n # would be \u003chost\u003e:\u003cport\u003e).\n # Disabled by default.\n # The istio-statsd-prom-bridge is deprecated and should not be used moving forward.\n envoyStatsd:\n # If enabled is set to true, host and port must also be provided. Istio no longer provides a statsd collector.\n enabled: false\n host: # example: statsd-svc\n port: # example: 9125\n\n # This controls the stats collection for proxies. To disable stats\n # collection, set the prometheusPort to 0.\n stats:\n prometheusPort: 15090\n\n proxy_init:\n # Base name for the proxy_init container, used to configure iptables.\n image: proxy_init\n\n # imagePullPolicy is applied to istio control plane components.\n # local tests require IfNotPresent, to avoid uploading to dockerhub.\n # TODO: Switch to Always as default, and override in the local tests.\n imagePullPolicy: IfNotPresent\n\n # controlPlaneMtls enabled. Will result in delays starting the pods while secrets are\n # propagated, not recommended for tests.\n controlPlaneSecurityEnabled: false\n\n # disablePolicyChecks disables mixer policy checks.\n # Will set the value with same name in istio config map - pilot needs to be restarted to take effect.\n disablePolicyChecks: false\n\n # policyCheckFailOpen allows traffic in cases when the mixer policy service cannot be reached.\n # Default is false which means the traffic is denied when the client is unable to connect to Mixer.\n policyCheckFailOpen: false\n\n # EnableTracing sets the value with same name in istio config map, requires pilot restart to take effect.\n enableTracing: true\n\n # Default mtls policy. If true, mtls between services will be enabled by default.\n mtls:\n # Default setting for service-to-service mtls. 
Can be set explicitly using\n # destination rules or service annotations.\n enabled: false\n\n # ImagePullSecrets for all ServiceAccount, list of secrets in the same namespace\n # to use for pulling any images in pods that reference this ServiceAccount.\n # Must be set for any clustser configured with privte docker registry.\n imagePullSecrets:\n # - private-registry-key\n\n # Specify pod scheduling arch(amd64, ppc64le, s390x) and weight as follows:\n # 0 - Never scheduled\n # 1 - Least preferred\n # 2 - No preference\n # 3 - Most preferred\n arch:\n amd64: 2\n s390x: 2\n ppc64le: 2\n\n # Whether to restrict the applications namespace the controller manages;\n # If not set, controller watches all namespaces\n oneNamespace: false\n\n # Whether to perform server-side validation of configuration.\n configValidation: true\n\n # If set to true, the pilot and citadel mtls will be exposed on the\n # ingress gateway\n meshExpansion: false\n\n # If set to true, the pilot and citadel mtls and the plain text pilot ports\n # will be exposed on an internal gateway\n meshExpansionILB: false\n\n # A minimal set of requested resources to applied to all deployments so that\n # Horizontal Pod Autoscaler will be able to function (if set).\n # Each component can overwrite these default values by adding its own resources\n # block in the relevant section below and setting the desired resources values.\n defaultResources:\n requests:\n cpu: 10m\n # memory: 128Mi\n # limits:\n # cpu: 100m\n # memory: 128Mi\n\n # Not recommended for user to configure this. Hyperkube image to use when creating custom resources\n hyperkube:\n hub: quay.io/coreos\n tag: v1.7.6_coreos.0\n\n # Kubernetes \u003e=v1.11.0 will create two PriorityClass, including system-cluster-critical and\n # system-node-critical, it is better to configure this in order to make sure your Istio pods\n # will not be killed because of low prioroty class.\n # Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass\n # for more detail.\n priorityClassName: \"\"\n\n # Include the crd definition when generating the template.\n # For 'helm template' and helm install \u003e 2.10 it should be true.\n # For helm \u003c 2.9, crds must be installed ahead of time with\n # 'kubectl apply -f install/kubernetes/helm/istio/templates/crds.yaml\n # and this options must be set off.\n crds: true\n\n#\n# ingress configuration\n#\ningress:\n enabled: false\n replicaCount: 1\n autoscaleMin: 1\n autoscaleMax: 5\n service:\n annotations: {}\n loadBalancerIP: \"\"\n type: LoadBalancer #change to NodePort, ClusterIP or LoadBalancer if need be\n # Uncomment the following line to preserve client source ip.\n # externalTrafficPolicy: Local\n ports:\n - port: 80\n name: http\n nodePort: 32000\n - port: 443\n name: https\n selector:\n istio: ingress\n\n#\n# Gateways Configuration\n# By default (if enabled) a pair of Ingress and Egress Gateways will be created for the mesh.\n# You can add more gateways in addition to the defaults but make sure those are uniquely named\n# and that NodePorts are not conflicting.\n# Disable specifc gateway by setting the `enabled` to false.\n#\ngateways:\n enabled: true\n\n istio-ingressgateway:\n enabled: true\n labels:\n app: istio-ingressgateway\n istio: ingressgateway\n replicaCount: 1\n autoscaleMin: 1\n autoscaleMax: 5\n resources: {}\n # limits:\n # cpu: 100m\n # memory: 128Mi\n #requests:\n # cpu: 1800m\n # memory: 256Mi\n cpu:\n targetAverageUtilization: 80\n loadBalancerIP: \"\"\n serviceAnnotations: {}\n 
type: LoadBalancer #change to NodePort, ClusterIP or LoadBalancer if need be\n # Uncomment the following line to preserve client source ip.\n # externalTrafficPolicy: Local\n\n ports:\n ## You can add custom gateway ports\n - port: 80\n targetPort: 80\n name: http2\n nodePort: 31380\n - port: 443\n name: https\n nodePort: 31390\n - port: 31400\n name: tcp\n nodePort: 31400\n # Pilot and Citadel MTLS ports are enabled in gateway - but will only redirect\n # to pilot/citadel if global.meshExpansion settings are enabled.\n - port: 15011\n targetPort: 15011\n name: tcp-pilot-grpc-tls\n - port: 8060\n targetPort: 8060\n name: tcp-citadel-grpc-tls\n - port: 853\n targetPort: 853\n name: tcp-dns-tls\n - port: 15030\n targetPort: 15030\n name: http2-prometheus\n - port: 15031\n targetPort: 15031\n name: http2-grafana\n secretVolumes:\n - name: ingressgateway-certs\n secretName: istio-ingressgateway-certs\n mountPath: /etc/istio/ingressgateway-certs\n - name: ingressgateway-ca-certs\n secretName: istio-ingressgateway-ca-certs\n mountPath: /etc/istio/ingressgateway-ca-certs\n\n istio-egressgateway:\n enabled: true\n labels:\n app: istio-egressgateway\n istio: egressgateway\n replicaCount: 1\n autoscaleMin: 1\n autoscaleMax: 5\n cpu:\n targetAverageUtilization: 80\n serviceAnnotations: {}\n type: ClusterIP #change to NodePort or LoadBalancer if need be\n ports:\n - port: 80\n name: http2\n - port: 443\n name: https\n secretVolumes:\n - name: egressgateway-certs\n secretName: istio-egressgateway-certs\n mountPath: /etc/istio/egressgateway-certs\n - name: egressgateway-ca-certs\n secretName: istio-egressgateway-ca-certs\n mountPath: /etc/istio/egressgateway-ca-certs\n\n # Mesh ILB gateway creates a gateway of type InternalLoadBalancer,\n # for mesh expansion. It exposes the mtls ports for Pilot,CA as well\n # as non-mtls ports to support upgrades and gradual transition.\n istio-ilbgateway:\n enabled: false\n labels:\n app: istio-ilbgateway\n istio: ilbgateway\n replicaCount: 1\n autoscaleMin: 1\n autoscaleMax: 5\n resources:\n requests:\n cpu: 800m\n memory: 512Mi\n #limits:\n # cpu: 1800m\n # memory: 256Mi\n cpu:\n targetAverageUtilization: 80 \n loadBalancerIP: \"\"\n serviceAnnotations:\n cloud.google.com/load-balancer-type: \"internal\"\n type: LoadBalancer\n ports:\n ## You can add custom gateway ports - google ILB default quota is 5 ports,\n - port: 15011\n name: grpc-pilot-mtls\n # Insecure port - only for migration from 0.8. 
Will be removed in 1.1\n - port: 15010\n name: grpc-pilot\n - port: 8060\n targetPort: 8060\n name: tcp-citadel-grpc-tls\n # Port 853 is reserved for the kube-dns gateway\n - port: 853\n name: tcp-dns\n secretVolumes:\n - name: ilbgateway-certs\n secretName: istio-ilbgateway-certs\n mountPath: /etc/istio/ilbgateway-certs\n - name: ilbgateway-ca-certs\n secretName: istio-ilbgateway-ca-certs\n mountPath: /etc/istio/ilbgateway-ca-certs\n\n#\n# sidecar-injector webhook configuration\n#\nsidecarInjectorWebhook:\n enabled: true\n replicaCount: 1\n image: sidecar_injector\n enableNamespacesByDefault: false\n\n#\n# galley configuration\n#\ngalley:\n enabled: true\n replicaCount: 1\n image: galley\n\n#\n# mixer configuration\n#\nmixer:\n enabled: true\n replicaCount: 1\n autoscaleMin: 1\n autoscaleMax: 5\n image: mixer\n\n env:\n GODEBUG: gctrace=2\n\n istio-policy:\n autoscaleEnabled: true\n autoscaleMin: 1\n autoscaleMax: 5\n cpu:\n targetAverageUtilization: 80\n\n istio-telemetry:\n autoscaleEnabled: true\n autoscaleMin: 1\n autoscaleMax: 5\n cpu:\n targetAverageUtilization: 80\n\n prometheusStatsdExporter:\n hub: docker.io/prom\n tag: v0.6.0\n\n#\n# pilot configuration\n#\npilot:\n enabled: true\n replicaCount: 1\n autoscaleMin: 1\n autoscaleMax: 5\n image: pilot\n sidecar: true\n traceSampling: 1.0\n # Resources for a small pilot install\n resources:\n requests:\n cpu: 500m\n memory: 2048Mi\n env:\n PILOT_PUSH_THROTTLE_COUNT: 100\n GODEBUG: gctrace=2\n cpu:\n targetAverageUtilization: 80\n\n#\n# security configuration\n#\nsecurity:\n replicaCount: 1\n image: citadel\n selfSigned: true # indicate if self-signed CA is used.\n\n#\n# addons configuration\n#\ntelemetry-gateway:\n gatewayName: ingressgateway\n grafanaEnabled: false\n prometheusEnabled: false\n\ngrafana:\n enabled: false\n replicaCount: 1\n image:\n repository: grafana/grafana\n tag: 5.2.3\n persist: false\n storageClassName: \"\"\n security:\n enabled: false\n adminUser: admin\n adminPassword: admin\n service:\n annotations: {}\n name: http\n type: ClusterIP\n externalPort: 3000\n internalPort: 3000\n\nprometheus:\n enabled: true\n replicaCount: 1\n hub: docker.io/prom\n tag: v2.3.1\n\n service:\n annotations: {}\n nodePort:\n enabled: false\n port: 32090\n\nservicegraph:\n enabled: false\n replicaCount: 1\n image: servicegraph\n service:\n annotations: {}\n name: http\n type: ClusterIP\n externalPort: 8088\n internalPort: 8088\n ingress:\n enabled: false\n # Used to create an Ingress record.\n hosts:\n - servicegraph.local\n annotations:\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n tls:\n # Secrets must be manually created in the namespace.\n # - secretName: servicegraph-tls\n # hosts:\n # - servicegraph.local\n # prometheus addres\n prometheusAddr: http://prometheus:9090\n\ntracing:\n enabled: false\n provider: jaeger\n jaeger:\n hub: docker.io/jaegertracing\n tag: 1.5\n memory:\n max_traces: 50000\n ui:\n port: 16686\n ingress:\n enabled: false\n # Used to create an Ingress record.\n hosts:\n - jaeger.local\n annotations:\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n tls:\n # Secrets must be manually created in the namespace.\n # - secretName: jaeger-tls\n # hosts:\n # - jaeger.local\n replicaCount: 1\n service:\n annotations: {}\n name: http\n type: ClusterIP\n externalPort: 9411\n internalPort: 9411\n ingress:\n enabled: false\n # Used to create an Ingress record.\n hosts:\n - tracing.local\n annotations:\n # kubernetes.io/ingress.class: nginx\n # 
kubernetes.io/tls-acme: \"true\"\n tls:\n # Secrets must be manually created in the namespace.\n # - secretName: tracing-tls\n # hosts:\n # - tracing.local\n\nkiali:\n enabled: false\n replicaCount: 1\n hub: docker.io/kiali\n tag: v0.9\n ingress:\n enabled: false\n ## Used to create an Ingress record.\n # hosts:\n # - kiali.local\n annotations:\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n tls:\n # Secrets must be manually created in the namespace.\n # - secretName: kiali-tls\n # hosts:\n # - kiali.local\n dashboard:\n username: admin\n # Default admin passphrase for kiali. Must be set during setup, and\n # changed by overriding the secret\n passphrase: admin\n\n # Override the automatically detected Grafana URL, usefull when Grafana service has no ExternalIPs\n # grafanaURL:\n\n # Override the automatically detected Jaeger URL, usefull when Jaeger service has no ExternalIPs\n # jaegerURL:\n\n# Certmanager uses ACME to sign certificates. Since Istio gateways are\n# mounting the TLS secrets the Certificate CRDs must be created in the\n# istio-system namespace. Once the certificate has been created, the\n# gateway must be updated by adding 'secretVolumes'. After the gateway\n# restart, DestinationRules can be created using the ACME-signed certificates.\ncertmanager:\n enabled: false\n hub: quay.io/jetstack\n tag: v0.3.1\n resources: {}\n", "releaseName": "istio", "helmValuesDefaults": "# Common settings.\nglobal:\n # Default hub for Istio images.\n # Releases are published to docker hub under 'istio' project.\n # Daily builds from prow are on gcr.io, and nightly builds from circle on docker.io/istionightly\n hub: gcr.io/istio-release\n\n # Default tag for Istio images.\n tag: release-1.0-latest-daily\n\n # Gateway used for legacy k8s Ingress resources. By default it is\n # using 'istio:ingress', to match 0.8 config. It requires that\n # ingress.enabled is set to true. You can also set it\n # to ingressgateway, or any other gateway you define in the 'gateway'\n # section.\n k8sIngressSelector: ingress\n\n # k8sIngressHttps will add port 443 on the ingress and ingressgateway.\n # It REQUIRES that the certificates are installed in the\n # expected secrets - enabling this option without certificates\n # will result in LDS rejection and the ingress will not work.\n k8sIngressHttps: false\n\n proxy:\n image: proxyv2\n\n # Resources for the sidecar.\n resources:\n requests:\n cpu: 10m\n # memory: 128Mi\n # limits:\n # cpu: 100m\n # memory: 128Mi\n\n # Controls number of Proxy worker threads.\n # If set to 0 (default), then start worker thread for each CPU thread/core.\n concurrency: 0\n\n # Configures the access log for each sidecar. Setting it to an empty string will\n # disable access log for sidecar.\n accessLogFile: \"/dev/stdout\"\n\n #If set to true, istio-proxy container will have privileged securityContext\n privileged: false\n\n # If set, newly injected sidecars will have core dumps enabled. Core dumps will always be written to the same\n # file to prevent storage filling up indefinitely. Add a timestamp option to core_pattern to keep all cores:\n # e.g. sysctl -w kernel.core_pattern=/var/lib/istio/core.%e.%p.%t\n enableCoreDump: false\n\n # Default port for Pilot agent health checks. 
A value of 0 will disable health checking.\n # statusPort: 15020\n statusPort: 0\n\n # The initial delay for readiness probes in seconds.\n readinessInitialDelaySeconds: 1\n\n # The period between readiness probes.\n readinessPeriodSeconds: 2\n\n # The number of successive failed probes before indicating readiness failure.\n readinessFailureThreshold: 30\n\n # istio egress capture whitelist\n # https://istio.io/docs/tasks/traffic-management/egress.html#calling-external-services-directly\n # example: includeIPRanges: \"172.30.0.0/16,172.20.0.0/16\"\n # would only capture egress traffic on those two IP Ranges, all other outbound traffic would\n # be allowed by the sidecar\n includeIPRanges: \"*\"\n excludeIPRanges: \"\"\n\n # istio ingress capture whitelist\n # examples:\n # Redirect no inbound traffic to Envoy: --includeInboundPorts=\"\"\n # Redirect all inbound traffic to Envoy: --includeInboundPorts=\"*\"\n # Redirect only selected ports: --includeInboundPorts=\"80,8080\"\n includeInboundPorts: \"*\"\n excludeInboundPorts: \"\"\n\n # This controls the 'policy' in the sidecar injector.\n autoInject: enabled\n\n # Sets the destination Statsd in envoy (the value of the \"--statsdUdpAddress\" proxy argument\n # would be \u003chost\u003e:\u003cport\u003e).\n # Disabled by default.\n # The istio-statsd-prom-bridge is deprecated and should not be used moving forward.\n envoyStatsd:\n # If enabled is set to true, host and port must also be provided. Istio no longer provides a statsd collector.\n enabled: false\n host: # example: statsd-svc\n port: # example: 9125\n\n # This controls the stats collection for proxies. To disable stats\n # collection, set the prometheusPort to 0.\n stats:\n prometheusPort: 15090\n\n proxy_init:\n # Base name for the proxy_init container, used to configure iptables.\n image: proxy_init\n\n # imagePullPolicy is applied to istio control plane components.\n # local tests require IfNotPresent, to avoid uploading to dockerhub.\n # TODO: Switch to Always as default, and override in the local tests.\n imagePullPolicy: IfNotPresent\n\n # controlPlaneMtls enabled. Will result in delays starting the pods while secrets are\n # propagated, not recommended for tests.\n controlPlaneSecurityEnabled: false\n\n # disablePolicyChecks disables mixer policy checks.\n # Will set the value with same name in istio config map - pilot needs to be restarted to take effect.\n disablePolicyChecks: false\n\n # policyCheckFailOpen allows traffic in cases when the mixer policy service cannot be reached.\n # Default is false which means the traffic is denied when the client is unable to connect to Mixer.\n policyCheckFailOpen: false\n\n # EnableTracing sets the value with same name in istio config map, requires pilot restart to take effect.\n enableTracing: true\n\n # Default mtls policy. If true, mtls between services will be enabled by default.\n mtls:\n # Default setting for service-to-service mtls. 
Can be set explicitly using\n # destination rules or service annotations.\n enabled: false\n\n # ImagePullSecrets for all ServiceAccount, list of secrets in the same namespace\n # to use for pulling any images in pods that reference this ServiceAccount.\n # Must be set for any clustser configured with privte docker registry.\n imagePullSecrets:\n # - private-registry-key\n\n # Specify pod scheduling arch(amd64, ppc64le, s390x) and weight as follows:\n # 0 - Never scheduled\n # 1 - Least preferred\n # 2 - No preference\n # 3 - Most preferred\n arch:\n amd64: 2\n s390x: 2\n ppc64le: 2\n\n # Whether to restrict the applications namespace the controller manages;\n # If not set, controller watches all namespaces\n oneNamespace: false\n\n # Whether to perform server-side validation of configuration.\n configValidation: true\n\n # If set to true, the pilot and citadel mtls will be exposed on the\n # ingress gateway\n meshExpansion: false\n\n # If set to true, the pilot and citadel mtls and the plain text pilot ports\n # will be exposed on an internal gateway\n meshExpansionILB: false\n\n # A minimal set of requested resources to applied to all deployments so that\n # Horizontal Pod Autoscaler will be able to function (if set).\n # Each component can overwrite these default values by adding its own resources\n # block in the relevant section below and setting the desired resources values.\n defaultResources:\n requests:\n cpu: 10m\n # memory: 128Mi\n # limits:\n # cpu: 100m\n # memory: 128Mi\n\n # Not recommended for user to configure this. Hyperkube image to use when creating custom resources\n hyperkube:\n hub: quay.io/coreos\n tag: v1.7.6_coreos.0\n\n # Kubernetes \u003e=v1.11.0 will create two PriorityClass, including system-cluster-critical and\n # system-node-critical, it is better to configure this in order to make sure your Istio pods\n # will not be killed because of low prioroty class.\n # Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass\n # for more detail.\n priorityClassName: \"\"\n\n # Include the crd definition when generating the template.\n # For 'helm template' and helm install \u003e 2.10 it should be true.\n # For helm \u003c 2.9, crds must be installed ahead of time with\n # 'kubectl apply -f install/kubernetes/helm/istio/templates/crds.yaml\n # and this options must be set off.\n crds: true\n\n#\n# ingress configuration\n#\ningress:\n enabled: false\n replicaCount: 1\n autoscaleMin: 1\n autoscaleMax: 5\n service:\n annotations: {}\n loadBalancerIP: \"\"\n type: LoadBalancer #change to NodePort, ClusterIP or LoadBalancer if need be\n # Uncomment the following line to preserve client source ip.\n # externalTrafficPolicy: Local\n ports:\n - port: 80\n name: http\n nodePort: 32000\n - port: 443\n name: https\n selector:\n istio: ingress\n\n#\n# Gateways Configuration\n# By default (if enabled) a pair of Ingress and Egress Gateways will be created for the mesh.\n# You can add more gateways in addition to the defaults but make sure those are uniquely named\n# and that NodePorts are not conflicting.\n# Disable specifc gateway by setting the `enabled` to false.\n#\ngateways:\n enabled: true\n\n istio-ingressgateway:\n enabled: true\n labels:\n app: istio-ingressgateway\n istio: ingressgateway\n replicaCount: 1\n autoscaleMin: 1\n autoscaleMax: 5\n resources: {}\n # limits:\n # cpu: 100m\n # memory: 128Mi\n #requests:\n # cpu: 1800m\n # memory: 256Mi\n cpu:\n targetAverageUtilization: 80\n loadBalancerIP: \"\"\n serviceAnnotations: {}\n 
type: LoadBalancer #change to NodePort, ClusterIP or LoadBalancer if need be\n # Uncomment the following line to preserve client source ip.\n # externalTrafficPolicy: Local\n\n ports:\n ## You can add custom gateway ports\n - port: 80\n targetPort: 80\n name: http2\n nodePort: 31380\n - port: 443\n name: https\n nodePort: 31390\n - port: 31400\n name: tcp\n nodePort: 31400\n # Pilot and Citadel MTLS ports are enabled in gateway - but will only redirect\n # to pilot/citadel if global.meshExpansion settings are enabled.\n - port: 15011\n targetPort: 15011\n name: tcp-pilot-grpc-tls\n - port: 8060\n targetPort: 8060\n name: tcp-citadel-grpc-tls\n - port: 853\n targetPort: 853\n name: tcp-dns-tls\n - port: 15030\n targetPort: 15030\n name: http2-prometheus\n - port: 15031\n targetPort: 15031\n name: http2-grafana\n secretVolumes:\n - name: ingressgateway-certs\n secretName: istio-ingressgateway-certs\n mountPath: /etc/istio/ingressgateway-certs\n - name: ingressgateway-ca-certs\n secretName: istio-ingressgateway-ca-certs\n mountPath: /etc/istio/ingressgateway-ca-certs\n\n istio-egressgateway:\n enabled: true\n labels:\n app: istio-egressgateway\n istio: egressgateway\n replicaCount: 1\n autoscaleMin: 1\n autoscaleMax: 5\n cpu:\n targetAverageUtilization: 80\n serviceAnnotations: {}\n type: ClusterIP #change to NodePort or LoadBalancer if need be\n ports:\n - port: 80\n name: http2\n - port: 443\n name: https\n secretVolumes:\n - name: egressgateway-certs\n secretName: istio-egressgateway-certs\n mountPath: /etc/istio/egressgateway-certs\n - name: egressgateway-ca-certs\n secretName: istio-egressgateway-ca-certs\n mountPath: /etc/istio/egressgateway-ca-certs\n\n # Mesh ILB gateway creates a gateway of type InternalLoadBalancer,\n # for mesh expansion. It exposes the mtls ports for Pilot,CA as well\n # as non-mtls ports to support upgrades and gradual transition.\n istio-ilbgateway:\n enabled: false\n labels:\n app: istio-ilbgateway\n istio: ilbgateway\n replicaCount: 1\n autoscaleMin: 1\n autoscaleMax: 5\n resources:\n requests:\n cpu: 800m\n memory: 512Mi\n #limits:\n # cpu: 1800m\n # memory: 256Mi\n cpu:\n targetAverageUtilization: 80 \n loadBalancerIP: \"\"\n serviceAnnotations:\n cloud.google.com/load-balancer-type: \"internal\"\n type: LoadBalancer\n ports:\n ## You can add custom gateway ports - google ILB default quota is 5 ports,\n - port: 15011\n name: grpc-pilot-mtls\n # Insecure port - only for migration from 0.8. 
Will be removed in 1.1\n - port: 15010\n name: grpc-pilot\n - port: 8060\n targetPort: 8060\n name: tcp-citadel-grpc-tls\n # Port 853 is reserved for the kube-dns gateway\n - port: 853\n name: tcp-dns\n secretVolumes:\n - name: ilbgateway-certs\n secretName: istio-ilbgateway-certs\n mountPath: /etc/istio/ilbgateway-certs\n - name: ilbgateway-ca-certs\n secretName: istio-ilbgateway-ca-certs\n mountPath: /etc/istio/ilbgateway-ca-certs\n\n#\n# sidecar-injector webhook configuration\n#\nsidecarInjectorWebhook:\n enabled: true\n replicaCount: 1\n image: sidecar_injector\n enableNamespacesByDefault: false\n\n#\n# galley configuration\n#\ngalley:\n enabled: true\n replicaCount: 1\n image: galley\n\n#\n# mixer configuration\n#\nmixer:\n enabled: true\n replicaCount: 1\n autoscaleMin: 1\n autoscaleMax: 5\n image: mixer\n\n env:\n GODEBUG: gctrace=2\n\n istio-policy:\n autoscaleEnabled: true\n autoscaleMin: 1\n autoscaleMax: 5\n cpu:\n targetAverageUtilization: 80\n\n istio-telemetry:\n autoscaleEnabled: true\n autoscaleMin: 1\n autoscaleMax: 5\n cpu:\n targetAverageUtilization: 80\n\n prometheusStatsdExporter:\n hub: docker.io/prom\n tag: v0.6.0\n\n#\n# pilot configuration\n#\npilot:\n enabled: true\n replicaCount: 1\n autoscaleMin: 1\n autoscaleMax: 5\n image: pilot\n sidecar: true\n traceSampling: 1.0\n # Resources for a small pilot install\n resources:\n requests:\n cpu: 500m\n memory: 2048Mi\n env:\n PILOT_PUSH_THROTTLE_COUNT: 100\n GODEBUG: gctrace=2\n cpu:\n targetAverageUtilization: 80\n\n#\n# security configuration\n#\nsecurity:\n replicaCount: 1\n image: citadel\n selfSigned: true # indicate if self-signed CA is used.\n\n#\n# addons configuration\n#\ntelemetry-gateway:\n gatewayName: ingressgateway\n grafanaEnabled: false\n prometheusEnabled: false\n\ngrafana:\n enabled: false\n replicaCount: 1\n image:\n repository: grafana/grafana\n tag: 5.2.3\n persist: false\n storageClassName: \"\"\n security:\n enabled: false\n adminUser: admin\n adminPassword: admin\n service:\n annotations: {}\n name: http\n type: ClusterIP\n externalPort: 3000\n internalPort: 3000\n\nprometheus:\n enabled: true\n replicaCount: 1\n hub: docker.io/prom\n tag: v2.3.1\n\n service:\n annotations: {}\n nodePort:\n enabled: false\n port: 32090\n\nservicegraph:\n enabled: false\n replicaCount: 1\n image: servicegraph\n service:\n annotations: {}\n name: http\n type: ClusterIP\n externalPort: 8088\n internalPort: 8088\n ingress:\n enabled: false\n # Used to create an Ingress record.\n hosts:\n - servicegraph.local\n annotations:\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n tls:\n # Secrets must be manually created in the namespace.\n # - secretName: servicegraph-tls\n # hosts:\n # - servicegraph.local\n # prometheus addres\n prometheusAddr: http://prometheus:9090\n\ntracing:\n enabled: false\n provider: jaeger\n jaeger:\n hub: docker.io/jaegertracing\n tag: 1.5\n memory:\n max_traces: 50000\n ui:\n port: 16686\n ingress:\n enabled: false\n # Used to create an Ingress record.\n hosts:\n - jaeger.local\n annotations:\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n tls:\n # Secrets must be manually created in the namespace.\n # - secretName: jaeger-tls\n # hosts:\n # - jaeger.local\n replicaCount: 1\n service:\n annotations: {}\n name: http\n type: ClusterIP\n externalPort: 9411\n internalPort: 9411\n ingress:\n enabled: false\n # Used to create an Ingress record.\n hosts:\n - tracing.local\n annotations:\n # kubernetes.io/ingress.class: nginx\n # 
kubernetes.io/tls-acme: \"true\"\n tls:\n # Secrets must be manually created in the namespace.\n # - secretName: tracing-tls\n # hosts:\n # - tracing.local\n\nkiali:\n enabled: false\n replicaCount: 1\n hub: docker.io/kiali\n tag: v0.9\n ingress:\n enabled: false\n ## Used to create an Ingress record.\n # hosts:\n # - kiali.local\n annotations:\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n tls:\n # Secrets must be manually created in the namespace.\n # - secretName: kiali-tls\n # hosts:\n # - kiali.local\n dashboard:\n username: admin\n # Default admin passphrase for kiali. Must be set during setup, and\n # changed by overriding the secret\n passphrase: admin\n\n # Override the automatically detected Grafana URL, usefull when Grafana service has no ExternalIPs\n # grafanaURL:\n\n # Override the automatically detected Jaeger URL, usefull when Jaeger service has no ExternalIPs\n # jaegerURL:\n\n# Certmanager uses ACME to sign certificates. Since Istio gateways are\n# mounting the TLS secrets the Certificate CRDs must be created in the\n# istio-system namespace. Once the certificate has been created, the\n# gateway must be updated by adding 'secretVolumes'. After the gateway\n# restart, DestinationRules can be created using the ACME-signed certificates.\ncertmanager:\n enabled: false\n hub: quay.io/jetstack\n tag: v0.3.1\n resources: {}\n", "upstream": "https://github.com/istio/istio/tree/_latest_/install/kubernetes/helm/istio", diff --git a/integration/init/istio-gogetter/expected/.ship/state.json b/integration/init/istio-gogetter/expected/.ship/state.json index d9293cb4d..690d4f8c3 100644 --- a/integration/init/istio-gogetter/expected/.ship/state.json +++ b/integration/init/istio-gogetter/expected/.ship/state.json @@ -1,7 +1,6 @@ { "v1": { "config": {}, - "helmValues": "#\n# Gateways Configuration, refer to the charts/gateways/values.yaml\n# for detailed configuration\n#\ngateways:\n enabled: true\n\n#\n# sidecar-injector webhook configuration, refer to the\n# charts/sidecarInjectorWebhook/values.yaml for detailed configuration\n#\nsidecarInjectorWebhook:\n enabled: true\n\n#\n# galley configuration, refer to charts/galley/values.yaml\n# for detailed configuration\n#\ngalley:\n enabled: true\n\n#\n# mixer configuration\n#\nmixer:\n enabled: true\n\n#\n# pilot configuration\n#\npilot:\n enabled: true\n\n#\n# security configuration\n#\nsecurity:\n enabled: true\n\n#\n# nodeagent configuration\n#\nnodeagent:\n enabled: false\n\n#\n# ingress configuration\n#\ningress:\n enabled: false\n\n#\n# addon grafana configuration\n#\ngrafana:\n enabled: false\n\n#\n# addon prometheus configuration\n#\nprometheus:\n enabled: true\n\n#\n# addon servicegraph configuration\n#\nservicegraph:\n enabled: false\n\n#\n# addon jaeger tracing configuration\n#\ntracing:\n enabled: false\n\n#\n# addon kiali tracing configuration\n#\nkiali:\n enabled: false\n\n# Common settings used among istio subcharts.\nglobal:\n # Default hub for Istio images.\n # Releases are published to docker hub under 'istio' project.\n # Daily builds from prow are on gcr.io, and nightly builds from circle on docker.io/istionightly\n hub: gcr.io/istio-release\n\n # Default tag for Istio images.\n tag: master-latest-daily\n\n k8sIngress:\n enabled: false\n # Gateway used for legacy k8s Ingress resources. By default it is\n # using 'istio:ingress', to match 0.8 config. It requires that\n # ingress.enabled is set to true. 
You can also set it\n # to ingressgateway, or any other gateway you define in the 'gateway'\n # section.\n gatewayName: ingress\n # enableHttps will add port 443 on the ingress.\n # It REQUIRES that the certificates are installed in the\n # expected secrets - enabling this option without certificates\n # will result in LDS rejection and the ingress will not work.\n enableHttps: false\n\n proxy:\n image: proxyv2\n\n # DNS domain suffix for pilot proxy agent. Default value is \"${POD_NAMESPACE}.svc.cluster.local\".\n proxyDomain: \"\"\n\n # DNS domain suffix for pilot proxy discovery. Default value is \"cluster.local\".\n discoveryDomain: \"\"\n\n # Resources for the sidecar.\n resources:\n requests:\n cpu: 10m\n # memory: 128Mi\n # limits:\n # cpu: 100m\n # memory: 128Mi\n\n # Controls number of Proxy worker threads.\n # If set to 0 (default), then start worker thread for each CPU thread/core.\n concurrency: 0\n\n # Configures the access log for each sidecar. Setting it to an empty string will\n # disable access log for sidecar.\n accessLogFile: \"/dev/stdout\"\n\n #If set to true, istio-proxy container will have privileged securityContext\n privileged: false\n\n # If set, newly injected sidecars will have core dumps enabled.\n enableCoreDump: false\n\n # Default port for Pilot agent health checks. A value of 0 will disable health checking.\n statusPort: 15020\n\n # The initial delay for readiness probes in seconds.\n readinessInitialDelaySeconds: 1\n\n # The period between readiness probes.\n readinessPeriodSeconds: 2\n\n # The number of successive failed probes before indicating readiness failure.\n readinessFailureThreshold: 30\n\n # istio egress capture whitelist\n # https://istio.io/docs/tasks/traffic-management/egress.html#calling-external-services-directly\n # example: includeIPRanges: \"172.30.0.0/16,172.20.0.0/16\"\n # would only capture egress traffic on those two IP Ranges, all other outbound traffic would\n # be allowed by the sidecar\n includeIPRanges: \"*\"\n excludeIPRanges: \"\"\n\n # istio ingress capture whitelist\n # examples:\n # Redirect no inbound traffic to Envoy: --includeInboundPorts=\"\"\n # Redirect all inbound traffic to Envoy: --includeInboundPorts=\"*\"\n # Redirect only selected ports: --includeInboundPorts=\"80,8080\"\n includeInboundPorts: \"*\"\n excludeInboundPorts: \"\"\n\n # This controls the 'policy' in the sidecar injector.\n autoInject: enabled\n\n # Sets the destination Statsd in envoy (the value of the \"--statsdUdpAddress\" proxy argument\n # would be \u003chost\u003e:\u003cport\u003e).\n # Disabled by default.\n # The istio-statsd-prom-bridge is deprecated and should not be used moving forward.\n envoyStatsd:\n # If enabled is set to true, host and port must also be provided. Istio no longer provides a statsd collector.\n enabled: false\n host: # example: statsd-svc\n port: # example: 9125\n\n # This controls the stats collection for proxies. To disable stats\n # collection, set the prometheusPort to 0.\n stats:\n prometheusPort: 15090\n\n # Specify which tracer to use. One of: lightstep, zipkin\n tracer: \"zipkin\"\n\n proxy_init:\n # Base name for the proxy_init container, used to configure iptables.\n image: proxy_init\n\n # imagePullPolicy is applied to istio control plane components.\n # local tests require IfNotPresent, to avoid uploading to dockerhub.\n # TODO: Switch to Always as default, and override in the local tests.\n imagePullPolicy: IfNotPresent\n\n # controlPlaneMtls enabled. 
Will result in delays starting the pods while secrets are\n # propagated, not recommended for tests.\n controlPlaneSecurityEnabled: false\n\n # SDS enabled. IF set to true, mTLS certificates for the sidecars will be\n # distributed through the SecretDiscoveryService instead of using K8S secrets to mount the certificates.\n sdsEnabled: false\n\n # disablePolicyChecks disables mixer policy checks.\n # Will set the value with same name in istio config map - pilot needs to be restarted to take effect.\n disablePolicyChecks: false\n\n # EnableTracing sets the value with same name in istio config map, requires pilot restart to take effect.\n enableTracing: true\n\n # Configuration for each of the supported tracers\n tracer:\n # Configuration for envoy to send trace data to LightStep.\n # Disabled by default.\n # address: the \u003chost\u003e:\u003cport\u003e of the satellite pool\n # accessToken: required for sending data to the pool\n # secure: specifies whether data should be sent with TLS\n # cacertPath: the path to the file containing the cacert to use when verifying TLS. If secure is true, this is\n # required. If a value is specified then a secret called \"lightstep.cacert\" must be created in the destination\n # namespace with the key matching the base of the provided cacertPath and the value being the cacert itself.\n #\n lightstep:\n address: \"\" # example: lightstep-satellite:443\n accessToken: \"\" # example: abcdefg1234567\n secure: true # example: true|false\n cacertPath: \"\" # example: /etc/lightstep/cacert.pem\n zipkin:\n # Host:Port for reporting trace data in zipkin format. If not specified, will default to\n # zipkin service (port 9411) in the same namespace as the other istio components.\n address: \"\"\n\n # Default mtls policy. If true, mtls between services will be enabled by default.\n mtls:\n # Default setting for service-to-service mtls. Can be set explicitly using\n # destination rules or service annotations.\n enabled: false\n\n # ImagePullSecrets for all ServiceAccount, list of secrets in the same namespace\n # to use for pulling any images in pods that reference this ServiceAccount.\n # For components that don't use ServiceAccounts (i.e. grafana, servicegraph, tracing)\n # ImagePullSecrets will be added to the corresponding Deployment(StatefulSet) objects.\n # Must be set for any clustser configured with private docker registry.\n imagePullSecrets:\n # - private-registry-key\n\n # Specify pod scheduling arch(amd64, ppc64le, s390x) and weight as follows:\n # 0 - Never scheduled\n # 1 - Least preferred\n # 2 - No preference\n # 3 - Most preferred\n arch:\n amd64: 2\n s390x: 2\n ppc64le: 2\n\n # Whether to restrict the applications namespace the controller manages;\n # If not set, controller watches all namespaces\n oneNamespace: false\n\n # Whether to perform server-side validation of configuration.\n configValidation: true\n\n # Custom DNS config for the pod to resolve names of services in other\n # clusters. 
Use this to add additional search domains, and other settings.\n # see\n # https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#dns-config\n # This does not apply to gateway pods as they typically need a different\n # set of DNS settings than the normal application pods (e.g., in\n # multicluster scenarios).\n #podDNSConfig:\n # dnsConfig:\n # searches: #some dummy examples\n # - foo.bar.baz\n # - {{ \"[[ valueOrDefault .DeploymentMeta.Namespace \\\"default\\\" ]]\" }}.bazoo\n\n # If set to true, the pilot and citadel mtls will be exposed on the\n # ingress gateway\n meshExpansion:\n enabled: false\n # If set to true, the pilot and citadel mtls and the plain text pilot ports\n # will be exposed on an internal gateway\n useILB: false\n\n multiCluster:\n # Set to true to connect two kubernetes clusters using a LB gateway as\n # the only entry point into the cluster (instead of requiring pod to\n # pod connectivity across two clusters). Note that for this system to\n # work, service objects from remote clusters have to be replicated to\n # local cluster (without the pod selectors). In addition, service\n # entries have to be added for each replicated service object, where\n # the endpoints in the service entry point to the remote cluster's\n # mcgatewayIP:15443. All clusters should be using Istio mTLS and must\n # have a shared root CA for this model to work.\n connectUsingGateway: false\n\n # A minimal set of requested resources to applied to all deployments so that\n # Horizontal Pod Autoscaler will be able to function (if set).\n # Each component can overwrite these default values by adding its own resources\n # block in the relevant section below and setting the desired resources values.\n defaultResources:\n requests:\n cpu: 10m\n # memory: 128Mi\n # limits:\n # cpu: 100m\n # memory: 128Mi\n\n # Kubernetes \u003e=v1.11.0 will create two PriorityClass, including system-cluster-critical and\n # system-node-critical, it is better to configure this in order to make sure your Istio pods\n # will not be killed because of low priority class.\n # Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass\n # for more detail.\n priorityClassName: \"\"\n\n # Include the crd definition when generating the template.\n # For 'helm template' and helm install \u003e 2.10 it should be true.\n # For helm \u003c 2.9, crds must be installed ahead of time with\n # 'kubectl apply -f install/kubernetes/helm/istio/templates/crds.yaml\n # and this options must be set off.\n crds: true\n\n # Use the Mesh Control Protocol (MCP) for configuring Mixer and\n # Pilot. 
Requires galley (`--set galley.enabled=true`).\n useMCP: false\n", "releaseName": "istio", "helmValuesDefaults": "#\n# Gateways Configuration, refer to the charts/gateways/values.yaml\n# for detailed configuration\n#\ngateways:\n enabled: true\n\n#\n# sidecar-injector webhook configuration, refer to the\n# charts/sidecarInjectorWebhook/values.yaml for detailed configuration\n#\nsidecarInjectorWebhook:\n enabled: true\n\n#\n# galley configuration, refer to charts/galley/values.yaml\n# for detailed configuration\n#\ngalley:\n enabled: true\n\n#\n# mixer configuration\n#\nmixer:\n enabled: true\n\n#\n# pilot configuration\n#\npilot:\n enabled: true\n\n#\n# security configuration\n#\nsecurity:\n enabled: true\n\n#\n# nodeagent configuration\n#\nnodeagent:\n enabled: false\n\n#\n# ingress configuration\n#\ningress:\n enabled: false\n\n#\n# addon grafana configuration\n#\ngrafana:\n enabled: false\n\n#\n# addon prometheus configuration\n#\nprometheus:\n enabled: true\n\n#\n# addon servicegraph configuration\n#\nservicegraph:\n enabled: false\n\n#\n# addon jaeger tracing configuration\n#\ntracing:\n enabled: false\n\n#\n# addon kiali tracing configuration\n#\nkiali:\n enabled: false\n\n# Common settings used among istio subcharts.\nglobal:\n # Default hub for Istio images.\n # Releases are published to docker hub under 'istio' project.\n # Daily builds from prow are on gcr.io, and nightly builds from circle on docker.io/istionightly\n hub: gcr.io/istio-release\n\n # Default tag for Istio images.\n tag: master-latest-daily\n\n k8sIngress:\n enabled: false\n # Gateway used for legacy k8s Ingress resources. By default it is\n # using 'istio:ingress', to match 0.8 config. It requires that\n # ingress.enabled is set to true. You can also set it\n # to ingressgateway, or any other gateway you define in the 'gateway'\n # section.\n gatewayName: ingress\n # enableHttps will add port 443 on the ingress.\n # It REQUIRES that the certificates are installed in the\n # expected secrets - enabling this option without certificates\n # will result in LDS rejection and the ingress will not work.\n enableHttps: false\n\n proxy:\n image: proxyv2\n\n # DNS domain suffix for pilot proxy agent. Default value is \"${POD_NAMESPACE}.svc.cluster.local\".\n proxyDomain: \"\"\n\n # DNS domain suffix for pilot proxy discovery. Default value is \"cluster.local\".\n discoveryDomain: \"\"\n\n # Resources for the sidecar.\n resources:\n requests:\n cpu: 10m\n # memory: 128Mi\n # limits:\n # cpu: 100m\n # memory: 128Mi\n\n # Controls number of Proxy worker threads.\n # If set to 0 (default), then start worker thread for each CPU thread/core.\n concurrency: 0\n\n # Configures the access log for each sidecar. Setting it to an empty string will\n # disable access log for sidecar.\n accessLogFile: \"/dev/stdout\"\n\n #If set to true, istio-proxy container will have privileged securityContext\n privileged: false\n\n # If set, newly injected sidecars will have core dumps enabled.\n enableCoreDump: false\n\n # Default port for Pilot agent health checks. 
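The istio values captured above are mostly boolean subchart toggles plus a shared `global` block that every subchart reads. As a rough sketch of how such defaults are overridden at install time, assuming a hypothetical override file passed to Helm 2's `helm install -f` (the file name and chosen values are illustrative, not part of the fixture):

# my-overrides.yaml -- hypothetical override file, shown only to illustrate the toggle layout.
galley:
  enabled: true        # must stay enabled when global.useMCP is true, per the comment above
grafana:
  enabled: true        # opt in to the Grafana addon
global:
  useMCP: true
  proxy:
    # capture egress traffic only for these ranges (the example given in the comment above)
    includeIPRanges: "172.30.0.0/16,172.20.0.0/16"

Anything not overridden falls back to the chart defaults -- the same content these fixtures retain in helmValuesDefaults.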
A value of 0 will disable health checking.\n statusPort: 15020\n\n # The initial delay for readiness probes in seconds.\n readinessInitialDelaySeconds: 1\n\n # The period between readiness probes.\n readinessPeriodSeconds: 2\n\n # The number of successive failed probes before indicating readiness failure.\n readinessFailureThreshold: 30\n\n # istio egress capture whitelist\n # https://istio.io/docs/tasks/traffic-management/egress.html#calling-external-services-directly\n # example: includeIPRanges: \"172.30.0.0/16,172.20.0.0/16\"\n # would only capture egress traffic on those two IP Ranges, all other outbound traffic would\n # be allowed by the sidecar\n includeIPRanges: \"*\"\n excludeIPRanges: \"\"\n\n # istio ingress capture whitelist\n # examples:\n # Redirect no inbound traffic to Envoy: --includeInboundPorts=\"\"\n # Redirect all inbound traffic to Envoy: --includeInboundPorts=\"*\"\n # Redirect only selected ports: --includeInboundPorts=\"80,8080\"\n includeInboundPorts: \"*\"\n excludeInboundPorts: \"\"\n\n # This controls the 'policy' in the sidecar injector.\n autoInject: enabled\n\n # Sets the destination Statsd in envoy (the value of the \"--statsdUdpAddress\" proxy argument\n # would be \u003chost\u003e:\u003cport\u003e).\n # Disabled by default.\n # The istio-statsd-prom-bridge is deprecated and should not be used moving forward.\n envoyStatsd:\n # If enabled is set to true, host and port must also be provided. Istio no longer provides a statsd collector.\n enabled: false\n host: # example: statsd-svc\n port: # example: 9125\n\n # This controls the stats collection for proxies. To disable stats\n # collection, set the prometheusPort to 0.\n stats:\n prometheusPort: 15090\n\n # Specify which tracer to use. One of: lightstep, zipkin\n tracer: \"zipkin\"\n\n proxy_init:\n # Base name for the proxy_init container, used to configure iptables.\n image: proxy_init\n\n # imagePullPolicy is applied to istio control plane components.\n # local tests require IfNotPresent, to avoid uploading to dockerhub.\n # TODO: Switch to Always as default, and override in the local tests.\n imagePullPolicy: IfNotPresent\n\n # controlPlaneMtls enabled. Will result in delays starting the pods while secrets are\n # propagated, not recommended for tests.\n controlPlaneSecurityEnabled: false\n\n # SDS enabled. IF set to true, mTLS certificates for the sidecars will be\n # distributed through the SecretDiscoveryService instead of using K8S secrets to mount the certificates.\n sdsEnabled: false\n\n # disablePolicyChecks disables mixer policy checks.\n # Will set the value with same name in istio config map - pilot needs to be restarted to take effect.\n disablePolicyChecks: false\n\n # EnableTracing sets the value with same name in istio config map, requires pilot restart to take effect.\n enableTracing: true\n\n # Configuration for each of the supported tracers\n tracer:\n # Configuration for envoy to send trace data to LightStep.\n # Disabled by default.\n # address: the \u003chost\u003e:\u003cport\u003e of the satellite pool\n # accessToken: required for sending data to the pool\n # secure: specifies whether data should be sent with TLS\n # cacertPath: the path to the file containing the cacert to use when verifying TLS. If secure is true, this is\n # required. 
If a value is specified then a secret called \"lightstep.cacert\" must be created in the destination\n # namespace with the key matching the base of the provided cacertPath and the value being the cacert itself.\n #\n lightstep:\n address: \"\" # example: lightstep-satellite:443\n accessToken: \"\" # example: abcdefg1234567\n secure: true # example: true|false\n cacertPath: \"\" # example: /etc/lightstep/cacert.pem\n zipkin:\n # Host:Port for reporting trace data in zipkin format. If not specified, will default to\n # zipkin service (port 9411) in the same namespace as the other istio components.\n address: \"\"\n\n # Default mtls policy. If true, mtls between services will be enabled by default.\n mtls:\n # Default setting for service-to-service mtls. Can be set explicitly using\n # destination rules or service annotations.\n enabled: false\n\n # ImagePullSecrets for all ServiceAccount, list of secrets in the same namespace\n # to use for pulling any images in pods that reference this ServiceAccount.\n # For components that don't use ServiceAccounts (i.e. grafana, servicegraph, tracing)\n # ImagePullSecrets will be added to the corresponding Deployment(StatefulSet) objects.\n # Must be set for any clustser configured with private docker registry.\n imagePullSecrets:\n # - private-registry-key\n\n # Specify pod scheduling arch(amd64, ppc64le, s390x) and weight as follows:\n # 0 - Never scheduled\n # 1 - Least preferred\n # 2 - No preference\n # 3 - Most preferred\n arch:\n amd64: 2\n s390x: 2\n ppc64le: 2\n\n # Whether to restrict the applications namespace the controller manages;\n # If not set, controller watches all namespaces\n oneNamespace: false\n\n # Whether to perform server-side validation of configuration.\n configValidation: true\n\n # Custom DNS config for the pod to resolve names of services in other\n # clusters. Use this to add additional search domains, and other settings.\n # see\n # https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#dns-config\n # This does not apply to gateway pods as they typically need a different\n # set of DNS settings than the normal application pods (e.g., in\n # multicluster scenarios).\n #podDNSConfig:\n # dnsConfig:\n # searches: #some dummy examples\n # - foo.bar.baz\n # - {{ \"[[ valueOrDefault .DeploymentMeta.Namespace \\\"default\\\" ]]\" }}.bazoo\n\n # If set to true, the pilot and citadel mtls will be exposed on the\n # ingress gateway\n meshExpansion:\n enabled: false\n # If set to true, the pilot and citadel mtls and the plain text pilot ports\n # will be exposed on an internal gateway\n useILB: false\n\n multiCluster:\n # Set to true to connect two kubernetes clusters using a LB gateway as\n # the only entry point into the cluster (instead of requiring pod to\n # pod connectivity across two clusters). Note that for this system to\n # work, service objects from remote clusters have to be replicated to\n # local cluster (without the pod selectors). In addition, service\n # entries have to be added for each replicated service object, where\n # the endpoints in the service entry point to the remote cluster's\n # mcgatewayIP:15443. 
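The multiCluster comment that closes just above describes manually created service entries whose endpoints point at the remote cluster's gateway on port 15443. A sketch of one such ServiceEntry under the v1alpha3 networking schema of this Istio era; the service name, host, and address are hypothetical:

apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: httpbin-remote              # hypothetical replicated service
spec:
  hosts:
  - httpbin.bar.svc.cluster.local   # host of the service object replicated from the remote cluster
  location: MESH_INTERNAL
  ports:
  - number: 8000
    name: http
    protocol: http
  resolution: STATIC
  endpoints:
  - address: 203.0.113.10           # the remote cluster's LB gateway IP ("mcgatewayIP")
    ports:
      http: 15443                   # the gateway port called out in the comment above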
All clusters should be using Istio mTLS and must\n # have a shared root CA for this model to work.\n connectUsingGateway: false\n\n # A minimal set of requested resources to applied to all deployments so that\n # Horizontal Pod Autoscaler will be able to function (if set).\n # Each component can overwrite these default values by adding its own resources\n # block in the relevant section below and setting the desired resources values.\n defaultResources:\n requests:\n cpu: 10m\n # memory: 128Mi\n # limits:\n # cpu: 100m\n # memory: 128Mi\n\n # Kubernetes \u003e=v1.11.0 will create two PriorityClass, including system-cluster-critical and\n # system-node-critical, it is better to configure this in order to make sure your Istio pods\n # will not be killed because of low priority class.\n # Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass\n # for more detail.\n priorityClassName: \"\"\n\n # Include the crd definition when generating the template.\n # For 'helm template' and helm install \u003e 2.10 it should be true.\n # For helm \u003c 2.9, crds must be installed ahead of time with\n # 'kubectl apply -f install/kubernetes/helm/istio/templates/crds.yaml\n # and this options must be set off.\n crds: true\n\n # Use the Mesh Control Protocol (MCP) for configuring Mixer and\n # Pilot. Requires galley (`--set galley.enabled=true`).\n useMCP: false\n", "upstream": "https://github.com/istio/istio/tree/d3eed9a347ad02f0b79e3f92330878f88953cf64/install/kubernetes/helm/istio", diff --git a/integration/init/istio/expected/.ship/state.json b/integration/init/istio/expected/.ship/state.json index d9293cb4d..690d4f8c3 100644 --- a/integration/init/istio/expected/.ship/state.json +++ b/integration/init/istio/expected/.ship/state.json @@ -1,7 +1,6 @@ { "v1": { "config": {}, - "helmValues": "#\n# Gateways Configuration, refer to the charts/gateways/values.yaml\n# for detailed configuration\n#\ngateways:\n enabled: true\n\n#\n# sidecar-injector webhook configuration, refer to the\n# charts/sidecarInjectorWebhook/values.yaml for detailed configuration\n#\nsidecarInjectorWebhook:\n enabled: true\n\n#\n# galley configuration, refer to charts/galley/values.yaml\n# for detailed configuration\n#\ngalley:\n enabled: true\n\n#\n# mixer configuration\n#\nmixer:\n enabled: true\n\n#\n# pilot configuration\n#\npilot:\n enabled: true\n\n#\n# security configuration\n#\nsecurity:\n enabled: true\n\n#\n# nodeagent configuration\n#\nnodeagent:\n enabled: false\n\n#\n# ingress configuration\n#\ningress:\n enabled: false\n\n#\n# addon grafana configuration\n#\ngrafana:\n enabled: false\n\n#\n# addon prometheus configuration\n#\nprometheus:\n enabled: true\n\n#\n# addon servicegraph configuration\n#\nservicegraph:\n enabled: false\n\n#\n# addon jaeger tracing configuration\n#\ntracing:\n enabled: false\n\n#\n# addon kiali tracing configuration\n#\nkiali:\n enabled: false\n\n# Common settings used among istio subcharts.\nglobal:\n # Default hub for Istio images.\n # Releases are published to docker hub under 'istio' project.\n # Daily builds from prow are on gcr.io, and nightly builds from circle on docker.io/istionightly\n hub: gcr.io/istio-release\n\n # Default tag for Istio images.\n tag: master-latest-daily\n\n k8sIngress:\n enabled: false\n # Gateway used for legacy k8s Ingress resources. By default it is\n # using 'istio:ingress', to match 0.8 config. It requires that\n # ingress.enabled is set to true. 
You can also set it\n # to ingressgateway, or any other gateway you define in the 'gateway'\n # section.\n gatewayName: ingress\n # enableHttps will add port 443 on the ingress.\n # It REQUIRES that the certificates are installed in the\n # expected secrets - enabling this option without certificates\n # will result in LDS rejection and the ingress will not work.\n enableHttps: false\n\n proxy:\n image: proxyv2\n\n # DNS domain suffix for pilot proxy agent. Default value is \"${POD_NAMESPACE}.svc.cluster.local\".\n proxyDomain: \"\"\n\n # DNS domain suffix for pilot proxy discovery. Default value is \"cluster.local\".\n discoveryDomain: \"\"\n\n # Resources for the sidecar.\n resources:\n requests:\n cpu: 10m\n # memory: 128Mi\n # limits:\n # cpu: 100m\n # memory: 128Mi\n\n # Controls number of Proxy worker threads.\n # If set to 0 (default), then start worker thread for each CPU thread/core.\n concurrency: 0\n\n # Configures the access log for each sidecar. Setting it to an empty string will\n # disable access log for sidecar.\n accessLogFile: \"/dev/stdout\"\n\n #If set to true, istio-proxy container will have privileged securityContext\n privileged: false\n\n # If set, newly injected sidecars will have core dumps enabled.\n enableCoreDump: false\n\n # Default port for Pilot agent health checks. A value of 0 will disable health checking.\n statusPort: 15020\n\n # The initial delay for readiness probes in seconds.\n readinessInitialDelaySeconds: 1\n\n # The period between readiness probes.\n readinessPeriodSeconds: 2\n\n # The number of successive failed probes before indicating readiness failure.\n readinessFailureThreshold: 30\n\n # istio egress capture whitelist\n # https://istio.io/docs/tasks/traffic-management/egress.html#calling-external-services-directly\n # example: includeIPRanges: \"172.30.0.0/16,172.20.0.0/16\"\n # would only capture egress traffic on those two IP Ranges, all other outbound traffic would\n # be allowed by the sidecar\n includeIPRanges: \"*\"\n excludeIPRanges: \"\"\n\n # istio ingress capture whitelist\n # examples:\n # Redirect no inbound traffic to Envoy: --includeInboundPorts=\"\"\n # Redirect all inbound traffic to Envoy: --includeInboundPorts=\"*\"\n # Redirect only selected ports: --includeInboundPorts=\"80,8080\"\n includeInboundPorts: \"*\"\n excludeInboundPorts: \"\"\n\n # This controls the 'policy' in the sidecar injector.\n autoInject: enabled\n\n # Sets the destination Statsd in envoy (the value of the \"--statsdUdpAddress\" proxy argument\n # would be \u003chost\u003e:\u003cport\u003e).\n # Disabled by default.\n # The istio-statsd-prom-bridge is deprecated and should not be used moving forward.\n envoyStatsd:\n # If enabled is set to true, host and port must also be provided. Istio no longer provides a statsd collector.\n enabled: false\n host: # example: statsd-svc\n port: # example: 9125\n\n # This controls the stats collection for proxies. To disable stats\n # collection, set the prometheusPort to 0.\n stats:\n prometheusPort: 15090\n\n # Specify which tracer to use. One of: lightstep, zipkin\n tracer: \"zipkin\"\n\n proxy_init:\n # Base name for the proxy_init container, used to configure iptables.\n image: proxy_init\n\n # imagePullPolicy is applied to istio control plane components.\n # local tests require IfNotPresent, to avoid uploading to dockerhub.\n # TODO: Switch to Always as default, and override in the local tests.\n imagePullPolicy: IfNotPresent\n\n # controlPlaneMtls enabled. 
Will result in delays starting the pods while secrets are\n # propagated, not recommended for tests.\n controlPlaneSecurityEnabled: false\n\n # SDS enabled. IF set to true, mTLS certificates for the sidecars will be\n # distributed through the SecretDiscoveryService instead of using K8S secrets to mount the certificates.\n sdsEnabled: false\n\n # disablePolicyChecks disables mixer policy checks.\n # Will set the value with same name in istio config map - pilot needs to be restarted to take effect.\n disablePolicyChecks: false\n\n # EnableTracing sets the value with same name in istio config map, requires pilot restart to take effect.\n enableTracing: true\n\n # Configuration for each of the supported tracers\n tracer:\n # Configuration for envoy to send trace data to LightStep.\n # Disabled by default.\n # address: the \u003chost\u003e:\u003cport\u003e of the satellite pool\n # accessToken: required for sending data to the pool\n # secure: specifies whether data should be sent with TLS\n # cacertPath: the path to the file containing the cacert to use when verifying TLS. If secure is true, this is\n # required. If a value is specified then a secret called \"lightstep.cacert\" must be created in the destination\n # namespace with the key matching the base of the provided cacertPath and the value being the cacert itself.\n #\n lightstep:\n address: \"\" # example: lightstep-satellite:443\n accessToken: \"\" # example: abcdefg1234567\n secure: true # example: true|false\n cacertPath: \"\" # example: /etc/lightstep/cacert.pem\n zipkin:\n # Host:Port for reporting trace data in zipkin format. If not specified, will default to\n # zipkin service (port 9411) in the same namespace as the other istio components.\n address: \"\"\n\n # Default mtls policy. If true, mtls between services will be enabled by default.\n mtls:\n # Default setting for service-to-service mtls. Can be set explicitly using\n # destination rules or service annotations.\n enabled: false\n\n # ImagePullSecrets for all ServiceAccount, list of secrets in the same namespace\n # to use for pulling any images in pods that reference this ServiceAccount.\n # For components that don't use ServiceAccounts (i.e. grafana, servicegraph, tracing)\n # ImagePullSecrets will be added to the corresponding Deployment(StatefulSet) objects.\n # Must be set for any clustser configured with private docker registry.\n imagePullSecrets:\n # - private-registry-key\n\n # Specify pod scheduling arch(amd64, ppc64le, s390x) and weight as follows:\n # 0 - Never scheduled\n # 1 - Least preferred\n # 2 - No preference\n # 3 - Most preferred\n arch:\n amd64: 2\n s390x: 2\n ppc64le: 2\n\n # Whether to restrict the applications namespace the controller manages;\n # If not set, controller watches all namespaces\n oneNamespace: false\n\n # Whether to perform server-side validation of configuration.\n configValidation: true\n\n # Custom DNS config for the pod to resolve names of services in other\n # clusters. 
Use this to add additional search domains, and other settings.\n # see\n # https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#dns-config\n # This does not apply to gateway pods as they typically need a different\n # set of DNS settings than the normal application pods (e.g., in\n # multicluster scenarios).\n #podDNSConfig:\n # dnsConfig:\n # searches: #some dummy examples\n # - foo.bar.baz\n # - {{ \"[[ valueOrDefault .DeploymentMeta.Namespace \\\"default\\\" ]]\" }}.bazoo\n\n # If set to true, the pilot and citadel mtls will be exposed on the\n # ingress gateway\n meshExpansion:\n enabled: false\n # If set to true, the pilot and citadel mtls and the plain text pilot ports\n # will be exposed on an internal gateway\n useILB: false\n\n multiCluster:\n # Set to true to connect two kubernetes clusters using a LB gateway as\n # the only entry point into the cluster (instead of requiring pod to\n # pod connectivity across two clusters). Note that for this system to\n # work, service objects from remote clusters have to be replicated to\n # local cluster (without the pod selectors). In addition, service\n # entries have to be added for each replicated service object, where\n # the endpoints in the service entry point to the remote cluster's\n # mcgatewayIP:15443. All clusters should be using Istio mTLS and must\n # have a shared root CA for this model to work.\n connectUsingGateway: false\n\n # A minimal set of requested resources to applied to all deployments so that\n # Horizontal Pod Autoscaler will be able to function (if set).\n # Each component can overwrite these default values by adding its own resources\n # block in the relevant section below and setting the desired resources values.\n defaultResources:\n requests:\n cpu: 10m\n # memory: 128Mi\n # limits:\n # cpu: 100m\n # memory: 128Mi\n\n # Kubernetes \u003e=v1.11.0 will create two PriorityClass, including system-cluster-critical and\n # system-node-critical, it is better to configure this in order to make sure your Istio pods\n # will not be killed because of low priority class.\n # Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass\n # for more detail.\n priorityClassName: \"\"\n\n # Include the crd definition when generating the template.\n # For 'helm template' and helm install \u003e 2.10 it should be true.\n # For helm \u003c 2.9, crds must be installed ahead of time with\n # 'kubectl apply -f install/kubernetes/helm/istio/templates/crds.yaml\n # and this options must be set off.\n crds: true\n\n # Use the Mesh Control Protocol (MCP) for configuring Mixer and\n # Pilot. 
Requires galley (`--set galley.enabled=true`).\n useMCP: false\n", "releaseName": "istio", "helmValuesDefaults": "#\n# Gateways Configuration, refer to the charts/gateways/values.yaml\n# for detailed configuration\n#\ngateways:\n enabled: true\n\n#\n# sidecar-injector webhook configuration, refer to the\n# charts/sidecarInjectorWebhook/values.yaml for detailed configuration\n#\nsidecarInjectorWebhook:\n enabled: true\n\n#\n# galley configuration, refer to charts/galley/values.yaml\n# for detailed configuration\n#\ngalley:\n enabled: true\n\n#\n# mixer configuration\n#\nmixer:\n enabled: true\n\n#\n# pilot configuration\n#\npilot:\n enabled: true\n\n#\n# security configuration\n#\nsecurity:\n enabled: true\n\n#\n# nodeagent configuration\n#\nnodeagent:\n enabled: false\n\n#\n# ingress configuration\n#\ningress:\n enabled: false\n\n#\n# addon grafana configuration\n#\ngrafana:\n enabled: false\n\n#\n# addon prometheus configuration\n#\nprometheus:\n enabled: true\n\n#\n# addon servicegraph configuration\n#\nservicegraph:\n enabled: false\n\n#\n# addon jaeger tracing configuration\n#\ntracing:\n enabled: false\n\n#\n# addon kiali tracing configuration\n#\nkiali:\n enabled: false\n\n# Common settings used among istio subcharts.\nglobal:\n # Default hub for Istio images.\n # Releases are published to docker hub under 'istio' project.\n # Daily builds from prow are on gcr.io, and nightly builds from circle on docker.io/istionightly\n hub: gcr.io/istio-release\n\n # Default tag for Istio images.\n tag: master-latest-daily\n\n k8sIngress:\n enabled: false\n # Gateway used for legacy k8s Ingress resources. By default it is\n # using 'istio:ingress', to match 0.8 config. It requires that\n # ingress.enabled is set to true. You can also set it\n # to ingressgateway, or any other gateway you define in the 'gateway'\n # section.\n gatewayName: ingress\n # enableHttps will add port 443 on the ingress.\n # It REQUIRES that the certificates are installed in the\n # expected secrets - enabling this option without certificates\n # will result in LDS rejection and the ingress will not work.\n enableHttps: false\n\n proxy:\n image: proxyv2\n\n # DNS domain suffix for pilot proxy agent. Default value is \"${POD_NAMESPACE}.svc.cluster.local\".\n proxyDomain: \"\"\n\n # DNS domain suffix for pilot proxy discovery. Default value is \"cluster.local\".\n discoveryDomain: \"\"\n\n # Resources for the sidecar.\n resources:\n requests:\n cpu: 10m\n # memory: 128Mi\n # limits:\n # cpu: 100m\n # memory: 128Mi\n\n # Controls number of Proxy worker threads.\n # If set to 0 (default), then start worker thread for each CPU thread/core.\n concurrency: 0\n\n # Configures the access log for each sidecar. Setting it to an empty string will\n # disable access log for sidecar.\n accessLogFile: \"/dev/stdout\"\n\n #If set to true, istio-proxy container will have privileged securityContext\n privileged: false\n\n # If set, newly injected sidecars will have core dumps enabled.\n enableCoreDump: false\n\n # Default port for Pilot agent health checks. 
A value of 0 will disable health checking.\n statusPort: 15020\n\n # The initial delay for readiness probes in seconds.\n readinessInitialDelaySeconds: 1\n\n # The period between readiness probes.\n readinessPeriodSeconds: 2\n\n # The number of successive failed probes before indicating readiness failure.\n readinessFailureThreshold: 30\n\n # istio egress capture whitelist\n # https://istio.io/docs/tasks/traffic-management/egress.html#calling-external-services-directly\n # example: includeIPRanges: \"172.30.0.0/16,172.20.0.0/16\"\n # would only capture egress traffic on those two IP Ranges, all other outbound traffic would\n # be allowed by the sidecar\n includeIPRanges: \"*\"\n excludeIPRanges: \"\"\n\n # istio ingress capture whitelist\n # examples:\n # Redirect no inbound traffic to Envoy: --includeInboundPorts=\"\"\n # Redirect all inbound traffic to Envoy: --includeInboundPorts=\"*\"\n # Redirect only selected ports: --includeInboundPorts=\"80,8080\"\n includeInboundPorts: \"*\"\n excludeInboundPorts: \"\"\n\n # This controls the 'policy' in the sidecar injector.\n autoInject: enabled\n\n # Sets the destination Statsd in envoy (the value of the \"--statsdUdpAddress\" proxy argument\n # would be \u003chost\u003e:\u003cport\u003e).\n # Disabled by default.\n # The istio-statsd-prom-bridge is deprecated and should not be used moving forward.\n envoyStatsd:\n # If enabled is set to true, host and port must also be provided. Istio no longer provides a statsd collector.\n enabled: false\n host: # example: statsd-svc\n port: # example: 9125\n\n # This controls the stats collection for proxies. To disable stats\n # collection, set the prometheusPort to 0.\n stats:\n prometheusPort: 15090\n\n # Specify which tracer to use. One of: lightstep, zipkin\n tracer: \"zipkin\"\n\n proxy_init:\n # Base name for the proxy_init container, used to configure iptables.\n image: proxy_init\n\n # imagePullPolicy is applied to istio control plane components.\n # local tests require IfNotPresent, to avoid uploading to dockerhub.\n # TODO: Switch to Always as default, and override in the local tests.\n imagePullPolicy: IfNotPresent\n\n # controlPlaneMtls enabled. Will result in delays starting the pods while secrets are\n # propagated, not recommended for tests.\n controlPlaneSecurityEnabled: false\n\n # SDS enabled. IF set to true, mTLS certificates for the sidecars will be\n # distributed through the SecretDiscoveryService instead of using K8S secrets to mount the certificates.\n sdsEnabled: false\n\n # disablePolicyChecks disables mixer policy checks.\n # Will set the value with same name in istio config map - pilot needs to be restarted to take effect.\n disablePolicyChecks: false\n\n # EnableTracing sets the value with same name in istio config map, requires pilot restart to take effect.\n enableTracing: true\n\n # Configuration for each of the supported tracers\n tracer:\n # Configuration for envoy to send trace data to LightStep.\n # Disabled by default.\n # address: the \u003chost\u003e:\u003cport\u003e of the satellite pool\n # accessToken: required for sending data to the pool\n # secure: specifies whether data should be sent with TLS\n # cacertPath: the path to the file containing the cacert to use when verifying TLS. If secure is true, this is\n # required. 
If a value is specified then a secret called \"lightstep.cacert\" must be created in the destination\n # namespace with the key matching the base of the provided cacertPath and the value being the cacert itself.\n #\n lightstep:\n address: \"\" # example: lightstep-satellite:443\n accessToken: \"\" # example: abcdefg1234567\n secure: true # example: true|false\n cacertPath: \"\" # example: /etc/lightstep/cacert.pem\n zipkin:\n # Host:Port for reporting trace data in zipkin format. If not specified, will default to\n # zipkin service (port 9411) in the same namespace as the other istio components.\n address: \"\"\n\n # Default mtls policy. If true, mtls between services will be enabled by default.\n mtls:\n # Default setting for service-to-service mtls. Can be set explicitly using\n # destination rules or service annotations.\n enabled: false\n\n # ImagePullSecrets for all ServiceAccount, list of secrets in the same namespace\n # to use for pulling any images in pods that reference this ServiceAccount.\n # For components that don't use ServiceAccounts (i.e. grafana, servicegraph, tracing)\n # ImagePullSecrets will be added to the corresponding Deployment(StatefulSet) objects.\n # Must be set for any clustser configured with private docker registry.\n imagePullSecrets:\n # - private-registry-key\n\n # Specify pod scheduling arch(amd64, ppc64le, s390x) and weight as follows:\n # 0 - Never scheduled\n # 1 - Least preferred\n # 2 - No preference\n # 3 - Most preferred\n arch:\n amd64: 2\n s390x: 2\n ppc64le: 2\n\n # Whether to restrict the applications namespace the controller manages;\n # If not set, controller watches all namespaces\n oneNamespace: false\n\n # Whether to perform server-side validation of configuration.\n configValidation: true\n\n # Custom DNS config for the pod to resolve names of services in other\n # clusters. Use this to add additional search domains, and other settings.\n # see\n # https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#dns-config\n # This does not apply to gateway pods as they typically need a different\n # set of DNS settings than the normal application pods (e.g., in\n # multicluster scenarios).\n #podDNSConfig:\n # dnsConfig:\n # searches: #some dummy examples\n # - foo.bar.baz\n # - {{ \"[[ valueOrDefault .DeploymentMeta.Namespace \\\"default\\\" ]]\" }}.bazoo\n\n # If set to true, the pilot and citadel mtls will be exposed on the\n # ingress gateway\n meshExpansion:\n enabled: false\n # If set to true, the pilot and citadel mtls and the plain text pilot ports\n # will be exposed on an internal gateway\n useILB: false\n\n multiCluster:\n # Set to true to connect two kubernetes clusters using a LB gateway as\n # the only entry point into the cluster (instead of requiring pod to\n # pod connectivity across two clusters). Note that for this system to\n # work, service objects from remote clusters have to be replicated to\n # local cluster (without the pod selectors). In addition, service\n # entries have to be added for each replicated service object, where\n # the endpoints in the service entry point to the remote cluster's\n # mcgatewayIP:15443. 
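Restated just above is the rule that a non-empty cacertPath requires a secret named "lightstep.cacert" in the destination namespace, keyed by the basename of that path. A minimal sketch of that secret, assuming istio-system as the destination namespace and /etc/lightstep/cacert.pem as the configured cacertPath; the certificate body is a placeholder:

apiVersion: v1
kind: Secret
metadata:
  name: lightstep.cacert       # name required by the comment above
  namespace: istio-system      # assumed destination namespace
type: Opaque
stringData:
  cacert.pem: |                # key = basename of global.tracer.lightstep.cacertPath
    -----BEGIN CERTIFICATE-----
    (placeholder -- paste the actual CA certificate here)
    -----END CERTIFICATE-----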
All clusters should be using Istio mTLS and must\n # have a shared root CA for this model to work.\n connectUsingGateway: false\n\n # A minimal set of requested resources to applied to all deployments so that\n # Horizontal Pod Autoscaler will be able to function (if set).\n # Each component can overwrite these default values by adding its own resources\n # block in the relevant section below and setting the desired resources values.\n defaultResources:\n requests:\n cpu: 10m\n # memory: 128Mi\n # limits:\n # cpu: 100m\n # memory: 128Mi\n\n # Kubernetes \u003e=v1.11.0 will create two PriorityClass, including system-cluster-critical and\n # system-node-critical, it is better to configure this in order to make sure your Istio pods\n # will not be killed because of low priority class.\n # Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass\n # for more detail.\n priorityClassName: \"\"\n\n # Include the crd definition when generating the template.\n # For 'helm template' and helm install \u003e 2.10 it should be true.\n # For helm \u003c 2.9, crds must be installed ahead of time with\n # 'kubectl apply -f install/kubernetes/helm/istio/templates/crds.yaml\n # and this options must be set off.\n crds: true\n\n # Use the Mesh Control Protocol (MCP) for configuring Mixer and\n # Pilot. Requires galley (`--set galley.enabled=true`).\n useMCP: false\n", "upstream": "https://github.com/istio/istio/tree/d3eed9a347ad02f0b79e3f92330878f88953cf64/install/kubernetes/helm/istio", diff --git a/integration/init/jaeger-helm/expected/.ship/state.json b/integration/init/jaeger-helm/expected/.ship/state.json index e323c3446..bd38bdb48 100644 --- a/integration/init/jaeger-helm/expected/.ship/state.json +++ b/integration/init/jaeger-helm/expected/.ship/state.json @@ -1,7 +1,6 @@ { "v1": { "config": {}, - "helmValues": "# Default values for jaeger.\n# This is a YAML-formatted file.\n# Jaeger values are grouped by component. Cassandra values override subchart values\n\nprovisionDataStore:\n cassandra: true\n elasticsearch: false\n\ntag: 1.4.1\n\nstorage:\n # allowed values (cassandra, elasticsearch)\n type: cassandra\n cassandra:\n host: cassandra\n port: 9042\n user: user\n password: password\n elasticsearch:\n scheme: http\n host: elasticsearch\n port: 9200\n user: elastic\n password: changeme\n nodesWanOnly: false\n\n# Begin: Override values on the Cassandra subchart to customize for Jaeger\ncassandra:\n image:\n tag: 3.11\n persistence:\n # To enable persistence, please see the documentation for the Cassandra chart\n enabled: false\n config:\n cluster_name: jaeger\n seed_size: 1\n dc_name: dc1\n rack_name: rack1\n endpoint_snitch: GossipingPropertyFileSnitch\n# End: Override values on the Cassandra subchart to customize for Jaeger\n\n# Begin: Default values for the various components of Jaeger\n# This chart has been based on the Kubernetes integration found in the following repo:\n# https://github.com/jaegertracing/jaeger-kubernetes/blob/master/production/jaeger-production-template.yml\n#\n# This is the jaeger-cassandra-schema Job which sets up the Cassandra schema for\n# use by Jaeger\nschema:\n annotations: {}\n image: jaegertracing/jaeger-cassandra-schema\n pullPolicy: IfNotPresent\n # Acceptable values are test and prod. 
Default is for production use.\n mode: prod\n resources: {}\n # limits:\n # cpu: 500m\n # memory: 512Mi\n # requests:\n # cpu: 256m\n # memory: 128Mi\n ## Additional pod labels\n ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\n podLabels: {}\n\n# Begin: Override values on the Elasticsearch subchart to customize for Jaeger\nelasticsearch:\n image:\n tag: \"5.4\"\n cluster:\n name: \"tracing\"\n data:\n persistence:\n enabled: false\n rbac:\n create: false\n\nagent:\n enabled: true\n annotations: {}\n image: jaegertracing/jaeger-agent\n pullPolicy: IfNotPresent\n collector:\n host: null\n port: null\n cmdlineParams: {}\n daemonset:\n useHostPort: false\n service:\n annotations: {}\n # List of IP ranges that are allowed to access the load balancer (if supported)\n loadBalancerSourceRanges: []\n type: ClusterIP\n # zipkinThriftPort :accept zipkin.thrift over compact thrift protocol\n zipkinThriftPort: 5775\n # compactPort: accept jaeger.thrift over compact thrift protocol\n compactPort: 6831\n # binaryPort: accept jaeger.thrift over binary thrift protocol\n binaryPort: 6832\n # samplingPort: (HTTP) serve configs, sampling strategies\n samplingPort: 5778\n resources: {}\n # limits:\n # cpu: 500m\n # memory: 512Mi\n # requests:\n # cpu: 256m\n # memory: 128Mi\n nodeSelector: {}\n podAnnotations: {}\n ## Additional pod labels\n ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\n podLabels: {}\n ## Allow the scheduling on tainted nodes (requires Kubernetes \u003e= 1.6)\n ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\n tolerations: []\n useHostNetwork: false\n dnsPolicy: ClusterFirst\n\ncollector:\n enabled: true\n annotations: {}\n image: jaegertracing/jaeger-collector\n pullPolicy: IfNotPresent\n dnsPolicy: ClusterFirst\n cmdlineParams: {}\n replicaCount: 1\n service:\n annotations: {}\n # List of IP ranges that are allowed to access the load balancer (if supported)\n loadBalancerSourceRanges: []\n type: ClusterIP\n # tchannelPort: used by jaeger-agent to send spans in jaeger.thrift format\n tchannelPort: 14267\n # httpPort: can accept spans directly from clients in jaeger.thrift format\n httpPort: 14268\n # can accept Zipkin spans in JSON or Thrift\n zipkinPort: 9411\n healthCheckPort: 14269\n resources: {}\n # limits:\n # cpu: 1\n # memory: 1Gi\n # requests:\n # cpu: 500m\n # memory: 512Mi\n nodeSelector: {}\n podAnnotations: {}\n ## Additional pod labels\n ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\n podLabels: {}\n ## Allow the scheduling on tainted nodes (requires Kubernetes \u003e= 1.6)\n ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\n tolerations: []\n\nquery:\n enabled: true\n annotations: {}\n image: jaegertracing/jaeger-query\n pullPolicy: IfNotPresent\n dnsPolicy: ClusterFirst\n cmdlineParams: {}\n healthCheckPort: 16687\n replicaCount: 1\n service:\n annotations: {}\n type: ClusterIP\n # List of IP ranges that are allowed to access the load balancer (if supported)\n loadBalancerSourceRanges: []\n # queryPort: externally accessible port for UI and API\n queryPort: 80\n # targetPort: the internal port the UI and API are exposed on\n targetPort: 16686\n ingress:\n enabled: false\n annotations: {}\n # Used to create an Ingress record.\n # hosts:\n # - chart-example.local\n # annotations:\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n # tls:\n # Secrets must be manually created in the 
namespace.\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n resources: {}\n # limits:\n # cpu: 500m\n # memory: 512Mi\n # requests:\n # cpu: 256m\n # memory: 128Mi\n nodeSelector: {}\n podAnnotations: {}\n ## Additional pod labels\n ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\n podLabels: {}\n ## Allow the scheduling on tainted nodes (requires Kubernetes \u003e= 1.6)\n ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\n tolerations: []\n\nspark:\n enabled: false\n annotations: {}\n image: jaegertracing/spark-dependencies\n tag: latest\n pullPolicy: Always\n schedule: \"49 23 * * *\"\n successfulJobsHistoryLimit: 5\n failedJobsHistoryLimit: 5\n resources: {}\n # limits:\n # cpu: 500m\n # memory: 512Mi\n # requests:\n # cpu: 256m\n # memory: 128Mi\n nodeSelector: {}\n ## Additional pod labels\n ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\n podLabels: {}\n ## Allow the scheduling on tainted nodes (requires Kubernetes \u003e= 1.6)\n ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\n tolerations: []\n# End: Default values for the various components of Jaeger\n\nhotrod:\n enabled: false\n replicaCount: 1\n image:\n repository: jaegertracing/example-hotrod\n tag: latest\n pullPolicy: Always\n service:\n annotations: {}\n name: hotrod\n type: ClusterIP\n # List of IP ranges that are allowed to access the load balancer (if supported)\n loadBalancerSourceRanges: []\n externalPort: 80\n internalPort: 8080\n ingress:\n enabled: false\n # Used to create Ingress record (should be used with service.type: ClusterIP).\n hosts:\n - chart-example.local\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n tls:\n # Secrets must be manually created in the namespace.\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n resources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n tracing:\n host: null\n port: 6831\n", "releaseName": "jaeger", "helmValuesDefaults": "# Default values for jaeger.\n# This is a YAML-formatted file.\n# Jaeger values are grouped by component. 
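The jaeger values above gate the storage backend on storage.type, with cassandra and elasticsearch as the only allowed values and a matching provisionDataStore toggle for each. A sketch of the override that flips the chart to the Elasticsearch backend, using only keys already present in this fixture (the file name is hypothetical):

# jaeger-es-values.yaml -- hypothetical override, for illustration only.
provisionDataStore:
  cassandra: false        # skip provisioning the Cassandra subchart
  elasticsearch: true     # provision the Elasticsearch subchart instead
storage:
  type: elasticsearch
  elasticsearch:
    host: elasticsearch   # matches the defaults shown above
    port: 9200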
Cassandra values override subchart values\n\nprovisionDataStore:\n cassandra: true\n elasticsearch: false\n\ntag: 1.4.1\n\nstorage:\n # allowed values (cassandra, elasticsearch)\n type: cassandra\n cassandra:\n host: cassandra\n port: 9042\n user: user\n password: password\n elasticsearch:\n scheme: http\n host: elasticsearch\n port: 9200\n user: elastic\n password: changeme\n nodesWanOnly: false\n\n# Begin: Override values on the Cassandra subchart to customize for Jaeger\ncassandra:\n image:\n tag: 3.11\n persistence:\n # To enable persistence, please see the documentation for the Cassandra chart\n enabled: false\n config:\n cluster_name: jaeger\n seed_size: 1\n dc_name: dc1\n rack_name: rack1\n endpoint_snitch: GossipingPropertyFileSnitch\n# End: Override values on the Cassandra subchart to customize for Jaeger\n\n# Begin: Default values for the various components of Jaeger\n# This chart has been based on the Kubernetes integration found in the following repo:\n# https://github.com/jaegertracing/jaeger-kubernetes/blob/master/production/jaeger-production-template.yml\n#\n# This is the jaeger-cassandra-schema Job which sets up the Cassandra schema for\n# use by Jaeger\nschema:\n annotations: {}\n image: jaegertracing/jaeger-cassandra-schema\n pullPolicy: IfNotPresent\n # Acceptable values are test and prod. Default is for production use.\n mode: prod\n resources: {}\n # limits:\n # cpu: 500m\n # memory: 512Mi\n # requests:\n # cpu: 256m\n # memory: 128Mi\n ## Additional pod labels\n ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\n podLabels: {}\n\n# Begin: Override values on the Elasticsearch subchart to customize for Jaeger\nelasticsearch:\n image:\n tag: \"5.4\"\n cluster:\n name: \"tracing\"\n data:\n persistence:\n enabled: false\n rbac:\n create: false\n\nagent:\n enabled: true\n annotations: {}\n image: jaegertracing/jaeger-agent\n pullPolicy: IfNotPresent\n collector:\n host: null\n port: null\n cmdlineParams: {}\n daemonset:\n useHostPort: false\n service:\n annotations: {}\n # List of IP ranges that are allowed to access the load balancer (if supported)\n loadBalancerSourceRanges: []\n type: ClusterIP\n # zipkinThriftPort :accept zipkin.thrift over compact thrift protocol\n zipkinThriftPort: 5775\n # compactPort: accept jaeger.thrift over compact thrift protocol\n compactPort: 6831\n # binaryPort: accept jaeger.thrift over binary thrift protocol\n binaryPort: 6832\n # samplingPort: (HTTP) serve configs, sampling strategies\n samplingPort: 5778\n resources: {}\n # limits:\n # cpu: 500m\n # memory: 512Mi\n # requests:\n # cpu: 256m\n # memory: 128Mi\n nodeSelector: {}\n podAnnotations: {}\n ## Additional pod labels\n ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\n podLabels: {}\n ## Allow the scheduling on tainted nodes (requires Kubernetes \u003e= 1.6)\n ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\n tolerations: []\n useHostNetwork: false\n dnsPolicy: ClusterFirst\n\ncollector:\n enabled: true\n annotations: {}\n image: jaegertracing/jaeger-collector\n pullPolicy: IfNotPresent\n dnsPolicy: ClusterFirst\n cmdlineParams: {}\n replicaCount: 1\n service:\n annotations: {}\n # List of IP ranges that are allowed to access the load balancer (if supported)\n loadBalancerSourceRanges: []\n type: ClusterIP\n # tchannelPort: used by jaeger-agent to send spans in jaeger.thrift format\n tchannelPort: 14267\n # httpPort: can accept spans directly from clients in jaeger.thrift format\n 
httpPort: 14268\n # can accept Zipkin spans in JSON or Thrift\n zipkinPort: 9411\n healthCheckPort: 14269\n resources: {}\n # limits:\n # cpu: 1\n # memory: 1Gi\n # requests:\n # cpu: 500m\n # memory: 512Mi\n nodeSelector: {}\n podAnnotations: {}\n ## Additional pod labels\n ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\n podLabels: {}\n ## Allow the scheduling on tainted nodes (requires Kubernetes \u003e= 1.6)\n ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\n tolerations: []\n\nquery:\n enabled: true\n annotations: {}\n image: jaegertracing/jaeger-query\n pullPolicy: IfNotPresent\n dnsPolicy: ClusterFirst\n cmdlineParams: {}\n healthCheckPort: 16687\n replicaCount: 1\n service:\n annotations: {}\n type: ClusterIP\n # List of IP ranges that are allowed to access the load balancer (if supported)\n loadBalancerSourceRanges: []\n # queryPort: externally accessible port for UI and API\n queryPort: 80\n # targetPort: the internal port the UI and API are exposed on\n targetPort: 16686\n ingress:\n enabled: false\n annotations: {}\n # Used to create an Ingress record.\n # hosts:\n # - chart-example.local\n # annotations:\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n # tls:\n # Secrets must be manually created in the namespace.\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n resources: {}\n # limits:\n # cpu: 500m\n # memory: 512Mi\n # requests:\n # cpu: 256m\n # memory: 128Mi\n nodeSelector: {}\n podAnnotations: {}\n ## Additional pod labels\n ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\n podLabels: {}\n ## Allow the scheduling on tainted nodes (requires Kubernetes \u003e= 1.6)\n ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\n tolerations: []\n\nspark:\n enabled: false\n annotations: {}\n image: jaegertracing/spark-dependencies\n tag: latest\n pullPolicy: Always\n schedule: \"49 23 * * *\"\n successfulJobsHistoryLimit: 5\n failedJobsHistoryLimit: 5\n resources: {}\n # limits:\n # cpu: 500m\n # memory: 512Mi\n # requests:\n # cpu: 256m\n # memory: 128Mi\n nodeSelector: {}\n ## Additional pod labels\n ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\n podLabels: {}\n ## Allow the scheduling on tainted nodes (requires Kubernetes \u003e= 1.6)\n ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\n tolerations: []\n# End: Default values for the various components of Jaeger\n\nhotrod:\n enabled: false\n replicaCount: 1\n image:\n repository: jaegertracing/example-hotrod\n tag: latest\n pullPolicy: Always\n service:\n annotations: {}\n name: hotrod\n type: ClusterIP\n # List of IP ranges that are allowed to access the load balancer (if supported)\n loadBalancerSourceRanges: []\n externalPort: 80\n internalPort: 8080\n ingress:\n enabled: false\n # Used to create Ingress record (should be used with service.type: ClusterIP).\n hosts:\n - chart-example.local\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n tls:\n # Secrets must be manually created in the namespace.\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n resources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. 
If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n tracing:\n host: null\n port: 6831\n", "upstream": "https://github.com/helm/charts/tree/f839d88d087511eb163b12a394c05a63f10d5ee4/incubator/jaeger", diff --git a/integration/init/kibana/expected/.ship/state.json b/integration/init/kibana/expected/.ship/state.json new file mode 100644 index 000000000..8fa5de3d9 --- /dev/null +++ b/integration/init/kibana/expected/.ship/state.json @@ -0,0 +1,16 @@ +{ + "v1": { + "config": {}, + "releaseName": "kibana", + "helmValuesDefaults": "image:\n repository: \"docker.elastic.co/kibana/kibana-oss\"\n tag: \"6.5.4\"\n pullPolicy: \"IfNotPresent\"\n\ncommandline:\n args: []\n\nenv: {}\n # All Kibana configuration options are adjustable via env vars.\n # To adjust a config option to an env var uppercase + replace `.` with `_`\n # Ref: https://www.elastic.co/guide/en/kibana/current/settings.html\n #\n # ELASTICSEARCH_URL: http://elasticsearch-client:9200\n # SERVER_PORT: 5601\n # LOGGING_VERBOSE: \"true\"\n # SERVER_DEFAULTROUTE: \"/app/kibana\"\n\nfiles:\n kibana.yml:\n ## Default Kibana configuration from kibana-docker.\n server.name: kibana\n server.host: \"0\"\n elasticsearch.url: http://elasticsearch:9200\n\n ## Custom config properties below\n ## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html\n # server.port: 5601\n # logging.verbose: \"true\"\n # server.defaultRoute: \"/app/kibana\"\n\ndeployment:\n annotations: {}\n\nservice:\n type: ClusterIP\n externalPort: 443\n internalPort: 5601\n # authProxyPort: 5602 To be used with authProxyEnabled and a proxy extraContainer\n ## External IP addresses of service\n ## Default: nil\n ##\n # externalIPs:\n # - 192.168.0.1\n #\n ## LoadBalancer IP if service.type is LoadBalancer\n ## Default: nil\n ##\n # loadBalancerIP: 10.2.2.2\n annotations: {}\n # Annotation example: setup ssl with aws cert when service.type is LoadBalancer\n # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT\n labels: {}\n ## Label example: show service URL in `kubectl cluster-info`\n # kubernetes.io/cluster-service: \"true\"\n ## Limit load balancer source ips to list of CIDRs (where available)\n # loadBalancerSourceRanges: []\n\ningress:\n enabled: false\n # hosts:\n # - kibana.localhost.localdomain\n # - localhost.localdomain/kibana\n # annotations:\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n # tls:\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nserviceAccount:\n # Specifies whether a service account should be created\n create: false\n # The name of the service account to use.\n # If not set and create is true, a name is generated using the fullname template\n # If set and create is false, the service account must be existing\n name:\n\nlivenessProbe:\n enabled: false\n initialDelaySeconds: 30\n timeoutSeconds: 10\n\nreadinessProbe:\n enabled: false\n initialDelaySeconds: 30\n timeoutSeconds: 10\n periodSeconds: 10\n successThreshold: 5\n\n# Enable an authproxy. 
Specify container in extraContainers\nauthProxyEnabled: false\n\nextraContainers: |\n# - name: proxy\n# image: quay.io/gambol99/keycloak-proxy:latest\n# args:\n# - --resource=uri=/*\n# - --discovery-url=https://discovery-url\n# - --client-id=client\n# - --client-secret=secret\n# - --listen=0.0.0.0:5602\n# - --upstream-url=http://127.0.0.1:5601\n# ports:\n# - name: web\n# containerPort: 9090\nresources: {}\n # limits:\n # cpu: 100m\n # memory: 300Mi\n # requests:\n # cpu: 100m\n # memory: 300Mi\n\npriorityClassName: \"\"\n\n# Affinity for pod assignment\n# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity\n# affinity: {}\n\n# Tolerations for pod assignment\n# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\ntolerations: []\n\n# Node labels for pod assignment\n# Ref: https://kubernetes.io/docs/user-guide/node-selection/\nnodeSelector: {}\n\npodAnnotations: {}\nreplicaCount: 1\nrevisionHistoryLimit: 3\n\n# To export a dashboard from a running Kibana 6.3.x use:\n# curl --user \u003cusername\u003e:\u003cpassword\u003e -XGET https://kibana.yourdomain.com:5601/api/kibana/dashboards/export?dashboard=\u003csome-dashboard-uuid\u003e \u003e my-dashboard.json\n# A dashboard is defined by a name and a string with the json payload or the download url\ndashboardImport:\n timeout: 60\n xpackauth:\n enabled: false\n username: myuser\n password: mypass\n dashboards: {}\n # k8s: https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json\n\n# List of plugins to install using initContainer\n# NOTE : We notice that lower resource constraints given to the chart + plugins are likely not going to work well.\nplugins:\n # set to true to enable plugins installation\n enabled: false\n # set to true to remove all kibana plugins before installation\n reset: false\n # Use \u003cplugin_name,version,url\u003e to add/upgrade plugin\n values:\n # - elastalert-kibana-plugin,1.0.1,https://github.com/bitsensor/elastalert-kibana-plugin/releases/download/1.0.1/elastalert-kibana-plugin-1.0.1-6.4.2.zip\n # - logtrail,0.1.30,https://github.com/sivasamyk/logtrail/releases/download/v0.1.30/logtrail-6.4.2-0.1.30.zip\n # - other_plugin\n\npersistentVolumeClaim:\n # set to true to use pvc\n enabled: false\n # set to true to use you own pvc\n existingClaim: false\n annotations: {}\n\n accessModes:\n - ReadWriteOnce\n size: \"5Gi\"\n ## If defined, storageClassName: \u003cstorageClass\u003e\n ## If set to \"-\", storageClassName: \"\", which disables dynamic provisioning\n ## If undefined (the default) or set to null, no storageClassName spec is\n ## set, choosing the default provisioner. 
(gp2 on AWS, standard on\n ## GKE, AWS \u0026 OpenStack)\n ##\n # storageClass: \"-\"\n\n# default security context\nsecurityContext:\n enabled: false\n allowPrivilegeEscalation: false\n runAsUser: 1000\n fsGroup: 2000\n\nextraConfigMapMounts: []\n # - name: logtrail-configs\n # configMap: kibana-logtrail\n # mountPath: /usr/share/kibana/plugins/logtrail/logtrail.json\n # subPath: logtrail.json\n", + "upstream": "github.com/replicatedhq/test-charts/tree/316b56dd3c1209a470dccaa8016c4cad76de0299/kibana", + "metadata": { + "applicationType": "helm", + "icon": "https://raw.githubusercontent.com/elastic/kibana/master/src/ui/public/icons/kibana-color.svg", + "name": "kibana", + "releaseNotes": "Add kibana (#27)", + "version": "1.1.2" + }, + "contentSHA": "f54c36389890161712f38e5c0a7b46586193d3ee6e1cb773923e6c620066e840" + } +} \ No newline at end of file diff --git a/integration/init/kibana/expected/base/configmap.yaml b/integration/init/kibana/expected/base/configmap.yaml new file mode 100644 index 000000000..065d7882b --- /dev/null +++ b/integration/init/kibana/expected/base/configmap.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +data: + kibana.yml: | + elasticsearch.url: http://elasticsearch:9200 + server.host: "0" + server.name: kibana +kind: ConfigMap +metadata: + labels: + app: kibana + release: kibana + name: kibana diff --git a/integration/init/kibana/expected/base/deployment.yaml b/integration/init/kibana/expected/base/deployment.yaml new file mode 100644 index 000000000..e10d5c7e1 --- /dev/null +++ b/integration/init/kibana/expected/base/deployment.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + labels: + app: kibana + release: kibana + name: kibana +spec: + replicas: 1 + revisionHistoryLimit: 3 + template: + metadata: + annotations: + checksum/config: 0d42b0d5d1f2c84f74f7e20e3b643f2dabf4f96963c1b4ced060624e7211f4d4 + labels: + app: kibana + release: kibana + spec: + containers: + - env: [] + image: docker.elastic.co/kibana/kibana-oss:6.5.4 + imagePullPolicy: IfNotPresent + name: kibana + ports: + - containerPort: 5601 + name: kibana + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /usr/share/kibana/config/kibana.yml + name: kibana + subPath: kibana.yml + serviceAccountName: default + tolerations: [] + volumes: + - configMap: + name: kibana + name: kibana diff --git a/integration/init/kibana/expected/base/kustomization.yaml b/integration/init/kibana/expected/base/kustomization.yaml new file mode 100644 index 000000000..90834dbf6 --- /dev/null +++ b/integration/init/kibana/expected/base/kustomization.yaml @@ -0,0 +1,6 @@ +kind: "" +apiversion: "" +resources: +- configmap.yaml +- deployment.yaml +- service.yaml diff --git a/integration/init/kibana/expected/base/service.yaml b/integration/init/kibana/expected/base/service.yaml new file mode 100644 index 000000000..a488b29e1 --- /dev/null +++ b/integration/init/kibana/expected/base/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: kibana + release: kibana + name: kibana +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 5601 + selector: + app: kibana + release: kibana + type: ClusterIP diff --git a/integration/init/kibana/expected/overlays/ship/kustomization.yaml b/integration/init/kibana/expected/overlays/ship/kustomization.yaml new file mode 100644 index 000000000..c80bb2245 --- /dev/null +++ b/integration/init/kibana/expected/overlays/ship/kustomization.yaml @@ -0,0 +1,4 @@ +kind: "" +apiversion: "" +bases: +- ../../base diff --git 
a/integration/init/kibana/expected/rendered.yaml b/integration/init/kibana/expected/rendered.yaml new file mode 100644 index 000000000..ed7153673 --- /dev/null +++ b/integration/init/kibana/expected/rendered.yaml @@ -0,0 +1,68 @@ +apiVersion: v1 +data: + kibana.yml: | + elasticsearch.url: http://elasticsearch:9200 + server.host: "0" + server.name: kibana +kind: ConfigMap +metadata: + labels: + app: kibana + release: kibana + name: kibana +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: kibana + release: kibana + name: kibana +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 5601 + selector: + app: kibana + release: kibana + type: ClusterIP +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + labels: + app: kibana + release: kibana + name: kibana +spec: + replicas: 1 + revisionHistoryLimit: 3 + template: + metadata: + annotations: + checksum/config: 0d42b0d5d1f2c84f74f7e20e3b643f2dabf4f96963c1b4ced060624e7211f4d4 + labels: + app: kibana + release: kibana + spec: + containers: + - env: [] + image: docker.elastic.co/kibana/kibana-oss:6.5.4 + imagePullPolicy: IfNotPresent + name: kibana + ports: + - containerPort: 5601 + name: kibana + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /usr/share/kibana/config/kibana.yml + name: kibana + subPath: kibana.yml + serviceAccountName: default + tolerations: [] + volumes: + - configMap: + name: kibana + name: kibana diff --git a/integration/init/kibana/metadata.yaml b/integration/init/kibana/metadata.yaml new file mode 100644 index 000000000..0c2f10d49 --- /dev/null +++ b/integration/init/kibana/metadata.yaml @@ -0,0 +1,3 @@ +upstream: "github.com/replicatedhq/test-charts/tree/316b56dd3c1209a470dccaa8016c4cad76de0299/kibana" +args: ["--prefer-git"] +skip_cleanup: false diff --git a/integration/update/basic/expected/.ship/state.json b/integration/update/basic/expected/.ship/state.json index 380c10e78..367cc0d9f 100755 --- a/integration/update/basic/expected/.ship/state.json +++ b/integration/update/basic/expected/.ship/state.json @@ -1,7 +1,7 @@ { "v1": { "config": {}, - "helmValues": "replicaCount: 5\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\nservice:\n type: ClusterIP\n port: 80\ningress:\n enabled: false\n annotations: {}\n path: /\n hosts:\n - chart-example.local\n tls: []\nresources: {}\nnodeSelector: {}\ntolerations: []\naffinity: {}\n", + "helmValues": "# Default values for basic.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nreplicaCount: 5\n\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\n\nservice:\n type: ClusterIP\n port: 80\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. 
If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n", "releaseName": "basic", "helmValuesDefaults": "# Default values for basic.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nreplicaCount: 1\n\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\n\nservice:\n type: ClusterIP\n port: 80\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n", "kustomize": { diff --git a/integration/update/excluded-basic/expected/.ship/state.json b/integration/update/excluded-basic/expected/.ship/state.json index e1e092fd3..87838bc36 100755 --- a/integration/update/excluded-basic/expected/.ship/state.json +++ b/integration/update/excluded-basic/expected/.ship/state.json @@ -1,7 +1,7 @@ { "v1": { "config": {}, - "helmValues": "replicaCount: 5\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\nservice:\n type: ClusterIP\n port: 80\ningress:\n enabled: false\n annotations: {}\n path: /\n hosts:\n - chart-example.local\n tls: []\nresources: {}\nnodeSelector: {}\ntolerations: []\naffinity: {}\n", + "helmValues": "# Default values for basic.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nreplicaCount: 5\n\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\n\nservice:\n type: ClusterIP\n port: 80\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. 
If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n", "releaseName": "basic", "helmValuesDefaults": "# Default values for basic.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nreplicaCount: 1\n\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\n\nservice:\n type: ClusterIP\n port: 80\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n", "kustomize": { diff --git a/integration/update/modify-chart/expected/.ship/state.json b/integration/update/modify-chart/expected/.ship/state.json index d173c41d9..ff976678e 100755 --- a/integration/update/modify-chart/expected/.ship/state.json +++ b/integration/update/modify-chart/expected/.ship/state.json @@ -1,7 +1,7 @@ { "v1": { "config": {}, - "helmValues": "replicaCount: 1\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\nservice:\n type: ClusterIP\n port: 80\ningress:\n enabled: false\n annotations: {}\n path: /\n hosts:\n - chart-example.local\n tls: []\nresources: {}\nnodeSelector: {}\ntolerations: []\naffinity: {}\n", + "helmValues": "# Default values for modify-chart.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nreplicaCount: 1\n\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\n\nservice:\n type: ClusterIP\n port: 80\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. 
If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n", "releaseName": "modify-chart", "helmValuesDefaults": "# Default values for modify-chart.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nreplicaCount: 2\n\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\n\nservice:\n type: ClusterIP\n port: 80\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n", "kustomize": { diff --git a/integration/update/values-static/expected/.ship/state.json b/integration/update/values-static/expected/.ship/state.json index 707bd718f..592367038 100755 --- a/integration/update/values-static/expected/.ship/state.json +++ b/integration/update/values-static/expected/.ship/state.json @@ -1,7 +1,7 @@ { "v1": { "config": {}, - "helmValues": "replicaCount: 2\nimage:\n repository: nginx\n tag: stable\n", + "helmValues": "replicaCount: 2\nimage:\n repository: nginx\n tag: stable\n\n", "releaseName": "values-update", "helmValuesDefaults": "replicaCount: 1\nimage:\n repository: nginx\n tag: stable\n\n", "kustomize": { diff --git a/integration/update/version/expected/.ship/state.json b/integration/update/version/expected/.ship/state.json index 8b6be76e2..3747f67de 100755 --- a/integration/update/version/expected/.ship/state.json +++ b/integration/update/version/expected/.ship/state.json @@ -1,9 +1,9 @@ { "v1": { "config": {}, - "helmValues": "replicaCount: 1\nimage:\n repository: nginx\n tag: stable\n pullPolicy: Always\nservice:\n type: ClusterIP\n port: 82\ningress:\n enabled: false\n annotations: {}\n path: /\n hosts:\n - chart-example.local\n tls: []\nresources: {}\nnodeSelector: {}\ntolerations: []\naffinity: {}\n", + "helmValues": "replicaCount: 1\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\nservice:\n type: ClusterIP\n port: 80\ningress:\n enabled: false\n annotations: {}\n path: /\n hosts:\n - chart-example.local\n tls: []\nresources: {}\nnodeSelector: {}\ntolerations: []\naffinity: {}\n", "releaseName": "version", - "helmValuesDefaults": "# Default values for basic.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nreplicaCount: 1\n\nimage:\n repository: nginx\n tag: stable\n pullPolicy: Always\n\nservice:\n type: ClusterIP\n port: 82\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n # We usually recommend not to specify default 
resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n", + "helmValuesDefaults": "# Default values for basic.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\nreplicaCount: 1\n\nimage:\n repository: nginx\n tag: stable\n pullPolicy: IfNotPresent\n\nservice:\n type: ClusterIP\n port: 80\n\ningress:\n enabled: false\n annotations: {}\n # kubernetes.io/ingress.class: nginx\n # kubernetes.io/tls-acme: \"true\"\n path: /\n hosts:\n - chart-example.local\n tls: []\n # - secretName: chart-example-tls\n # hosts:\n # - chart-example.local\n\nresources: {}\n # We usually recommend not to specify default resources and to leave this as a conscious\n # choice for the user. This also increases chances charts run on environments with little\n # resources, such as Minikube. If you do want to specify resources, uncomment the following\n # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n # limits:\n # cpu: 100m\n # memory: 128Mi\n # requests:\n # cpu: 100m\n # memory: 128Mi\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n", "upstream": "https://github.com/replicatedhq/test-charts/tree/_latest_/version", "metadata": { "applicationType": "helm", diff --git a/pkg/api/asset.go b/pkg/api/asset.go index a49acba2e..a00c24a71 100644 --- a/pkg/api/asset.go +++ b/pkg/api/asset.go @@ -100,8 +100,10 @@ type LocalAsset struct { } type ValuesFrom struct { - Path string `json:"path,omitempty" yaml:"path,omitempty" hcl:"path,omitempty"` - SaveToState bool `json:"save_to_state,omitempty" yaml:"save_to_state,omitempty" hcl:"save_to_state,omitempty"` + Path string `json:"path,omitempty" yaml:"path,omitempty" hcl:"path,omitempty"` + // SaveToState is used when a HelmValues step is not part of the lifecycle (e.g. Unfork) in order to save + // the merged helm values to state. 
+ SaveToState bool `json:"save_to_state,omitempty" yaml:"save_to_state,omitempty" hcl:"save_to_state,omitempty"` } type ValuesFromLifecycle struct{} diff --git a/pkg/lifecycle/render/helm/template.go b/pkg/lifecycle/render/helm/template.go index 73c99f7f7..ae452fb8a 100644 --- a/pkg/lifecycle/render/helm/template.go +++ b/pkg/lifecycle/render/helm/template.go @@ -60,8 +60,7 @@ func NewTemplater( } } -var arrayLineRegex = regexp.MustCompile(`^\s*(args|volumes):\s*$`) -var envLineRegex = regexp.MustCompile(`^\s*env:\s*$`) +var arrayLineRegex = regexp.MustCompile(`^\s*(env|args|volumes):\s*$`) var valueLineRegex = regexp.MustCompile(`^\s*value:\s*$`) var nullValueLineRegex = regexp.MustCompile(`^(\s*value:)\s*null\s*$`) @@ -535,12 +534,6 @@ func fixLines(lines []string) []string { // next line is not a child, so this key has no contents, add an empty array lines[idx] = line + " []" } - } else if envLineRegex.MatchString(line) { - // line has `env:` and nothing else but whitespace - if !checkIsChild(line, nextLine(idx, lines)) { - // next line is not a child, so env has no contents, add an empty object - lines[idx] = line + " {}" - } } else if valueLineRegex.MatchString(line) { // line has `value:` and nothing else but whitespace if !checkIsChild(line, nextLine(idx, lines)) { diff --git a/pkg/lifecycle/render/helm/template_test.go b/pkg/lifecycle/render/helm/template_test.go index 665de19df..6124f0c1e 100644 --- a/pkg/lifecycle/render/helm/template_test.go +++ b/pkg/lifecycle/render/helm/template_test.go @@ -534,7 +534,7 @@ func Test_validateGeneratedFiles(t *testing.T) { }, { path: "test/missingEnv.yaml", - contents: ` env: {}`, + contents: ` env: []`, }, { path: "test/notMissingMultilineEnv.yaml", @@ -547,7 +547,7 @@ func Test_validateGeneratedFiles(t *testing.T) { { path: "test/missingMultilineEnv.yaml", contents: ` - env: {} + env: [] something:`, }, }, @@ -647,7 +647,7 @@ func Test_validateGeneratedFiles(t *testing.T) { { path: "test/comment_line_env.yaml", contents: ` - env: {} + env: [] #item env: @@ -722,7 +722,7 @@ func Test_validateGeneratedFiles(t *testing.T) { path: "test/everything.yaml", contents: ` args: [] - env: {} + env: [] volumes: [] value: "" value: "" diff --git a/pkg/specs/chart.go b/pkg/specs/chart.go index 3e1375496..49fdd2625 100644 --- a/pkg/specs/chart.go +++ b/pkg/specs/chart.go @@ -106,8 +106,7 @@ func (r *Resolver) DefaultHelmRelease(chartPath string, upstream string) api.Spe ChartRoot: chartPath, }, ValuesFrom: &api.ValuesFrom{ - Path: constants.ShipPathInternalTmp, - SaveToState: true, + Path: constants.ShipPathInternalTmp, }, Upstream: upstream, }, @@ -194,10 +193,6 @@ For continuous notification and preparation of application updates via email, we return spec } -func (r *Resolver) DefaultRawUnforkRelease(forkedPath string, upstreamPath string) api.Spec { - return api.Spec{} -} - func (r *Resolver) DefaultRawRelease(basePath string) api.Spec { spec := api.Spec{ Assets: api.Assets{
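
The template.go hunks above collapse the special-cased envLineRegex into the shared arrayLineRegex, so a bare `env:` key with no children is now completed as `env: []` rather than `env: {}` — matching the test-fixture updates in template_test.go. Below is a minimal, self-contained sketch of that normalization pass, assuming hypothetical helpers `indentOf` and `fixEmptyArrays` in place of the file's actual `checkIsChild`/`nextLine`/`fixLines`; only the regex is taken verbatim from the hunk.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// The merged pattern from the template.go hunk: empty `env:`, `args:`,
// and `volumes:` keys are all completed as empty arrays, since each of
// them is a list in a Kubernetes container spec, not a map.
var arrayLineRegex = regexp.MustCompile(`^\s*(env|args|volumes):\s*$`)

// indentOf is a hypothetical helper standing in for the indentation
// comparison that template.go's checkIsChild performs.
func indentOf(s string) int {
	return len(s) - len(strings.TrimLeft(s, " "))
}

// fixEmptyArrays appends " []" to a bare env/args/volumes key whose next
// non-blank line is not a child, mirroring the fixLines pass in the hunk.
func fixEmptyArrays(lines []string) []string {
	for i, line := range lines {
		if !arrayLineRegex.MatchString(line) {
			continue
		}
		hasChild := false
		for j := i + 1; j < len(lines); j++ {
			next := lines[j]
			trimmed := strings.TrimSpace(next)
			if trimmed == "" {
				continue
			}
			// Deeper indentation is a child; so is a sequence item
			// ("- ...") at the same indent level.
			hasChild = indentOf(next) > indentOf(line) ||
				(strings.HasPrefix(trimmed, "- ") && indentOf(next) >= indentOf(line))
			break
		}
		if !hasChild {
			lines[i] = line + " []"
		}
	}
	return lines
}

func main() {
	in := []string{"  env:", "  something:"}
	fmt.Println(strings.Join(fixEmptyArrays(in), "\n"))
	// Prints:
	//   env: []
	//   something:
}

Completing the key as an array matters because `env` on a container is `[]EnvVar` in the Kubernetes API, so `env: {}` renders a map where a list is required. Relatedly, and consistent with the new doc comment on ValuesFrom.SaveToState, the chart.go hunk stops setting SaveToState in DefaultHelmRelease: a default helm release runs the HelmValues lifecycle step, so persisting merged values to state is only needed for flows like Unfork where that step is absent.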