diff --git a/charts/pega/config/deploy/server.xml.tmpl b/charts/pega/config/deploy/server.xml.tmpl
index 58833910a..66853b1a3 100644
--- a/charts/pega/config/deploy/server.xml.tmpl
+++ b/charts/pega/config/deploy/server.xml.tmpl
@@ -82,7 +82,7 @@
-    {{ if or (exists "/opt/pega/tomcatcertsmount/TOMCAT_KEYSTORE_CONTENT") (exists "/opt/pega/tomcatcertsmount/TOMCAT_CERTIFICATE_FILE") }}
+    {{ if or (exists .Env.TOMCAT_KEYSTORE_CONTENT) (exists "/opt/pega/tomcatcertsmount/TOMCAT_CERTIFICATE_FILE") }}
-        {{ if ( and (exists "/opt/pega/tomcatcertsmount/TOMCAT_KEYSTORE_CONTENT") .Env.TOMCAT_KEYSTORE_PASSWORD ) }}
+        {{ if ( and (exists .Env.TOMCAT_KEYSTORE_CONTENT) .Env.TOMCAT_KEYSTORE_PASSWORD ) }}
         {{ else }}
diff --git a/charts/pega/templates/_helpers.tpl b/charts/pega/templates/_helpers.tpl
index dfa3a714b..8a31cf448 100644
--- a/charts/pega/templates/_helpers.tpl
+++ b/charts/pega/templates/_helpers.tpl
@@ -58,15 +58,19 @@
 {{- define "pegaVolumeTomcatKeystoreTemplate" }}
 - name: {{ template "pegaVolumeTomcatKeystore" }}
-  secret:
-    # This name will be referred in the volume mounts kind.
-    {{ if ((.node.service).tls).external_secret_name }}
-    secretName: {{ ((.node.service).tls).external_secret_name }}
+  projected:
+    defaultMode: 420
+    sources:
+    {{ if (((.node.service).tls).external_secret_names) }}
+    {{- range (((.node.service).tls).external_secret_names) }}
+    - secret:
+        name: {{ . }}
+    {{- end }}
     {{ else }}
-    secretName: {{ template "pegaTomcatKeystoreSecret" $ }}
+    # This name will be referred in the volume mounts kind.
+    - secret:
+        name: {{ template "pegaTomcatKeystoreSecret" $ }}
     {{ end }}
-    # Used to specify permissions on files within the volume.
-    defaultMode: 420
 {{- end}}
 
 {{- define "pegaVolumeConfig" }}pega-volume-config{{- end }}
diff --git a/charts/pega/templates/_pega-deployment.tpl b/charts/pega/templates/_pega-deployment.tpl
index 974b1ca61..0db586531 100644
--- a/charts/pega/templates/_pega-deployment.tpl
+++ b/charts/pega/templates/_pega-deployment.tpl
@@ -173,6 +173,12 @@ spec:
         - name: COSMOS_SETTINGS
           value: "Pega-UIEngine/cosmosservicesURI=/c11n"
 {{- end }}
+{{- if ((.node.service).tls).enabled }}
+        - name: EXTERNAL_KEYSTORE_NAME
+          value: "{{ (((.node.service).tls).external_keystore_name) }}"
+        - name: EXTERNAL_KEYSTORE_PASSWORD
+          value: "{{ (((.node.service).tls).external_keystore_password) }}"
+{{- end }}
 {{- if .custom }}
 {{- if .custom.env }}
         # Additional custom env vars
diff --git a/charts/pega/templates/_pega-traefik-config.tpl b/charts/pega/templates/_pega-traefik-config.tpl
index d8027d91a..9fd039266 100644
--- a/charts/pega/templates/_pega-traefik-config.tpl
+++ b/charts/pega/templates/_pega-traefik-config.tpl
@@ -16,8 +16,8 @@ spec:
   {{- end }}
   #For traefik, it expects the root CA certificate in a secret under the field ca.crt
   rootCAsSecrets:
-  {{- if .node.service.tls.external_secret_name }}
-  - {{ .node.service.tls.external_secret_name }}
+  {{- if .node.service.tls.external_secret_names }}
+  - {{ first .node.service.tls.external_secret_names }}
   {{- else }}
   - {{ .depname }}-tomcat-keystore-secret
   {{- end }}
diff --git a/charts/pega/values.yaml b/charts/pega/values.yaml
index 83c9cdf5b..8c18c13b4 100644
--- a/charts/pega/values.yaml
+++ b/charts/pega/values.yaml
@@ -163,7 +163,14 @@ global:
           # To avoid entering the certificate values in plain text, configure the keystore, keystorepassword, cacertificate parameter
           # values in the External Secrets Manager, and enter the external secret name below
           # make sure the keys in the secret should be TOMCAT_KEYSTORE_CONTENT, TOMCAT_KEYSTORE_PASSWORD and ca.crt respectively
-          external_secret_name: ""
+          # To provide multiple secrets, specify them as an array of comma-separated strings.
+          external_secret_names: []
+          # If using tools like cert-manager to generate certificates, provide the keystore name that is autogenerated by the external tool.
+          # Default is TOMCAT_KEYSTORE_CONTENT
+          external_keystore_name: ""
+          # If using the External Secrets Operator and not using the standard password key, provide the key for the keystore password.
+          # Default is TOMCAT_KEYSTORE_PASSWORD
+          external_keystore_password: ""
           keystore:
           keystorepassword:
           port: 443
diff --git a/terratest/src/test/pega/data/expectedInstallDeployServer.xml.tmpl b/terratest/src/test/pega/data/expectedInstallDeployServer.xml.tmpl
index 58833910a..66853b1a3 100644
--- a/terratest/src/test/pega/data/expectedInstallDeployServer.xml.tmpl
+++ b/terratest/src/test/pega/data/expectedInstallDeployServer.xml.tmpl
@@ -82,7 +82,7 @@
-    {{ if or (exists "/opt/pega/tomcatcertsmount/TOMCAT_KEYSTORE_CONTENT") (exists "/opt/pega/tomcatcertsmount/TOMCAT_CERTIFICATE_FILE") }}
+    {{ if or (exists .Env.TOMCAT_KEYSTORE_CONTENT) (exists "/opt/pega/tomcatcertsmount/TOMCAT_CERTIFICATE_FILE") }}
-        {{ if ( and (exists "/opt/pega/tomcatcertsmount/TOMCAT_KEYSTORE_CONTENT") .Env.TOMCAT_KEYSTORE_PASSWORD ) }}
+        {{ if ( and (exists .Env.TOMCAT_KEYSTORE_CONTENT) .Env.TOMCAT_KEYSTORE_PASSWORD ) }}
         {{ else }}
diff --git a/terratest/src/test/pega/data/pega-tier-service-override_values.yaml b/terratest/src/test/pega/data/pega-tier-service-override_values.yaml
new file mode 100644
index 000000000..d41756d57
--- /dev/null
+++ b/terratest/src/test/pega/data/pega-tier-service-override_values.yaml
@@ -0,0 +1,553 @@
+---
+global:
+  # This values.yaml file is an example. For more information about
+  # each configuration option, see the project readme.
+
+  # Enter your Kubernetes provider.
+  provider: "YOUR_KUBERNETES_PROVIDER"
+
+  deployment:
+    # The name specified will be used to prefix all of the Pega pods (replacing "pega" with something like "app1-dev").
+    name: "pega"
+
+  # Deploy Pega nodes
+  actions:
+    execute: "deploy"
+
+  # Add custom certificates to be mounted to container
+  # to support custom certificates as plain text (less secure), pass them directly using the certificates parameter;
+  # to support multiple custom certificates as external secrets, specify each of your external secrets
+  # as an array of comma-separated strings using the certificatesSecrets parameter.
+  certificatesSecrets: []
+  certificates:
+
+  # Add krb5.conf file content here.
+  # Feature is used for Decisioning data flows to fetch data from Kafka or HBase streams
+  kerberos:
+
+  # If a storage class is to be passed to the VolumeClaimTemplates in search and stream pods, it can be specified here:
+  storageClassName: ""
+  # Provide JDBC connection information to the Pega relational database
+  # If you are installing or upgrading on IBM DB2, update the udb.conf file in the /charts/pega/charts/installer/config/udb directory with any additional connection properties.
+  jdbc:
+    # url Valid values are:
+    #
+    # Oracle            jdbc:oracle:thin:@//localhost:1521/dbName
+    # IBM DB/2 z / OS   jdbc:db2://localhost:50000/dbName
+    # IBM DB/2          jdbc:db2://localhost:50000/dbName:fullyMaterializeLobData=true;fullyMaterializeInputStreams=true;
+    #                   progressiveStreaming=2;useJDBC4ColumnNameAndLabelSemantics=2;
+    # SQL Server        jdbc:sqlserver://localhost:1433;databaseName=dbName;selectMethod=cursor;sendStringParametersAsUnicode=false
+    # PostgreSQL        jdbc:postgresql://localhost:5432/dbName
+    url: "YOUR_JDBC_URL"
+    # driverClass -- jdbc class. Valid values are:
+    #
+    # Oracle            oracle.jdbc.OracleDriver
+    # IBM DB/2          com.ibm.db2.jcc.DB2Driver
+    # SQL Server        com.microsoft.sqlserver.jdbc.SQLServerDriver
+    # PostgreSQL        org.postgresql.Driver
+    driverClass: "YOUR_JDBC_DRIVER_CLASS"
+    # pega.database.type Valid values are: mssql, oracledate, udb, db2zos, postgres
+    dbType: "YOUR_DATABASE_TYPE"
+    # For databases that use multiple JDBC driver files (such as DB2), specify comma separated values for 'driverUri'
+    driverUri: "YOUR_JDBC_DRIVER_URI"
+    username: "YOUR_JDBC_USERNAME"
+    password: "YOUR_JDBC_PASSWORD"
+    # To avoid exposing username & password, leave the jdbc.password & jdbc.username parameters empty (no quotes),
+    # configure JDBC username & password parameters in the External Secrets Manager, and enter the external secret for the credentials
+    # make sure the keys in the secret should be DB_USERNAME and DB_PASSWORD respectively
+    external_secret_name: ""
+    # CUSTOM CONNECTION PROPERTIES
+    # Use the connectionProperties parameter to pass connection settings to your deployment
+    # by adding a list of semi-colon-delimited required connection setting. The list string must end with ";".
+    # For example, you can set a custom authentication using Azure Managed Identity and avoid using a password.
+    # To pass an Authentication method and a managed identity, MSI Client ID,
+    # set: connectionProperties: "Authentication=ActiveDirectoryMSI;msiClientId=;"
+    connectionProperties: ""
+    rulesSchema: "YOUR_RULES_SCHEMA"
+    dataSchema: "YOUR_DATA_SCHEMA"
+    customerDataSchema: ""
+
+  customArtifactory:
+    # If you use a secured custom artifactory to manage your JDBC driver,
+    # provide the authentication details below by filling in the appropriate authentication section,
+    # either basic or apiKey.
+    authentication:
+      # Provide the basic authentication credentials or the API key authentication details to satisfy your custom artifactory authentication mechanism.
+      basic:
+        username: ""
+        password: ""
+      apiKey:
+        headerName: ""
+        value: ""
+    # To avoid exposing basic.username, basic.password, apiKey.headerName, apiKey.value parameters, configure the
+    # basic.username, basic.password, apiKey.headerName, apiKey.value parameters in External Secrets Manager, and enter the external secret for the credentials
+    # make sure the keys in the secret should be CUSTOM_ARTIFACTORY_USERNAME, CUSTOM_ARTIFACTORY_PASSWORD, CUSTOM_ARTIFACTORY_APIKEY_HEADER, CUSTOM_ARTIFACTORY_APIKEY
+    external_secret_name: ""
+    # Leave customArtifactory.enableSSLVerification enabled to ensure secure access to your custom artifactory;
+    # when customArtifactory.enableSSLVerification is false, SSL verification is skipped and establishes an insecure connection.
+    enableSSLVerification: true
+    # Provide a required domain certificate for your custom artifactory; if none is required, leave this field blank.
+    certificate:
+
+  docker:
+    # If using a custom Docker registry, supply the credentials here to pull Docker images.
+    registry:
+      url: "YOUR_DOCKER_REGISTRY"
+      username: "YOUR_DOCKER_REGISTRY_USERNAME"
+      password: "YOUR_DOCKER_REGISTRY_PASSWORD"
+      # To avoid exposing Docker registry details, create secrets to manage your Docker registry credentials.
+      # Specify secret names as an array of comma-separated strings in double quotation marks using the imagePullSecretNames parameter. For example: ["secret1", "secret2"]
+      imagePullSecretNames: []
+    # Docker image information for the Pega docker image, containing the application server.
+    pega:
+      image: "pegasystems/pega"
+
+  utilityImages:
+    busybox:
+      image: busybox:1.31.0
+      imagePullPolicy: IfNotPresent
+    k8s_wait_for:
+      image: pegasystems/k8s-wait-for
+      imagePullPolicy: "IfNotPresent"
+      # waitTimeSeconds: 2
+      # maxRetries: 1
+
+  # Upgrade specific properties
+  upgrade:
+    # Configure only for aks/pks
+    # Run "kubectl cluster-info" command to get the service host and https service port of kubernetes api server.
+    # Example - Kubernetes master is running at https://<serviceHost>:<httpsServicePort>
+    kube-apiserver:
+      serviceHost: "API_SERVICE_ADDRESS"
+      httpsServicePort: "SERVICE_PORT_HTTPS"
+
+  # Set the `compressedConfigurations` parameter to `true` when the configuration files under charts/pega/config/deploy are in compressed format.
+  # For more information, see the “Pega compressed configuration files” section in the Pega Helm chart documentation.
+  compressedConfigurations: false
+
+  # Specify the Pega tiers to deploy
+  tier:
+    - name: "web"
+      # Create an interactive tier for web users. This tier uses
+      # the WebUser node type and will be exposed via a service to
+      # the load balancer.
+      nodeType: "WebUser"
+
+      # Pega requestor specific properties
+      requestor:
+        # Inactivity time after which requestor is passivated
+        passivationTimeSec: 900
+
+      service:
+        # For help configuring the service block, see the Helm chart documentation
+        # https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#service
+        httpEnabled: true
+        port: 80
+        targetPort: 8080
+        # Use this parameter to deploy a specific type of service using the serviceType parameter and specify the type of service in double quotes.
+        # This is an optional value and should be used based on the use case.
+        # This should be set only in case of eks, gke and other cloud providers. This option should not be used for k8s and minikube.
+        # For example if you want to deploy a service of type LoadBalancer, uncomment the following line and specify serviceType: "LoadBalancer"
+        # serviceType: ""
+        # Specify the CIDR ranges to restrict the service access to the given CIDR range.
+        # Each new CIDR block should be added in a separate line.
+        # Should be used only when serviceType is set to LoadBalancer.
+        # Uncomment the following lines and replace the CIDR blocks with your configuration requirements.
+        # loadBalancerSourceRanges:
+        #   - "123.123.123.0/24"
+        #   - "128.128.128.64/32"
+        # To configure TLS between the ingress/load balancer and the backend, set the following:
+        tls:
+          enabled: true
+          # To avoid entering the certificate values in plain text, configure the keystore, keystorepassword, cacertificate parameter
+          # values in the External Secrets Manager, and enter the external secret name below
+          # make sure the keys in the secret should be TOMCAT_KEYSTORE_CONTENT, TOMCAT_KEYSTORE_PASSWORD and ca.crt respectively
+          external_secret_names: []
+          # If using tools like cert-manager to generate certificates, provide the keystore name that is autogenerated by the external tool.
+          external_keystore_name: "EXTERNAL_KEYSTORE_NAME"
+          # If using the External Secrets Operator and not using the standard password key, provide the key for the keystore password.
+          # Default is TOMCAT_KEYSTORE_PASSWORD
+          external_keystore_password: "EXTERNAL_KEYSTORE_PASSWORD"
+          keystore:
+          keystorepassword:
+          port: 443
+          targetPort: 8443
+          # set the value of CA certificate here in case of baremetal/openshift deployments - CA certificate should be in base64 format
+          # pass the certificateChainFile file if you are using certificateFile and certificateKeyFile
+          cacertificate:
+          # provide the SSL certificate and private key as a PEM format
+          certificateFile:
+          certificateKeyFile:
+          # if you will deploy traefik addon chart and enable traefik, set enabled=true; otherwise leave the default setting.
+          traefik:
+            enabled: false
+            # the SAN of the certificate present inside the container
+            serverName: ""
+            # set insecureSkipVerify=true, if the certificate verification has to be skipped
+            insecureSkipVerify: false
+
+      ingress:
+        # For help configuring the ingress block including TLS, see the Helm chart documentation
+        # https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#ingress
+
+        # Enter the domain name to access web nodes via a load balancer.
+        # e.g. web.mypega.example.com
+        domain: "YOUR_WEB_NODE_DOMAIN"
+        # Configure custom path for given host along with pathType. Default pathType is ImplementationSpecific.
+        # path:
+        # pathType:
+        tls:
+          # Enable TLS encryption
+          enabled: true
+          # secretName:
+          # useManagedCertificate: false
+          # ssl_annotation:
+          # For Openshift, Pega deployments enable TLS to secure the connection
+          # from the browser to the router by creating the route using reencrypt termination policy.
+          # Add your certificate, the corresponding key using the appropriate .pem or .crt format and
+          # specify a CA certificate to validate the endpoint certificate.
+          certificate:
+          key:
+          cacertificate:
+
+      replicas: 1
+      javaOpts: ""
+      pegaDiagnosticUser: ""
+      pegaDiagnosticPassword: ""
+
+      deploymentStrategy:
+        rollingUpdate:
+          maxSurge: 1
+          maxUnavailable: 0
+        type: RollingUpdate
+
+      livenessProbe:
+        port: 8081
+
+      # Optionally override default resource specifications
+      # cpuRequest: 2
+      # memRequest: "12Gi"
+      # cpuLimit: 4
+      # memLimit: "12Gi"
+      # initialHeap: "4096m"
+      # maxHeap: "8192m"
+
+      # To configure an alternative user for custom image, set value for runAsUser.
+      # To configure an alternative group for volume mounts, set value for fsGroup
+      # See, https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#security-context
+      # securityContext:
+      #   runAsUser: 9001
+      #   fsGroup: 0
+
+      hpa:
+        enabled: true
+
+      # Set enabled to true to include a Pod Disruption Budget for this tier.
+      # To enable this budget, specify either a pdb.minAvailable or pdb.maxUnavailable
+      # value and comment out the other parameter.
+      pdb:
+        enabled: false
+        minAvailable: 1
+        # maxUnavailable: "50%"
+
+    - name: "batch"
+      # Create a background tier for batch processing. This tier uses
+      # a collection of background node types and will not be exposed to
+      # the load balancer.
+ nodeType: "BackgroundProcessing,Search,Batch,RealTime,Custom1,Custom2,Custom3,Custom4,Custom5,BIX" + + replicas: 1 + javaOpts: "" + + pegaDiagnosticUser: "" + pegaDiagnosticPassword: "" + + deploymentStrategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + + livenessProbe: + port: 8081 + + # To configure an alternative user for your custom image, set value for runAsUser + # To configure an alternative group for volume mounts, set value for fsGroup + # See, https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#security-context + # securityContext: + # runAsUser: 9001 + # fsGroup: 0 + + hpa: + enabled: true + + # Set enabled to true to include a Pod Disruption Budget for this tier. + # To enable this budget, specifiy either a pdb.minAvailable or pdb.maxUnavailable + # value and comment out the other parameter. + pdb: + enabled: false + minAvailable: 1 + # maxUnavailable: "50%" + + - name: "stream" + # Create a stream tier for queue processing. This tier deploys + # as a stateful set to ensure durability of queued data. It may + # be optionally exposed to the load balancer. + # Note: Stream tier is deprecated, please enable externalized Kafka service configuration under External Services. + # When externalized Kafka service is enabled, we should remove the entire stream tier. + nodeType: "Stream" + + # Pega requestor specific properties + requestor: + # Inactivity time after which requestor is passivated + passivationTimeSec: 900 + + service: + port: 7003 + targetPort: 7003 + + # If a nodeSelector is required for this or any tier, it may be specified here: + # nodeSelector: + # disktype: ssd + + ingress: + # Enter the domain name to access web nodes via a load balancer. + # e.g. web.mypega.example.com + domain: "YOUR_STREAM_NODE_DOMAIN" + tls: + # Enable TLS encryption + enabled: true + # secretName: + # useManagedCertificate: false + # ssl_annotation: + + livenessProbe: + port: 8081 + + # To configure an alternative user for your custom image, set value for runAsUser + # To configure an alternative group for volume mounts, set value for fsGroup + # See, https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#security-context + # securityContext: + # runAsUser: 9001 + # fsGroup: 0 + + replicas: 2 + + volumeClaimTemplate: + resources: + requests: + storage: 5Gi + + # Set enabled to true to include a Pod Disruption Budget for this tier. + # To enable this budget, specifiy either a pdb.minAvailable or pdb.maxUnavailable + # value and comment out the other parameter. + pdb: + enabled: false + minAvailable: 1 + # maxUnavailable: "50%" + +# External services + +# Cassandra automatic deployment settings. +cassandra: + enabled: true + persistence: + enabled: true + resources: + requests: + memory: "4Gi" + cpu: 2 + limits: + memory: "8Gi" + cpu: 4 + +# DDS (external Cassandra) connection settings. +# These settings should only be modified if you are using a custom Cassandra deployment. +# To deploy Pega without Cassandra, comment out or delete the following dds section and set +# the cassandra.enabled property above to false. +dds: + # A comma separated list of hosts in the Cassandra cluster. + externalNodes: "" + # TCP Port to connect to cassandra. + port: "9042" + # The username for authentication with the Cassandra cluster. + username: "dnode_ext" + # The password for authentication with the Cassandra cluster. + password: "dnode_ext" + # Whether to enable client encryption on the Cassandra connection. 
+  clientEncryption: false
+  # If required, provide the trustStore certificate file name.
+  # When using a trustStore certificate, you must also include a Kubernetes secret name that contains the trustStore certificate in the global.certificatesSecrets parameter.
+  # Pega deployments only support trustStores using the Java Key Store (.jks) format.
+  trustStore: ""
+  # If required provide trustStorePassword value in plain text.
+  trustStorePassword: ""
+  # If required, provide the keystore certificate file name.
+  # When using a keystore certificate, you must also include a Kubernetes secret name that contains the keystore certificate in the global.certificatesSecrets parameter.
+  # Pega deployments only support keystore using the Java Key Store (.jks) format.
+  keyStore: ""
+  # If required provide keyStorePassword value in plain text.
+  keyStorePassword: ""
+  # To avoid exposing username, password, trustStorePassword, keyStorePassword parameters, configure the
+  # username, password, trustStorePassword, keyStorePassword parameters in External Secrets Manager, and enter the external secret for the credentials
+  # make sure the keys in the secret should be CASSANDRA_USERNAME, CASSANDRA_PASSWORD, CASSANDRA_TRUSTSTORE_PASSWORD, CASSANDRA_KEYSTORE_PASSWORD
+  external_secret_name: ""
+  # Enable asynchronous processing of records in DDS Dataset save operation. Failures to store individual records will
+  # not interrupt Dataset save operations.
+  asyncProcessingEnabled: false
+  # Specify a prefix to use when creating Pega-managed keyspaces in Cassandra.
+  keyspacesPrefix: ""
+  # Enable an extended token aware policy for use when a Cassandra range query runs. When enabled this policy selects a
+  # token from the token range to determine which Cassandra node to send the request. Before you can enable this policy,
+  # you must configure the token range partitioner.
+  extendedTokenAwarePolicy: false
+  # Enable a latency awareness policy, which collects the latencies of the queries for each Cassandra node and maintains
+  # a per-node latency score (an average).
+  latencyAwarePolicy: false
+  # Enable the use of a customized retry policy for your Pega Platform deployment. After enabling this policy in your
+  # deployment configuration, Cassandra queries that timeout will be retried. The number of retries may be configured
+  # using the dynamic system setting (DSS): dnode/cassandra_custom_retry_policy/retryCount. If not configured, queries
+  # will be retried once.
+  customRetryPolicy: false
+  # Enable the use of a customized retry policy for your Pega Platform deployment for Pega Platform ’23 and earlier
+  # releases. After you enable this policy in your deployment configuration, the deployment retries Cassandra queries
+  # that time out. Configure the number of retries using the dynamic system setting (DSS):
+  # dnode/cassandra_custom_retry_policy/retryCount. The default is 1, so if you do not specify a retry count, timed out
+  # queries are retried once.
+  customRetryPolicyEnabled: false
+  # Use this parameter in Pega Platform '24 and later instead of `customRetryPolicy`. Configure the number of retries
+  # using the `customRetryPolicyCount` property.
+  customRetryPolicyCount: 1
+  # Specify the number of retry attempts when `customRetryPolicyEnabled` is true. For Pega Platform '23 and earlier
+  # releases use the dynamic system setting (DSS): dnode/cassandra_custom_retry_policy/retryCount.
+  speculativeExecutionPolicy: false
+  # Enable the speculative execution policy for retrieving data from your Cassandra service for Pega Platform '23 and
+  # earlier releases. When enabled, Pega Platform sends a query to multiple nodes in your Cassandra service and
+  # processes the first response. This provides lower perceived latencies for your deployment, but puts greater load
+  # on your Cassandra service. Configure the speculative execution delay and max executions using the following dynamic
+  # system settings (DSS): dnode/cassandra_speculative_execution_policy/delay and
+  # dnode/cassandra_speculative_execution_policy/max_executions.
+  speculativeExecutionPolicyEnabled: false
+  # Use this parameter in Pega Platform '24 and later instead of `speculativeExecutionPolicy`. Configure the
+  # speculative execution delay and max executions using the `speculativeExecutionPolicyDelay` and
+  # `speculativeExecutionPolicyMaxExecutions` properties.
+  speculativeExecutionPolicyDelay: 100
+  # Specify the delay in milliseconds before speculative executions are made when `speculativeExecutionPolicyEnabled` is
+  # true. For Pega Platform '23 and earlier releases use the dynamic system setting (DSS):
+  # dnode/cassandra_speculative_execution_policy/delay.
+  speculativeExecutionPolicyMaxExecutions: 2
+  # Specify the maximum number of speculative execution attempts when `speculativeExecutionPolicyEnabled` is true. For
+  # Pega Platform '23 and earlier releases use the dynamic system setting (DSS):
+  # dnode/cassandra_speculative_execution_policy/max_executions.
+  jmxMetricsEnabled: true
+  # Enable reporting of DDS SDK metrics to a Comma Separated Value (CSV) format for use by your organization to monitor
+  # your Cassandra service. If you enable this property, use the Pega Platform DSS:
+  # dnode/ddsclient/metrics/csv_directory to customize the filepath to which the deployment writes CSV files. By
+  # default, after you enable this property, CSV files will be written to the Pega Platform work directory.
+  csvMetricsEnabled: false
+  # Enable reporting of DDS SDK metrics to your Pega Platform logs.
+  logMetricsEnabled: false
+
+# Elasticsearch deployment settings.
+# Note: This Elasticsearch deployment is used for Pega search, and is not the same Elasticsearch deployment used by the EFK stack.
+# These search nodes will be deployed regardless of the Elasticsearch configuration above.
+# Refer to README document to configure `Search and Reporting Service` as a search functionality provider under this section.
+pegasearch:
+  image: "pegasystems/search"
+  memLimit: "3Gi"
+  replicas: 1
+
+# Pega Installer settings.
+installer:
+  image: "YOUR_INSTALLER_IMAGE:TAG"
+  # Set the initial administrator@pega.com password for your installation. This will need to be changed at first login.
+  # The adminPassword value cannot start with "@".
+  adminPassword: "ADMIN_PASSWORD"
+  # Upgrade specific properties
+  upgrade:
+    # Type of upgrade
+    # Valid upgradeType values are 'in-place', 'zero-downtime', 'custom', 'out-of-place-rules', 'out-of-place-data'.
+    upgradeType: "in-place"
+    # Specify a name for a target rules schema that the upgrade process creates for patches and upgrades.
+    targetRulesSchema: ""
+    # Specify a name for a target data schema that the upgrade process creates for patches and upgrades.
+    # For postgres databases that you are upgrading from Pega Infinity version 8.4.0 and later
+    # And for Oracle databases that you are upgrading from Pega Infinity version 8.4.3 and later.
+    targetDataSchema: ""
+    # Specify the username and password to access the pre-upgrade Pega Platform to perform pre- and post- actions during zero-downtime upgrades.
+    pegaRESTUsername: ""
+    pegaRESTPassword: ""
+
+# Hazelcast settings (applicable from Pega 8.6)
+hazelcast:
+  # Hazelcast docker image for platform version 8.6 through 8.7.x
+  image: "YOUR_HAZELCAST_IMAGE:TAG"
+  # Hazelcast docker image for platform version 8.8 and later
+  clusteringServiceImage: "YOUR_CLUSTERING_SERVICE_IMAGE:TAG"
+
+  # Setting below to true will deploy Pega Platform using a client-server Hazelcast model for version 8.6 through 8.7.x.
+  # Note: Make sure to set this value as "false" for Pega Platform versions before "8.6"; if not set, the installation will fail.
+  enabled: true
+
+  # Setting below to true will deploy Pega Platform using a client-server Hazelcast model for version 8.8 and later.
+  clusteringServiceEnabled: false
+
+  # Settings related to Hazelcast migration.
+  migration:
+    # Set to `true` to initiate the migration job.
+    initiateMigration: false
+    # Reference the `platform/clustering-service-kubectl` Docker image to create the migration job.
+    migrationJobImage: "YOUR_MIGRATION_JOB_IMAGE:TAG"
+    # Set to `true` when migrating from embedded Hazelcast.
+    embeddedToCSMigration: false
+
+  # No. of initial members to join
+  replicas: 3
+  # UserName in the client-server Hazelcast model authentication. This setting is exposed and not secure.
+  username: ""
+  # Password in the client-server Hazelcast model authentication. This setting is exposed and not secure.
+  password: ""
+  # To avoid exposing username and password parameters, leave these parameters empty and configure
+  # these cluster settings using an External Secrets Manager. Use the following keys in the secret:
+  # HZ_CS_AUTH_USERNAME for username and HZ_CS_AUTH_PASSWORD for password.
+  # Enter the external secret for these credentials below.
+  external_secret_name: ""
+
+# Stream (externalized Kafka service) settings.
+stream:
+  # Beginning with Pega Platform '23, enabled by default; when disabled, your deployment does not use a "Kafka stream service" configuration.
+  enabled: true
+  # Provide externalized Kafka service broker urls.
+  bootstrapServer: ""
+  # Provide Security Protocol used to communicate with kafka brokers. Supported values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
+  securityProtocol: PLAINTEXT
+  # If required, provide trustStore certificate file name
+  # When using a trustStore certificate, you must also include a Kubernetes secret name, that contains the trustStore certificate,
+  # in the global.certificatesSecrets parameter.
+  # Pega deployments only support trustStores using the Java Key Store (.jks) format.
+  trustStore: ""
+  # If required provide trustStorePassword value in plain text.
+  trustStorePassword: ""
+  # If required, provide keyStore certificate file name
+  # When using a keyStore certificate, you must also include a Kubernetes secret name, that contains the keyStore certificate,
+  # in the global.certificatesSecrets parameter.
+  # Pega deployments only support keyStores using the Java Key Store (.jks) format.
+  keyStore: ""
+  # If required, provide keyStorePassword value in plain text.
+  keyStorePassword: ""
+  # If required, provide jaasConfig value in plain text.
+  jaasConfig: ""
+  # If required, provide a SASL mechanism. Supported values are: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512.
+  saslMechanism: PLAIN
+  # By default, topics originating from Pega Platform have the pega- prefix,
+  # so that it is easy to distinguish them from topics created by other applications.
+  # Pega supports customizing the name pattern for your Externalized Kafka configuration for each deployment.
+  streamNamePattern: "pega-{stream.name}"
+  # Your replicationFactor value cannot be more than the number of Kafka brokers and 3.
+  replicationFactor: "1"
+  # To avoid exposing trustStorePassword, keyStorePassword, and jaasConfig parameters, leave the values empty and
+  # configure them using an External Secrets Manager, making sure you configure the keys in the secret in the order:
+  # STREAM_TRUSTSTORE_PASSWORD, STREAM_KEYSTORE_PASSWORD and STREAM_JAAS_CONFIG.
+  # Enter the external secret name below.
+  external_secret_name: ""
diff --git a/terratest/src/test/pega/pega-tier-deployment_test.go b/terratest/src/test/pega/pega-tier-deployment_test.go
index 39ed19f46..38b5eebc4 100644
--- a/terratest/src/test/pega/pega-tier-deployment_test.go
+++ b/terratest/src/test/pega/pega-tier-deployment_test.go
@@ -90,6 +90,7 @@ func TestPegaTierDeploymentWithFSGroup(t *testing.T) {
             "global.tier[2].securityContext.fsGroup": key, // stream tier
         },
     }
+
     yamlContent := RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-deployment.yaml"})
     yamlSplit := strings.Split(yamlContent, "---")
@@ -165,7 +166,6 @@ func VerifyDeployment(t *testing.T, pod *k8score.PodSpec, expectedSpec pegaDeplo
     for i := 0; i < count; i++ {
         actualInitContainerNames[i] = actualInitContainers[i].Name
     }
-    //require.Equal(t, expectedSpec.initContainers, actualInitContainerNames) NEED TO CHANGE FOR "install-deploy"
     VerifyInitContainerData(t, actualInitContainers, options)
     require.Equal(t, "pega-web-tomcat", pod.Containers[0].Name)
@@ -190,6 +190,14 @@ func VerifyDeployment(t *testing.T, pod *k8score.PodSpec, expectedSpec pegaDeplo
         require.Equal(t, "COSMOS_SETTINGS", pod.Containers[0].Env[envIndex].Name)
         require.Equal(t, "Pega-UIEngine/cosmosservicesURI=/c11n", pod.Containers[0].Env[envIndex].Value)
     }
+    if options.ValuesFiles != nil && expectedSpec.name == getObjName(options, "-web") && options.SetStrValues["service.tls.enabled"] == "true" {
+        envIndex++
+        require.Equal(t, "EXTERNAL_KEYSTORE_NAME", pod.Containers[0].Env[envIndex].Name)
+        require.Equal(t, "EXTERNAL_KEYSTORE_NAME", pod.Containers[0].Env[envIndex].Value)
+        envIndex++
+        require.Equal(t, "EXTERNAL_KEYSTORE_PASSWORD", pod.Containers[0].Env[envIndex].Name)
+        require.Equal(t, "EXTERNAL_KEYSTORE_PASSWORD", pod.Containers[0].Env[envIndex].Value)
+    }
     envIndex++
     require.Equal(t, "JAVA_OPTS", pod.Containers[0].Env[envIndex].Name)
     require.Equal(t, "", pod.Containers[0].Env[envIndex].Value)
diff --git a/terratest/src/test/pega/pega_tier_deployment_with_tls_enabled_test.go b/terratest/src/test/pega/pega_tier_deployment_with_tls_enabled_test.go
new file mode 100644
index 000000000..5d3be6121
--- /dev/null
+++ b/terratest/src/test/pega/pega_tier_deployment_with_tls_enabled_test.go
@@ -0,0 +1,52 @@
+package pega
+
+import (
+    "fmt"
+    "github.com/gruntwork-io/terratest/modules/helm"
+    "github.com/stretchr/testify/require"
+    "path/filepath"
+    "strings"
+    "testing"
+)
+
+func TestPegaTierDeploymentWithTlsEnabled(t *testing.T) {
+    var supportedVendors = []string{"k8s", "openshift", "eks", "gke", "aks", "pks"}
+    var supportedOperations = []string{"deploy", "install-deploy", "upgrade-deploy"}
+    var deploymentNames = []string{"pega", "myapp-dev"}
+
+    helmChartPath, err := filepath.Abs(PegaHelmChartPath)
+    require.NoError(t, err)
+
+    for _, vendor := range supportedVendors {
+
+        for _, operation := range supportedOperations {
+
+            for _, depName := range deploymentNames {
+
+                fmt.Println(vendor + "-" + operation)
+
+                var options = &helm.Options{
+                    SetValues: map[string]string{
+                        "global.provider":               vendor,
+                        "global.actions.execute":        operation,
+                        "global.deployment.name":        depName,
+                        "installer.upgrade.upgradeType": "zero-downtime",
+                        "global.storageClassName":       "storage-class",
+                    },
+                    ValuesFiles: []string{"data/pega-tier-service-override_values.yaml"},
+                    SetStrValues: map[string]string{
+                        "service.tls.enabled": "true",
+                    },
+                }
+
+                yamlContent := RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-deployment.yaml"})
+                yamlSplit := strings.Split(yamlContent, "---")
+                assertWeb(t, yamlSplit[1], options)
+                assertBatch(t, yamlSplit[2], options)
+                assertStream(t, yamlSplit[3], options)
+                assertStreamWithSorageClass(t, yamlSplit[3], options)
+
+            }
+        }
+    }
+}
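
Usage illustration (a sketch, not part of the diff): how the new values introduced above are meant to be wired together. The secret names, keystore file name, and password key below are assumptions chosen for the example (for instance, a cert-manager-generated secret holding a PKCS12 keystore); they are not defined by the chart.

global:
  tier:
    - name: "web"
      service:
        tls:
          enabled: true
          # Both secrets are projected into the Tomcat keystore volume.
          external_secret_names: ["web-tls-keystore", "web-tls-ca"]
          # Keystore file name generated by the external tool (default TOMCAT_KEYSTORE_CONTENT).
          external_keystore_name: "keystore.p12"
          # Key within the secret that holds the keystore password (default TOMCAT_KEYSTORE_PASSWORD).
          external_keystore_password: "keystore-password"

With two entries in external_secret_names, the updated pegaVolumeTomcatKeystoreTemplate helper should render a projected volume roughly along these lines (an approximate rendering for illustration, not captured from a real helm template run; the actual volume name comes from the pegaVolumeTomcatKeystore helper):

- name: pega-volume-tomcat-keystore
  projected:
    defaultMode: 420
    sources:
      - secret:
          name: web-tls-keystore
      - secret:
          name: web-tls-ca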