From a6f6f0dbfbfebe49328ebc648f4c8fab324dcdc7 Mon Sep 17 00:00:00 2001 From: Davis Walsh Date: Wed, 25 Oct 2023 13:07:10 -0400 Subject: [PATCH 01/12] Support custom labels for HPA and PDB (#657) * support custom labels on HPA * support custom labels on PDB --- charts/pega/templates/_pega-pdb.tpl | 4 + charts/pega/templates/_pega_hpa.tpl | 4 + .../pega/data/values_hpa_custom_label.yaml | 15 + .../pega/data/values_pdb_custom_labels.yaml | 293 ++++++++++++++++++ terratest/src/test/pega/pega-tier-hpa_test.go | 261 ++++++++++------ terratest/src/test/pega/pega-tier-pdb_test.go | 65 ++++ 6 files changed, 542 insertions(+), 100 deletions(-) create mode 100644 terratest/src/test/pega/data/values_hpa_custom_label.yaml create mode 100644 terratest/src/test/pega/data/values_pdb_custom_labels.yaml diff --git a/charts/pega/templates/_pega-pdb.tpl b/charts/pega/templates/_pega-pdb.tpl index f76eaccd9..d9423b6ae 100644 --- a/charts/pega/templates/_pega-pdb.tpl +++ b/charts/pega/templates/_pega-pdb.tpl @@ -10,6 +10,10 @@ kind: PodDisruptionBudget metadata: name: {{ .name }}-pdb namespace: {{ .root.Release.Namespace }} +{{- if .pdb.labels }} + labels: +{{ toYaml .pdb.labels | indent 4 }} +{{- end }} spec: {{- if .pdb.minAvailable }} minAvailable: {{ .pdb.minAvailable }} diff --git a/charts/pega/templates/_pega_hpa.tpl b/charts/pega/templates/_pega_hpa.tpl index 0857131db..7bf84bca6 100644 --- a/charts/pega/templates/_pega_hpa.tpl +++ b/charts/pega/templates/_pega_hpa.tpl @@ -10,6 +10,10 @@ kind: HorizontalPodAutoscaler metadata: name: {{ .name | quote}} namespace: {{ .root.Release.Namespace }} +{{- if .hpa.labels }} + labels: +{{ toYaml .hpa.labels | indent 4 }} +{{- end }} spec: scaleTargetRef: apiVersion: apps/v1 diff --git a/terratest/src/test/pega/data/values_hpa_custom_label.yaml b/terratest/src/test/pega/data/values_hpa_custom_label.yaml new file mode 100644 index 000000000..935407c7f --- /dev/null +++ b/terratest/src/test/pega/data/values_hpa_custom_label.yaml @@ -0,0 +1,15 @@ +--- +global: + tier: + - name: "web" + hpa: + enabled: true + labels: + web-label: "somevalue" + web-other-label: "someothervalue" + - name: "batch" + hpa: + enabled: true + labels: + batch-label: "batchlabel" + batch-other-label: "anothervalue" diff --git a/terratest/src/test/pega/data/values_pdb_custom_labels.yaml b/terratest/src/test/pega/data/values_pdb_custom_labels.yaml new file mode 100644 index 000000000..e06275c8c --- /dev/null +++ b/terratest/src/test/pega/data/values_pdb_custom_labels.yaml @@ -0,0 +1,293 @@ +--- +global: + # This values.yaml file is an example. For more information about + # each configuration option, see the project readme. + + # Enter your Kubernetes provider. + provider: "YOUR_KUBERNETES_PROVIDER" + + deployment: + # The name specified will be used to prefix all of the Pega pods (replacing "pega" with something like "app1-dev"). + name: "pega" + + # Deploy Pega nodes + actions: + execute: "deploy" + + # Provide JDBC connection information to the Pega relational database + # If you are installing or upgrading on IBM DB2, update the udb.conf file in the /charts/pega/charts/installer/config/udb directory with any additional connection properties. 
+ jdbc: + # url Valid values are: + # + # Oracle jdbc:oracle:thin:@//localhost:1521/dbName + # IBM DB/2 z / OS jdbc:db2://localhost:50000/dbName + # IBM DB/2 jdbc:db2://localhost:50000/dbName:fullyMaterializeLobData=true;fullyMaterializeInputStreams=true; + # progressiveStreaming=2;useJDBC4ColumnNameAndLabelSemantics=2; + # SQL Server jdbc:sqlserver://localhost:1433;databaseName=dbName;selectMethod=cursor;sendStringParametersAsUnicode=false + # PostgreSQL jdbc:postgresql://localhost:5432/dbName + url: "YOUR_JDBC_URL" + # driverClass -- jdbc class. Valid values are: + # + # Oracle oracle.jdbc.OracleDriver + # IBM DB/2 com.ibm.db2.jcc.DB2Driver + # SQL Server com.microsoft.sqlserver.jdbc.SQLServerDriver + # PostgreSQL org.postgresql.Driver + driverClass: "YOUR_JDBC_DRIVER_CLASS" + # pega.database.type Valid values are: mssql, oracledate, udb, db2zos, postgres + dbType: "YOUR_DATABASE_TYPE" + # For databases that use multiple JDBC driver files (such as DB2), specify comma separated values for 'driverUri' + driverUri: "YOUR_JDBC_DRIVER_URI" + username: "YOUR_JDBC_USERNAME" + password: "YOUR_JDBC_PASSWORD" + # CUSTOM CONNECTION PROPERTIES + # Add a list of ; delimited connections properties. The list must end with ; + # For example: connectionProperties=user=usr;password=pwd; + connectionProperties: "" + rulesSchema: "YOUR_RULES_SCHEMA" + dataSchema: "YOUR_DATA_SCHEMA" + customerDataSchema: "" + + # If using a custom Docker registry, supply the credentials here to pull Docker images. + docker: + registry: + url: "YOUR_DOCKER_REGISTRY" + username: "YOUR_DOCKER_REGISTRY_USERNAME" + password: "YOUR_DOCKER_REGISTRY_PASSWORD" + # Docker image information for the Pega docker image, containing the application server. + pega: + image: "pegasystems/pega" + + # Upgrade specific properties + upgrade: + # Configure only for aks/pks + # Run "kubectl cluster-info" command to get the service host and https service port of kubernetes api server. + # Example - Kubernetes master is running at https://: + kube-apiserver: + serviceHost: "API_SERVICE_ADDRESS" + httpsServicePort: "SERVICE_PORT_HTTPS" + + # Specify the Pega tiers to deploy + tier: + - name: "web" + # Create a an interactive tier for web users. This tier uses + # the WebUser node type and will be exposed via a service to + # the load balancer. + nodeType: "WebUser" + + # Pega requestor specific properties + requestor: + # Inactivity time after which requestor is passivated + passivationTimeSec: 900 + + service: + # For help configuring the service block, see the Helm chart documentation + # https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#service + port: 80 + targetPort: 8080 + + ingress: + # For help configuring the ingress block including TLS, see the Helm chart documentation + # https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#ingress + + # Enter the domain name to access web nodes via a load balancer. + # e.g. 
web.mypega.example.com + domain: "YOUR_WEB_NODE_DOMAIN" + tls: + # Enable TLS encryption + enabled: true + # secretName: + # useManagedCertificate: false + # ssl_annotation: + + replicas: 1 + javaOpts: "" + pegaDiagnosticUser: "" + pegaDiagnosticPassword: "" + + deploymentStrategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + + livenessProbe: + port: 8081 + + # Optionally overridde default resource specifications + # cpuRequest: 2 + # memRequest: "12Gi" + # cpuLimit: 4 + # memLimit: "12Gi" + # initialHeap: "4096m" + # maxHeap: "8192m" + + # To configure an alternative user for custom image, set value for runAsUser. + # See, https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#security-context + # securityContext: + # runAsUser: 9001 + + hpa: + enabled: true + + # Set enabled to true to include a Pod Disruption Budget for this tier + pdb: + enabled: true + minAvailable: 1 + labels: + weblabel: "somevalue" + anotherlabel: "anothervalue" + + - name: "batch" + # Create a background tier for batch processing. This tier uses + # a collection of background node types and will not be exposed to + # the load balancer. + nodeType: "BackgroundProcessing,Search,Batch,RealTime,Custom1,Custom2,Custom3,Custom4,Custom5,BIX" + + replicas: 1 + javaOpts: "" + + pegaDiagnosticUser: "" + pegaDiagnosticPassword: "" + + deploymentStrategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + + livenessProbe: + port: 8081 + + # To configure an alternative user for your custom image, set value for runAsUser + # See, https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#security-context + # securityContext: + # runAsUser: 9001 + + hpa: + enabled: true + + # Set enabled to true to include a Pod Disruption Budget for this tier + pdb: + enabled: true + minAvailable: 1 + labels: + batchlabel: "batchvalue" + anotherbatchlabel: "batchvalue2" + + - name: "stream" + # Create a stream tier for queue processing. This tier deploys + # as a stateful set to ensure durability of queued data. It may + # be optionally exposed to the load balancer. + nodeType: "Stream" + + # Pega requestor specific properties + requestor: + # Inactivity time after which requestor is passivated + passivationTimeSec: 900 + + service: + port: 7003 + targetPort: 7003 + + # If a nodeSelector is required for this or any tier, it may be specified here: + # nodeSelector: + # disktype: ssd + + ingress: + # Enter the domain name to access web nodes via a load balancer. + # e.g. web.mypega.example.com + domain: "YOUR_STREAM_NODE_DOMAIN" + tls: + # Enable TLS encryption + enabled: true + # secretName: + # useManagedCertificate: false + # ssl_annotation: + + livenessProbe: + port: 8081 + + # To configure an alternative user for your custom image, set value for runAsUser + # See, https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#security-context + # securityContext: + # runAsUser: 9001 + + replicas: 2 + + volumeClaimTemplate: + resources: + requests: + storage: 5Gi + + # Set enabled to true to include a Pod Disruption Budget for this tier + pdb: + enabled: true + minAvailable: 1 + # maxUnavailable: "50%" + +# External services + +# Cassandra automatic deployment settings. +cassandra: + enabled: true + persistence: + enabled: true + resources: + requests: + memory: "4Gi" + cpu: 2 + limits: + memory: "8Gi" + cpu: 4 + +# DDS (external Cassandra) connection settings. 
+# These settings should only be modified if you are using a custom Cassandra deployment. +dds: + externalNodes: "" + port: "9042" + username: "dnode_ext" + password: "dnode_ext" + clientEncryption: false + trustStore: "" + trustStorePassword: "" + keyStore: "" + keyStorePassword: "" + +# Elasticsearch deployment settings. +# Note: This Elasticsearch deployment is used for Pega search, and is not the same Elasticsearch deployment used by the EFK stack. +# These search nodes will be deployed regardless of the Elasticsearch configuration above. +# Refer to README document to configure `Search and Reporting Service` as a search functionality provider under this section. +pegasearch: + image: "pegasystems/search" + memLimit: "3Gi" + replicas: 1 + +# Pega Installer settings. +installer: + image: "YOUR_INSTALLER_IMAGE:TAG" + # Set the initial administrator@pega.com password for your installation. This will need to be changed at first login. + # The adminPassword value cannot start with "@". + adminPassword: "ADMIN_PASSWORD" + # Upgrade specific properties + upgrade: + # Type of upgrade + # Valid upgradeType values are 'in-place' , 'zero-downtime' , 'custom' , 'out-of-place-rules' , 'out-of-place-data' . + upgradeType: "in-place" + # Specify a name for a target rules schema that the upgrade process creates for patches and upgrades. + targetRulesSchema: "" + # Specify a name for a target data schema that the upgrade process creates for patches and upgrades. + # For postgres databases that you are upgrading from Pega Infinity version 8.4.0 and later + # And for Oracle databases that you are upgrading from Pega Infinity version 8.4.3 and later. + targetDataSchema: "" + +# Hazelcast settings (applicable from Pega 8.6) +hazelcast: + image: "YOUR_HAZELCAST_IMAGE:TAG" + # Setting below to true will deploy the infinity in client-server Hazelcast model + enabled: false + # No. 
of initial members to join + replicas: 3 + # UserName to be used in client-server Hazelcast model for authentication + username: "" + # Password to be used in client-server Hazelcast model for authentication + password: "" diff --git a/terratest/src/test/pega/pega-tier-hpa_test.go b/terratest/src/test/pega/pega-tier-hpa_test.go index 3b65e8d24..9e4facd75 100644 --- a/terratest/src/test/pega/pega-tier-hpa_test.go +++ b/terratest/src/test/pega/pega-tier-hpa_test.go @@ -8,15 +8,15 @@ import ( "github.com/gruntwork-io/terratest/modules/helm" "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/api/resource" autoscaling "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" ) func TestPegaTierHPA(t *testing.T) { - var supportedVendors = []string{"k8s", "openshift", "eks","gke","aks","pks"} - var supportedOperations = []string{"deploy","install-deploy","upgrade-deploy"} - var deploymentNames = []string{"pega","myapp-dev"} + var supportedVendors = []string{"k8s", "openshift", "eks", "gke", "aks", "pks"} + var supportedOperations = []string{"deploy", "install-deploy", "upgrade-deploy"} + var deploymentNames = []string{"pega", "myapp-dev"} helmChartPath, err := filepath.Abs(PegaHelmChartPath) require.NoError(t, err) @@ -25,48 +25,50 @@ func TestPegaTierHPA(t *testing.T) { for _, operation := range supportedOperations { - for _, depName := range deploymentNames { + for _, depName := range deploymentNames { - fmt.Println(vendor + "-" + operation) + fmt.Println(vendor + "-" + operation) - var options = &helm.Options{ - SetValues: map[string]string{ - "global.deployment.name": depName, - "global.provider": vendor, - "global.actions.execute": operation, + var options = &helm.Options{ + SetValues: map[string]string{ + "global.deployment.name": depName, + "global.provider": vendor, + "global.actions.execute": operation, "installer.upgrade.upgradeType": "zero-downtime", - }, - } - - yamlContent := RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-hpa.yaml"}) - verifyPegaHPAs(t, yamlContent, options, []hpa{ - { - name: getObjName(options, "-web-hpa"), - targetRefName: getObjName(options, "-web"), - kind: "Deployment", - apiversion: "apps/v1", - cpu: true, - cpuValue: parseResourceValue(t, "2.55"), - }, - { - name: getObjName(options, "-batch-hpa"), - targetRefName: getObjName(options, "-batch"), - kind: "Deployment", - apiversion: "apps/v1", - cpu: true, - cpuValue: parseResourceValue(t, "2.55"), - }, - }) - } + }, + } + + yamlContent := RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-hpa.yaml"}) + verifyPegaHPAs(t, yamlContent, options, []hpa{ + { + name: getObjName(options, "-web-hpa"), + targetRefName: getObjName(options, "-web"), + kind: "Deployment", + apiversion: "apps/v1", + cpu: true, + cpuValue: parseResourceValue(t, "2.55"), + }, + { + name: getObjName(options, "-batch-hpa"), + targetRefName: getObjName(options, "-batch"), + kind: "Deployment", + apiversion: "apps/v1", + cpu: true, + cpuValue: parseResourceValue(t, "2.55"), + }, + }) + } } } } +func TestPegaTierHPAWithCustomLabel(t *testing.T) { + var supportedVendors = []string{"k8s", "openshift", "eks", "gke", "aks", "pks"} + var supportedOperations = []string{"deploy", "install-deploy", "upgrade-deploy"} + var deploymentNames = []string{"pega", "myapp-dev"} -func TestPegaTierHPADisableTarget(t *testing.T) { - var supportedVendors = []string{"k8s", "openshift", "eks","gke","aks","pks"} - var supportedOperations = 
[]string{"deploy","install-deploy","upgrade-deploy"} - var deploymentNames = []string{"pega","myapp-dev"} + expectedWebLabels := map[string]string{"web-label": "somevalue", "web-other-label": "someothervalue"} + expectedBatchLabels := map[string]string{"batch-label": "batchlabel", "batch-other-label": "anothervalue"} helmChartPath, err := filepath.Abs(PegaHelmChartPath) require.NoError(t, err) @@ -78,48 +80,100 @@ func TestPegaTierHPADisableTarget(t *testing.T) { for _, operation := range supportedOperations { - for _, depName := range deploymentNames { - fmt.Println(vendor + "-" + operation) + for _, depName := range deploymentNames { - var options = &helm.Options{ - SetValues: map[string]string{ - "global.deployment.name": depName, - "global.provider": vendor, - "global.actions.execute": operation, + fmt.Println(vendor + "-" + operation) + + var options = &helm.Options{ + SetValues: map[string]string{ + "global.deployment.name": depName, + "global.provider": vendor, + "global.actions.execute": operation, "installer.upgrade.upgradeType": "zero-downtime", - }, - } - - - yamlContent := RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-hpa.yaml"}, "--values", testsPath+"/data/values_hpa_disabletarget.yaml") - verifyPegaHPAs(t, yamlContent, options, []hpa{ - { - name: getObjName(options, "-web-hpa"), - targetRefName: getObjName(options, "-web"), - kind: "Deployment", - apiversion: "apps/v1", - mem: true, - memPercent: 85, - }, - { - name: getObjName(options, "-batch-hpa"), - targetRefName: getObjName(options, "-batch"), - kind: "Deployment", - apiversion: "apps/v1", - cpu: true, - cpuValue: parseResourceValue(t, "2.55"), - }, - }) - } + }, + } + + yamlContent := RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-hpa.yaml"}, "--values", testsPath+"/data/values_hpa_custom_label.yaml") + verifyPegaHPAs(t, yamlContent, options, []hpa{ + { + name: getObjName(options, "-web-hpa"), + targetRefName: getObjName(options, "-web"), + kind: "Deployment", + apiversion: "apps/v1", + labels: expectedWebLabels, + cpu: true, + cpuValue: parseResourceValue(t, "2.55"), + }, + { + name: getObjName(options, "-batch-hpa"), + targetRefName: getObjName(options, "-batch"), + kind: "Deployment", + apiversion: "apps/v1", + labels: expectedBatchLabels, + cpu: true, + cpuValue: parseResourceValue(t, "2.55"), + }, + }) + } } } } +func TestPegaTierHPADisableTarget(t *testing.T) { + var supportedVendors = []string{"k8s", "openshift", "eks", "gke", "aks", "pks"} + var supportedOperations = []string{"deploy", "install-deploy", "upgrade-deploy"} + var deploymentNames = []string{"pega", "myapp-dev"} + + helmChartPath, err := filepath.Abs(PegaHelmChartPath) + require.NoError(t, err) + + testsPath, err := filepath.Abs(PegaHelmChartTestsPath) + require.NoError(t, err) + + for _, vendor := range supportedVendors { + + for _, operation := range supportedOperations { + + for _, depName := range deploymentNames { + fmt.Println(vendor + "-" + operation) + + var options = &helm.Options{ + SetValues: map[string]string{ + "global.deployment.name": depName, + "global.provider": vendor, + "global.actions.execute": operation, + "installer.upgrade.upgradeType": "zero-downtime", + }, + } + + yamlContent := RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-hpa.yaml"}, "--values", testsPath+"/data/values_hpa_disabletarget.yaml") + verifyPegaHPAs(t, yamlContent, options, []hpa{ + { + name: getObjName(options, "-web-hpa"), + targetRefName: getObjName(options, "-web"), + kind: 
"Deployment", + apiversion: "apps/v1", + mem: true, + memPercent: 85, + }, + { + name: getObjName(options, "-batch-hpa"), + targetRefName: getObjName(options, "-batch"), + kind: "Deployment", + apiversion: "apps/v1", + cpu: true, + cpuValue: parseResourceValue(t, "2.55"), + }, + }) + } + } + } +} func TestPegaTierOverrideValues(t *testing.T) { var supportedVendors = []string{"k8s", "openshift", "eks", "gke", "aks", "pks"} var supportedOperations = []string{"deploy", "install-deploy", "upgrade-deploy"} - var deploymentNames = []string{"pega","myapp-dev"} + var deploymentNames = []string{"pega", "myapp-dev"} helmChartPath, err := filepath.Abs(PegaHelmChartPath) require.NoError(t, err) @@ -131,38 +185,38 @@ func TestPegaTierOverrideValues(t *testing.T) { for _, operation := range supportedOperations { - for _, depName := range deploymentNames { - fmt.Println(vendor + "-" + operation + "-" + depName) + for _, depName := range deploymentNames { + fmt.Println(vendor + "-" + operation + "-" + depName) - var options = &helm.Options{ - SetValues: map[string]string{ - "global.provider": vendor, - "global.actions.execute": operation, + var options = &helm.Options{ + SetValues: map[string]string{ + "global.provider": vendor, + "global.actions.execute": operation, "installer.upgrade.upgradeType": "zero-downtime", - }, - } - - yamlContent := RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-hpa.yaml"}, "--values", testsPath+"/data/values_hpa_overridevalues.yaml") - verifyPegaHPAs(t, yamlContent, options, []hpa{ - { - name: getObjName(options, "-web-hpa"), - targetRefName: getObjName(options, "-web"), - kind: "Deployment", - apiversion: "apps/v1", - cpu: true, - cpuValue: parseResourceValue(t, "4.13"), - mem: true, - memPercent: 42, - }, - { - name: getObjName(options, "-batch-hpa"), - targetRefName: getObjName(options, "-batch"), - kind: "Deployment", - apiversion: "apps/v1", - cpu: true, - cpuPercent: 24, - }, - }) + }, + } + + yamlContent := RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-hpa.yaml"}, "--values", testsPath+"/data/values_hpa_overridevalues.yaml") + verifyPegaHPAs(t, yamlContent, options, []hpa{ + { + name: getObjName(options, "-web-hpa"), + targetRefName: getObjName(options, "-web"), + kind: "Deployment", + apiversion: "apps/v1", + cpu: true, + cpuValue: parseResourceValue(t, "4.13"), + mem: true, + memPercent: 42, + }, + { + name: getObjName(options, "-batch-hpa"), + targetRefName: getObjName(options, "-batch"), + kind: "Deployment", + apiversion: "apps/v1", + cpu: true, + cpuPercent: 24, + }, + }) } } } @@ -210,6 +264,12 @@ func verifyPegaHpa(t *testing.T, hpaObj *autoscaling.HorizontalPodAutoscaler, ex currentMetricIndex++ } + for key, expectedValue := range expectedHpa.labels { + actual := hpaObj.Labels[key] + require.NotNil(t, actual) + require.Equal(t, expectedValue, actual) + } + require.Equal(t, int32(5), hpaObj.Spec.MaxReplicas) } @@ -218,6 +278,7 @@ type hpa struct { targetRefName string kind string apiversion string + labels map[string]string cpu bool cpuValue resource.Quantity cpuPercent int32 diff --git a/terratest/src/test/pega/pega-tier-pdb_test.go b/terratest/src/test/pega/pega-tier-pdb_test.go index b8ddb1a4f..0997267ce 100644 --- a/terratest/src/test/pega/pega-tier-pdb_test.go +++ b/terratest/src/test/pega/pega-tier-pdb_test.go @@ -65,6 +65,64 @@ func TestPegaTierPDBEnabled(t *testing.T) { } } +func TestPegaTierPDBWithCustomLabels(t *testing.T) { + var supportedVendors = []string{"k8s", "openshift", "eks", "gke", "aks", 
"pks"} + var supportedOperations = []string{"deploy", "install-deploy"} + var deploymentNames = []string{"pega", "myapp-dev"} + + webPDBLabels := map[string]string{"weblabel": "somevalue", "anotherlabel": "anothervalue"} + batchPDBLabels := map[string]string{"batchlabel": "batchvalue", "anotherbatchlabel": "batchvalue2"} + + helmChartPath, err := filepath.Abs(PegaHelmChartPath) + require.NoError(t, err) + + testsPath, err := filepath.Abs(PegaHelmChartTestsPath) + require.NoError(t, err) + + for _, vendor := range supportedVendors { + + for _, operation := range supportedOperations { + + for _, depName := range deploymentNames { + + fmt.Println(vendor + "-" + operation) + + var options = &helm.Options{ + SetValues: map[string]string{ + "global.deployment.name": depName, + "global.provider": vendor, + "global.actions.execute": operation, + }, + } + + yamlContent := RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-pdb.yaml"}, "--values", testsPath+"/data/values_pdb_custom_labels.yaml") + verifyPegaPDBs(t, yamlContent, options, []pdb{ + { + name: getObjName(options, "-web-pdb"), + kind: "PodDisruptionBudget", + apiversion: "policy/v1beta1", + labels: webPDBLabels, + minAvailable: 1, + }, + { + name: getObjName(options, "-batch-pdb"), + kind: "PodDisruptionBudget", + apiversion: "policy/v1beta1", + labels: batchPDBLabels, + minAvailable: 1, + }, + { + name: getObjName(options, "-stream-pdb"), + kind: "PodDisruptionBudget", + apiversion: "policy/v1beta1", + minAvailable: 1, + }, + }) + } + } + } +} + // TestPegaTierPDBDisabled - verify that a PodDisruptionBudget is not created when global.tier.pdb.enabled=false func TestPegaTierPDBDisabled(t *testing.T) { var supportedVendors = []string{"k8s", "openshift", "eks", "gke", "aks", "pks"} @@ -121,11 +179,18 @@ func verifyPegaPdb(t *testing.T, pegaPdbObj *v1beta1.PodDisruptionBudget, expect //kubernetes 1.21 or higher, and we should adjust this test to use the policy/v1 API version require.Equal(t, pegaPdbObj.TypeMeta.APIVersion, expectedPdb.apiversion) require.Equal(t, expectedPdb.minAvailable, pegaPdbObj.Spec.MinAvailable.IntVal) + + for key, expectedValue := range expectedPdb.labels { + actual := pegaPdbObj.Labels[key] + require.NotNil(t, actual) + require.Equal(t, expectedValue, actual) + } } type pdb struct { name string kind string apiversion string + labels map[string]string minAvailable int32 } From 0200cd316e7b60262e3f275075f784599796a379 Mon Sep 17 00:00:00 2001 From: vnihal72 <79415342+vnihal72@users.noreply.github.com> Date: Fri, 27 Oct 2023 12:17:17 +0530 Subject: [PATCH 02/12] Updated Hazelcast heap setting to derive from available RAM (#658) * Updated Hazelcast heap setting to derive from available RAM * Test case fixed --------- Co-authored-by: vermn1 Co-authored-by: MadhuriArugula --- charts/pega/charts/hazelcast/values.yaml | 4 ++-- .../test/pega/clustering-service-environment-config_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/pega/charts/hazelcast/values.yaml b/charts/pega/charts/hazelcast/values.yaml index 61b81441c..56079d303 100644 --- a/charts/pega/charts/hazelcast/values.yaml +++ b/charts/pega/charts/hazelcast/values.yaml @@ -38,8 +38,8 @@ client: clusterName: "PRPC" # Server side settings for Hazelcast server: - java_opts: "-Xms820m -Xmx820m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/opt/hazelcast/logs/heapdump.hprof - -XX:+UseParallelGC -Xlog:gc*,gc+phases=debug:file=/opt/hazelcast/logs/gc.log:time,pid,tags:filecount=5,filesize=3m" + java_opts: 
"-XX:MaxRAMPercentage=80.0 -XX:InitialRAMPercentage=80.0 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/opt/hazelcast/logs/heapdump.hprof + -XX:+UseParallelGC -Xlog:gc*,gc+phases=debug:file=/opt/hazelcast/logs/gc.log:time,pid,tags:filecount=5,filesize=3m -XshowSettings:vm" jmx_enabled: "true" health_monitoring_level: "OFF" operation_generic_thread_count: "" diff --git a/terratest/src/test/pega/clustering-service-environment-config_test.go b/terratest/src/test/pega/clustering-service-environment-config_test.go index 53470fb74..cca1a2c0f 100644 --- a/terratest/src/test/pega/clustering-service-environment-config_test.go +++ b/terratest/src/test/pega/clustering-service-environment-config_test.go @@ -49,7 +49,7 @@ func VerifyClusteringServiceEnvironmentConfig(t *testing.T, yamlContent string, UnmarshalK8SYaml(t, statefulInfo, &clusteringServiceEnvConfigMap) clusteringServiceEnvConfigData := clusteringServiceEnvConfigMap.Data require.Equal(t, clusteringServiceEnvConfigData["NAMESPACE"], "default") - require.Equal(t, clusteringServiceEnvConfigData["JAVA_OPTS"], "-Xms820m -Xmx820m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/opt/hazelcast/logs/heapdump.hprof -XX:+UseParallelGC -Xlog:gc*,gc+phases=debug:file=/opt/hazelcast/logs/gc.log:time,pid,tags:filecount=5,filesize=3m") + require.Equal(t, clusteringServiceEnvConfigData["JAVA_OPTS"], "-XX:MaxRAMPercentage=80.0 -XX:InitialRAMPercentage=80.0 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/opt/hazelcast/logs/heapdump.hprof -XX:+UseParallelGC -Xlog:gc*,gc+phases=debug:file=/opt/hazelcast/logs/gc.log:time,pid,tags:filecount=5,filesize=3m -XshowSettings:vm") require.Equal(t, clusteringServiceEnvConfigData["SERVICE_NAME"], "clusteringservice-service") require.Equal(t, clusteringServiceEnvConfigData["MIN_CLUSTER_SIZE"], "3") require.Equal(t, clusteringServiceEnvConfigData["JMX_ENABLED"], "true") From f8737ef3246fd3ce97d8ac3958317058c5957906 Mon Sep 17 00:00:00 2001 From: Divyansh Bhowmick <126799799+bhowd1@users.noreply.github.com> Date: Fri, 27 Oct 2023 14:23:52 +0530 Subject: [PATCH 03/12] US-563606-1: Upgrade ES Server Version 7.10.2 to 7.17.9 for Internal Elasticsearch (#647) * US-563606-1: Upgrade ES Server Default Version * Updated Makefile to update certs in case of TLS * Updated make file for external secrets update * Removed legacy flag * Updated README file. 
* Updated Readme * Updated Readme * Update comment to explain es helm & server version * Lint changes * Updated Readme and Make file * Lint fixes * Lint fixes --------- Co-authored-by: MadhuriArugula --- charts/backingservices/Makefile | 12 +++++++++++ charts/backingservices/charts/srs/README.md | 24 +++++++++++++++++++-- charts/backingservices/requirements.yaml | 3 ++- charts/backingservices/values.yaml | 7 +++--- 4 files changed, 40 insertions(+), 6 deletions(-) diff --git a/charts/backingservices/Makefile b/charts/backingservices/Makefile index fbdc4e663..e83a7aa70 100644 --- a/charts/backingservices/Makefile +++ b/charts/backingservices/Makefile @@ -26,3 +26,15 @@ purge-es-secrets: external-es-secrets: kubectl create secret generic srs-certificates --from-file=$(PATH_TO_CERTIFICATE) --namespace=$(NAMESPACE) + +purge-srs-secrets: + kubectl delete secrets srs-certificates --namespace=$(NAMESPACE) || true + +purge-secrets: purge-es-secrets + make purge-srs-secrets + +update-secrets: purge-secrets + make es-prerequisite + +update-external-es-secrets: purge-srs-secrets + make external-es-secrets \ No newline at end of file diff --git a/charts/backingservices/charts/srs/README.md b/charts/backingservices/charts/srs/README.md index 1c2a5fbf4..feffbb959 100644 --- a/charts/backingservices/charts/srs/README.md +++ b/charts/backingservices/charts/srs/README.md @@ -57,7 +57,7 @@ To deploy Pega Platform with the SRS backing service, the SRS helm chart require | `deploymentName` | Specify the name of your SRS cluster. Your deployment creates resources prefixed with this string. This is also the service name for the SRS. | | `srsRuntime` | Use this section to define specific resource configuration options like image, replica count, cpu and memory resource settings in the SRS. | | `busybox` | When provisioning an internally managed Elasticsearch cluster, you can customize the location and pull policy of the Alpine image used during the deployment process by specifying `busybox.image` and `busybox.imagePullPolicy`. | -| `elasticsearch` | Define the elasticsearch cluster configurations. The [Elasticsearch](https://github.com/helm/charts/tree/master/stable/elasticsearch/values.yaml) chart defines the values for Elasticsearch provisioning in the SRS cluster. For internally provisioned Elasticsearch the default version is set to `7.10.2`. Set the `elasticsearch.imageTag` parameter in values.yaml to `7.16.3` to use this supported version in the SRS cluster. | +| `elasticsearch` | Define the elasticsearch cluster configurations. The [Elasticsearch](https://github.com/helm/charts/tree/master/stable/elasticsearch/values.yaml) chart defines the values for Elasticsearch provisioning in the SRS cluster. For internally provisioned Elasticsearch the default version is set to `7.17.9`. Set the `elasticsearch.imageTag` parameter in values.yaml to `7.16.3` to use this supported version in the SRS cluster. | | `k8sProvider` | Specify your Kubernetes provider name. Supported values are [`eks`, `aks`, `minikube`, `gke`, `openshift`, `pks`].. ### Enabling security between SRS and Elasticsearch @@ -78,7 +78,7 @@ make external-es-secrets NAMESPACE=pegabackingservices ELASTICSEARCH_VERSION=7.1 | `tls` | Set to `true` to enable the SRS service to authenticate to your organization's available Elasticsearch service. | | `esCredentials.username` | Enter the username for your available Elasticsearch service. This username value must match the values you set in the connection info section of esCredentials. 
 | `esCredentials.password` | Enter the required password for your available Elasticsearch service. This password value must match the values you set in the connection info section of esCredentials. |
-| `srsStorage.provisionInternalESCluster` |<br>  • Set to false to disable this parameter and connect to your available Elasticsearch service from the SRS cluster. Disabling this setting requires you to provide connectivity details to your organization's external Elasticsearch service along with an appropriate TLS certificate with which you authenticate with the service. To pass the required certificate to the cluster using a secrets file, run the command, `$ make external-es-secrets NAMESPACE= ELASTICSEARCH_VERSION= PATH_TO_CERTIFICATE=`.<br>  • where NAMESPACE references your deployment namespace of the SRS cluster, `ELASTICSEARCH_VERSION` matches the Elasticsearch version you want to use, and `PATH_TO_CERTIFICATE` points to the location where you copied the required certificates on your location machine.<br> |
+| `srsStorage.provisionInternalESCluster` |<br>  • Set to false to disable this parameter and connect to your available Elasticsearch service from the SRS cluster. Disabling this setting requires you to provide connectivity details to your organization's external Elasticsearch service along with an appropriate TLS certificate with which you authenticate with the service. To pass the required certificate to the cluster using a secrets file, run the command, `$ make external-es-secrets NAMESPACE= ELASTICSEARCH_VERSION= PATH_TO_CERTIFICATE=`.<br>  • where NAMESPACE references your deployment namespace of the SRS cluster, `ELASTICSEARCH_VERSION` matches the Elasticsearch version you want to use, and `PATH_TO_CERTIFICATE` points to the location where you copied the required certificates on your location machine.<br>  • Use the following Make command to update the SRS and External Elasticsearch certificates: `$ make update-external-es-secrets NAMESPACE= PATH_TO_CERTIFICATE=`.<br>
| | `domain` | Enter the DNS entry associated with your external Elasticsearch service. | Note: Only .p12 and .jks certificates are supported. @@ -157,3 +157,23 @@ srs: requireInternetAccess: false ``` +### Steps to upgrade SRS (with Internal Elasticsearch) to Kubernetes Cluster Version >=1.25 + +To support SRS on Kubernetes version >=1.25 you need to use Elasticsearch server version 7.17.9. If you are using an earlier version (7.10.2 or 7.16.3) of Elasticsearch in your deployment, to upgrade to 7.17.9, you need to perform the following steps: +1. Get the latest backingservices Helm chart which supports `k8s version >=1.25`. +2. Update the SRS and Elasticsearch certificates by running the following Make command: + ```bash + make update-secrets NAMESPACE= ELASTICSEARCH_VERSION=7.17.9 + ``` +3. To use Elasticsearch version 7.17.9, inspect the values.yaml file from the latest backingservices helm chart and confirm if the imageTag parameter in the values.yaml file is same as in the example below: + ```yaml + elasticsearch: + imageTag: 7.17.9 + ``` +4. Upgrade your deployment using the following Helm command: + ```bash + helm upgrade backingservices pega/backingservices --version --namespace --values + ``` +5. Verify that the Elasticsearch pods status is Running. +6. Restart the old SRS pods and verify that the status of the new pods is Running. +7. Verify all pods are running and working as expected. \ No newline at end of file diff --git a/charts/backingservices/requirements.yaml b/charts/backingservices/requirements.yaml index 891dcd358..5bd71adf5 100644 --- a/charts/backingservices/requirements.yaml +++ b/charts/backingservices/requirements.yaml @@ -3,9 +3,10 @@ # NOTE: For kubernetes version >=1.25 or Elasticsearch version 7.17.9, # use 7.17.3 for the elasticsearch 'version' parameter below (for Elasticsearch version 7.17.9, you will still use 7.17.9 in the backingservices values.yaml). # To disable deploying Elasticsearch in SRS, set the 'srs.srsStorage.provisionInternalESCluster' parameter in backingservices values.yaml to false. +# The dependencies.version parameter refers to the Elastcisearch Helm chart version, not Elasticsearch server version. dependencies: - name: elasticsearch - version: "7.10.2" + version: "7.17.3" repository: https://helm.elastic.co/ condition: srs.srsStorage.provisionInternalESCluster - name: constellation diff --git a/charts/backingservices/values.yaml b/charts/backingservices/values.yaml index 502574921..b4a5cce18 100644 --- a/charts/backingservices/values.yaml +++ b/charts/backingservices/values.yaml @@ -80,9 +80,10 @@ constellation: # based on helm charts defined at https://github.com/elastic/helm-charts/tree/master/elasticsearch and may be modified # as per runtime and storage requirements. elasticsearch: - # for internally provisioned elasticsearch version is set to 7.10.2. Use this imageTag configuration to update it to 7.16.3 or - # 7.17.9 if required. However, we strongly recommend to use version 7.17.9. - imageTag: 7.10.2 + # For internally provisioned Elasticsearch server, the imageTag parameter is set by default to 7.17.9, which is the recommended Elasticsearch server version + # for k8s version >= 1.25. + # Use this parameter to change it to 7.10.2 or 7.16.3 for k8s version < 1.25 and make sure to update the Elasticsearch helm chart version in requirements.yaml. + imageTag: 7.17.9 # Permit co-located instances for solitary minikube virtual machines. antiAffinity: "soft" # Shrink default JVM heap. 
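
The Elasticsearch Helm chart version in requirements.yaml and the Elasticsearch server image tag in values.yaml move together in this change. A minimal side-by-side sketch for the default, internally provisioned cluster (the file-name comments are editorial and not part of either file):

```yaml
# requirements.yaml -- Helm chart version of the Elasticsearch dependency (not the server version)
dependencies:
  - name: elasticsearch
    version: "7.17.3"
    repository: https://helm.elastic.co/
    condition: srs.srsStorage.provisionInternalESCluster

# values.yaml -- Elasticsearch server image tag pulled for the internally provisioned cluster
elasticsearch:
  imageTag: 7.17.9
```

If you stay on Kubernetes < 1.25 and pin `imageTag` back to 7.10.2 or 7.16.3, the chart version in requirements.yaml has to move back with it, as the comments in the patch note.
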
From fd4bf798f6e669b081881c1e2ee3a7465459d61b Mon Sep 17 00:00:00 2001 From: vargm Date: Fri, 27 Oct 2023 16:21:49 +0100 Subject: [PATCH 04/12] Issue #643: set same default initial and max heap value (#646) Co-authored-by: Davis Walsh --- charts/pega/README.md | 2 +- charts/pega/templates/_helpers.tpl | 2 +- terratest/src/test/pega/pega-tier-deployment_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/pega/README.md b/charts/pega/README.md index b8e8fba5b..e48632596 100644 --- a/charts/pega/README.md +++ b/charts/pega/README.md @@ -451,7 +451,7 @@ Parameter | Description | Defau `cpuLimit` | CPU limit for pods in the current tier. | `4` `memRequest` | Initial memory request for pods in the current tier. | `12Gi` `memLimit` | Memory limit for pods in the current tier. | `12Gi` -`initialHeap` | Specify the initial heap size of the JVM. | `4096m` +`initialHeap` | Specify the initial heap size of the JVM. | `8192m` `maxHeap` | Specify the maximum heap size of the JVM. | `8192m` ### JVM Arguments diff --git a/charts/pega/templates/_helpers.tpl b/charts/pega/templates/_helpers.tpl index 29232c51a..dfa3a714b 100644 --- a/charts/pega/templates/_helpers.tpl +++ b/charts/pega/templates/_helpers.tpl @@ -241,7 +241,7 @@ until cqlsh -u {{ $cassandraUser | quote }} -p {{ $cassandraPassword | quote }} {{- if .node.initialHeap }} value: "{{ .node.initialHeap }}" {{- else }} - value: "4096m" + value: "8192m" {{- end }} # Maximum JVM heap size, equivalent to -Xmx - name: MAX_HEAP diff --git a/terratest/src/test/pega/pega-tier-deployment_test.go b/terratest/src/test/pega/pega-tier-deployment_test.go index 5fde1ae94..39ed19f46 100644 --- a/terratest/src/test/pega/pega-tier-deployment_test.go +++ b/terratest/src/test/pega/pega-tier-deployment_test.go @@ -198,7 +198,7 @@ func VerifyDeployment(t *testing.T, pod *k8score.PodSpec, expectedSpec pegaDeplo require.Equal(t, "", pod.Containers[0].Env[envIndex].Value) envIndex++ require.Equal(t, "INITIAL_HEAP", pod.Containers[0].Env[envIndex].Name) - require.Equal(t, "4096m", pod.Containers[0].Env[envIndex].Value) + require.Equal(t, "8192m", pod.Containers[0].Env[envIndex].Value) envIndex++ require.Equal(t, "MAX_HEAP", pod.Containers[0].Env[envIndex].Name) require.Equal(t, "8192m", pod.Containers[0].Env[envIndex].Value) From c157394ff83495712e47f2942882776c63fa46a7 Mon Sep 17 00:00:00 2001 From: Kinga Kowalska <120555574+kingakowalska1@users.noreply.github.com> Date: Wed, 15 Nov 2023 09:22:37 +0100 Subject: [PATCH 05/12] BUG-836823 - Fixed typo "Zero-downtime" to "zero-downtime" (#664) --- docs/upgrading-pega-deployment-zero-downtime.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/upgrading-pega-deployment-zero-downtime.md b/docs/upgrading-pega-deployment-zero-downtime.md index a4d9326af..1b496d3b3 100644 --- a/docs/upgrading-pega-deployment-zero-downtime.md +++ b/docs/upgrading-pega-deployment-zero-downtime.md @@ -113,7 +113,7 @@ To complete an upgrade with zero downtime, configure the following settings in - In the installer section of the Helm chart, update the following: - Specify `installer.installerMountVolumeClaimName` persistent Volume Claim name. This is a client-managed PVC for mounting upgrade artifacts. - - Specify `installer.upgradeType: "Zero-downtime"` to use the zero-downtime upgrade process. + - Specify `installer.upgradeType: "zero-downtime"` to use the zero-downtime upgrade process. 
- Specify `installer.targetRulesSchema: ""` and `installer.targetDataSchema: ""` for the new target and data schema name that the process creates in your existing database for the upgrade process. - Specify `installer.upgrade.automaticResumeEnabled` to support resuming from point of failure @@ -202,4 +202,4 @@ In this document, you specify that the Helm chart always “deploys” by using - `action.execute: upgrade-deploy` - `installer.upgrade.upgradeType: custom` - `installer.upgrade.upgradeSteps: disable_cluster_upgrade` to run disable_cluster_upgrade -- Resume the upgrade process by using the `helm upgrade release --namespace mypega` command. For more information, see - [Upgrading your Pega Platform deployment using the command line](https://github.com/pegasystems/pega-helm-charts/blob/master/docs/upgrading-pega-deployment-zero-downtime.md#upgrading-your-pega-platform-deployment-using-the-command-line). \ No newline at end of file +- Resume the upgrade process by using the `helm upgrade release --namespace mypega` command. For more information, see - [Upgrading your Pega Platform deployment using the command line](https://github.com/pegasystems/pega-helm-charts/blob/master/docs/upgrading-pega-deployment-zero-downtime.md#upgrading-your-pega-platform-deployment-using-the-command-line). From 25ddd3c1f093871598f69c7135ee4b334bd91650 Mon Sep 17 00:00:00 2001 From: pega-Abhinav <110885740+pega-Abhinav@users.noreply.github.com> Date: Fri, 17 Nov 2023 11:52:00 +0530 Subject: [PATCH 06/12] BUG-830220 : Removed security/urlaccessmode configuration as it is no longer applicable (#666) --- charts/pega/Ephemeral-web-tier-values.yaml | 1 - charts/pega/config/deploy/prconfig.xml | 1 - terratest/src/test/pega/data/expectedInstallDeployPrconfig.xml | 1 - 3 files changed, 3 deletions(-) diff --git a/charts/pega/Ephemeral-web-tier-values.yaml b/charts/pega/Ephemeral-web-tier-values.yaml index f7159fbc1..4e5d9c99f 100644 --- a/charts/pega/Ephemeral-web-tier-values.yaml +++ b/charts/pega/Ephemeral-web-tier-values.yaml @@ -94,7 +94,6 @@ global: - diff --git a/charts/pega/config/deploy/prconfig.xml b/charts/pega/config/deploy/prconfig.xml index a72ea2f01..96e9c2e78 100644 --- a/charts/pega/config/deploy/prconfig.xml +++ b/charts/pega/config/deploy/prconfig.xml @@ -6,7 +6,6 @@ - diff --git a/terratest/src/test/pega/data/expectedInstallDeployPrconfig.xml b/terratest/src/test/pega/data/expectedInstallDeployPrconfig.xml index a72ea2f01..96e9c2e78 100644 --- a/terratest/src/test/pega/data/expectedInstallDeployPrconfig.xml +++ b/terratest/src/test/pega/data/expectedInstallDeployPrconfig.xml @@ -6,7 +6,6 @@ - From d2bdebfbb69fb6337410186c69b43b55a632c9e4 Mon Sep 17 00:00:00 2001 From: Kinga Kowalska <120555574+kingakowalska1@users.noreply.github.com> Date: Tue, 21 Nov 2023 15:18:38 +0100 Subject: [PATCH 07/12] [BUG-830515] Fixed link for Node types for VM-based and containerized deployments (#644) --- charts/pega/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/pega/README.md b/charts/pega/README.md index e48632596..7e4355b33 100644 --- a/charts/pega/README.md +++ b/charts/pega/README.md @@ -270,7 +270,7 @@ Node classification is the process of separating nodes by purpose, predefining t Specify the list of Pega node types for this deployment. For more information about valid node types, see the Pega Community article on [Node Classification]. 
-[Node types for client-managed cloud environments](https://community.pega.com/knowledgebase/articles/performance/node-classification) +[Node types for VM-based and containerized deployments](https://docs.pega.com/bundle/platform-88/page/platform/system-administration/node-types-on-premises.html) Example: From 88cca869773c91334b2d9725b7647e4862ed0768 Mon Sep 17 00:00:00 2001 From: Davis Walsh Date: Tue, 21 Nov 2023 17:55:50 -0500 Subject: [PATCH 08/12] Fix level for usage metric logging (#667) * Fix level for usage metric logging * update test --------- Co-authored-by: Adam Talbot --- charts/pega/config/deploy/prlog4j2.xml | 2 +- terratest/src/test/pega/data/expectedInstallDeployPRlog4j2.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/pega/config/deploy/prlog4j2.xml b/charts/pega/config/deploy/prlog4j2.xml index 88d7fc3e4..860959a77 100644 --- a/charts/pega/config/deploy/prlog4j2.xml +++ b/charts/pega/config/deploy/prlog4j2.xml @@ -160,7 +160,7 @@ - + diff --git a/terratest/src/test/pega/data/expectedInstallDeployPRlog4j2.xml b/terratest/src/test/pega/data/expectedInstallDeployPRlog4j2.xml index 3b4bb30fc..c1910278b 100644 --- a/terratest/src/test/pega/data/expectedInstallDeployPRlog4j2.xml +++ b/terratest/src/test/pega/data/expectedInstallDeployPRlog4j2.xml @@ -160,7 +160,7 @@ - + From 9ccd6b510493e83d7a33c56cfee2d886491c03b0 Mon Sep 17 00:00:00 2001 From: anilkumargedela <77138263+anilkumargedela@users.noreply.github.com> Date: Mon, 27 Nov 2023 15:15:28 +0530 Subject: [PATCH 09/12] SRS: Added truststore password input for TLS connection b/w SRS and ES (#669) * [SRS] Added truststore password input for TLS connection b/w SRS and ES --- charts/backingservices/charts/srs/README.md | 6 +++++- .../charts/srs/templates/srsservice_deployment.yaml | 2 ++ charts/backingservices/values.yaml | 4 +++- terratest/src/test/backingservices/srs-deployment_test.go | 3 +++ 4 files changed, 13 insertions(+), 2 deletions(-) diff --git a/charts/backingservices/charts/srs/README.md b/charts/backingservices/charts/srs/README.md index feffbb959..278d53fe2 100644 --- a/charts/backingservices/charts/srs/README.md +++ b/charts/backingservices/charts/srs/README.md @@ -76,6 +76,8 @@ make external-es-secrets NAMESPACE=pegabackingservices ELASTICSEARCH_VERSION=7.1 | Configuration | Usage | |-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `tls` | Set to `true` to enable the SRS service to authenticate to your organization's available Elasticsearch service. | +| `certificateName` | Enter the tls certificate name. Default certificate name will be "elastic-certificates.p12" if not used. 
+| `certificatePassword` | Enter the tls certificate password if any. Default value will be empty if not used. |
 | `esCredentials.username` | Enter the username for your available Elasticsearch service. This username value must match the values you set in the connection info section of esCredentials. |
 | `esCredentials.password` | Enter the required password for your available Elasticsearch service. This password value must match the values you set in the connection info section of esCredentials. |
 | `srsStorage.provisionInternalESCluster` |<br>  • Set to false to disable this parameter and connect to your available Elasticsearch service from the SRS cluster. Disabling this setting requires you to provide connectivity details to your organization's external Elasticsearch service along with an appropriate TLS certificate with which you authenticate with the service. To pass the required certificate to the cluster using a secrets file, run the command, `$ make external-es-secrets NAMESPACE= ELASTICSEARCH_VERSION= PATH_TO_CERTIFICATE=`.<br>  • where NAMESPACE references your deployment namespace of the SRS cluster, `ELASTICSEARCH_VERSION` matches the Elasticsearch version you want to use, and `PATH_TO_CERTIFICATE` points to the location where you copied the required certificates on your location machine.<br>  • Use the following Make command to update the SRS and External Elasticsearch certificates: `$ make update-external-es-secrets NAMESPACE= PATH_TO_CERTIFICATE=`.<br>
| @@ -140,8 +142,10 @@ srs: # Set srs.srsStorage.tls.enabled: true to enable the use of TLS-based authentication to your Elasticsearch service whether is it running as an internalized or externalized service in your SRS cluster. tls: enabled: false - # To specify a certificate used to authenticate an external Elasticsearch service (with tls.enabled: true and srsStorage.provisionInternalESCluster: false), uncomment the following line to specify the TLS certificate name for your Elasticsearch service. + # To specify a certificate used to authenticate an external Elasticsearch service (with tls.enabled: true and srsStorage.provisionInternalESCluster: false), uncomment the following lines to specify the TLS certificate name with password for your Elasticsearch service. + # Default certificatePassword value will be empty if not used. # certificateName: "Certificate_Name" + # certificatePassword: "password" # Set srs.srsStorage.basicAuthentication.enabled: true to enable the use of basic authentication to your Elasticsearch service whether is it running as an internalized or externalized service in your SRS cluster. basicAuthentication: enabled: true diff --git a/charts/backingservices/charts/srs/templates/srsservice_deployment.yaml b/charts/backingservices/charts/srs/templates/srsservice_deployment.yaml index acd6b06f0..feaa8bd7b 100644 --- a/charts/backingservices/charts/srs/templates/srsservice_deployment.yaml +++ b/charts/backingservices/charts/srs/templates/srsservice_deployment.yaml @@ -77,6 +77,8 @@ spec: key: password - name: PATH_TO_TRUSTSTORE value: "/usr/share/{{ .Values.srsStorage.certificateName | default "elastic-certificates.p12"}}" + - name: PATH_TO_KEYSTORE + value: "{{ .Values.srsStorage.certificatePassword | default ""}}" {{- end}} - name: APPLICATION_HOST value: "0.0.0.0" diff --git a/charts/backingservices/values.yaml b/charts/backingservices/values.yaml index b4a5cce18..0563e8bea 100644 --- a/charts/backingservices/values.yaml +++ b/charts/backingservices/values.yaml @@ -53,8 +53,10 @@ srs: tls: enabled: false # To specify a certificate used to authenticate an external Elasticsearch service (with tls.enabled: true and srsStorage.provisionInternalESCluster: false), - # uncomment the following line to specify the TLS certificate name for your Elasticsearch service. + # uncomment the following lines to specify the TLS certificate name with password for your Elasticsearch service. + # Default certificatePassword value will be empty if not used. # certificateName: "Certificate_Name" + # certificatePassword: "password" # Set srs.srsStorage.basicAuthentication.enabled: true to enable the use of basic authentication to your Elasticsearch service # whether is it running as an internalized or externalized service in your SRS cluster. 
basicAuthentication: diff --git a/terratest/src/test/backingservices/srs-deployment_test.go b/terratest/src/test/backingservices/srs-deployment_test.go index dc1cd943b..ae7cdadf0 100644 --- a/terratest/src/test/backingservices/srs-deployment_test.go +++ b/terratest/src/test/backingservices/srs-deployment_test.go @@ -206,6 +206,9 @@ func VerifyDeployment(t *testing.T, pod *k8score.PodSpec, expectedSpec srsDeploy require.Equal(t, "PATH_TO_TRUSTSTORE", pod.Containers[0].Env[envIndex].Name) require.Equal(t, "/usr/share/elastic-certificates.p12", pod.Containers[0].Env[envIndex].Value) envIndex++ + require.Equal(t, "PATH_TO_KEYSTORE", pod.Containers[0].Env[envIndex].Name) + require.Equal(t, "", pod.Containers[0].Env[envIndex].Value) + envIndex++ } require.Equal(t, "APPLICATION_HOST", pod.Containers[0].Env[envIndex].Name) require.Equal(t, "0.0.0.0", pod.Containers[0].Env[envIndex].Value) From 1dc316c73a1f899a955aca8ef2434e64db0948a0 Mon Sep 17 00:00:00 2001 From: akshithac-21 <59637571+akshithac-21@users.noreply.github.com> Date: Tue, 5 Dec 2023 11:35:48 +0530 Subject: [PATCH 10/12] support for compressed configuration (#672) --- charts/pega/README.md | 17 +++++++++++++++++ .../pega/templates/pega-environment-config.yaml | 6 ++++++ charts/pega/values.yaml | 4 ++++ 3 files changed, 27 insertions(+) diff --git a/charts/pega/README.md b/charts/pega/README.md index 7e4355b33..f56f6c8d5 100644 --- a/charts/pega/README.md +++ b/charts/pega/README.md @@ -652,6 +652,23 @@ tier: webXML: |- ... ``` +### Pega compressed configuration files + +To use [Pega configuration files](https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#pega-configuration-files) in compressed format when deploying Pega Platform, replace each file with its compressed format file by completing the following steps: + +1) Compress each configuration file using the following command in your local terminal: +``` +- cat "" | gzip -c | base64 +``` +Example for a prconfig.xml file: +``` +cat "pega-helm-charts/charts/pega/config/deploy/prconfig.xml" | gzip -c | base64 +``` +2) Provide the file content with the output of the command for each file executed. +3) Set the `compressedConfigurations` in values.yaml to `true`, as in the following example: +```yaml + compressedConfigurations: true +``` ### Pega diagnostic user diff --git a/charts/pega/templates/pega-environment-config.yaml b/charts/pega/templates/pega-environment-config.yaml index 44b2521c1..04e2e9482 100644 --- a/charts/pega/templates/pega-environment-config.yaml +++ b/charts/pega/templates/pega-environment-config.yaml @@ -43,6 +43,12 @@ data: JDBC_TIMEOUT_PROPERTIES_RO: {{ .Values.global.jdbc.readerConnectionTimeoutProperties }} {{- else }} JDBC_TIMEOUT_PROPERTIES_RO: "" +{{- end }} + # compression flag to decompress the config files of Pega Installation. +{{- if .Values.global.compressedConfigurations }} + IS_PEGA_CONFIG_COMPRESSED: "{{ .Values.global.compressedConfigurations }}" +{{- else }} + IS_PEGA_CONFIG_COMPRESSED: "false" {{- end }} # Rules schema of the Pega installation {{ if (eq (include "performUpgradeAndDeployment" .) "true") }} diff --git a/charts/pega/values.yaml b/charts/pega/values.yaml index e375fc52a..83c9cdf5b 100644 --- a/charts/pega/values.yaml +++ b/charts/pega/values.yaml @@ -121,6 +121,10 @@ global: serviceHost: "API_SERVICE_ADDRESS" httpsServicePort: "SERVICE_PORT_HTTPS" + # Set the `compressedConfigurations` parameter to `true` when the configuration files under charts/pega/config/deploy are in compressed format. 
+ # For more information, see the “Pega compressed configuration files” section in the Pega Helm chart documentation. + compressedConfigurations: false + # Specify the Pega tiers to deploy tier: - name: "web" From 241bbb92e4078a55bf9e670269af22ec599b81d2 Mon Sep 17 00:00:00 2001 From: saran-teja-7 <126812400+saran-teja-7@users.noreply.github.com> Date: Wed, 6 Dec 2023 16:48:00 +0530 Subject: [PATCH 11/12] US-583096 : Updated SRS Docker Image version from 1.25.3 to 1.28.0 (#675) --- charts/backingservices/charts/srs/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/backingservices/charts/srs/README.md b/charts/backingservices/charts/srs/README.md index 278d53fe2..90b0f0c61 100644 --- a/charts/backingservices/charts/srs/README.md +++ b/charts/backingservices/charts/srs/README.md @@ -18,19 +18,19 @@ The service deployment provisions runtime service pods along with a dependency o | Pega Infinity version | SRS version | Elasticsearch version | Description | |-----------------------|-------------|-----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | < 8.6 | NA | NA | SRS can be used with Pega Infinity 8.6 and later | -| \>= 8.6 | 1.25.3 | 7.10.2, 7.16.3, and 7.17.9 | While SRS Docker images are certified against Elasticsearch versions 7.10.2, 7.16.3 and 7.17.9, Pega recommends using Elasticsearch version 7.17.9. To stay current with Pega releases, use the latest available SRS image 1.25.3. +| \>= 8.6 | 1.28.0 | 7.10.2, 7.16.3, and 7.17.9 | While SRS Docker images are certified against Elasticsearch versions 7.10.2, 7.16.3 and 7.17.9, Pega recommends using Elasticsearch version 7.17.9. To stay current with Pega releases, use the latest available SRS image 1.28.0. **Note**: **If your deployment uses the internally-provisioned Elasticsearch:** To migrate to Elasticsearch version 7.17.9 from the Elasticsearch version 7.10.2 or 7.16.3 use the process that applies to your deployment: -* Update the SRS Docker image version to use v1.25.3, which supports both Elasticsearch versions 7.10.x and 7.16.x. +* Update the SRS Docker image version to use v1.28.0, which supports both Elasticsearch versions 7.10.x and 7.16.x. * Update the Elasticsearch `dependencies.version` parameter in the [requirement.yaml](../../requirements.yaml) to 7.17.3. * Update Elasticsearch to 7.17.9. **If your deployment connects to an externally-managed Elasticsearch service:** To migrate to Elasticsearch version 7.17.9 from the Elasticsearch version 7.10.2 or 7.16.3 use the process that applies to your deployment: -* Update the SRS Docker image version to use v1.25.3, which supports both Elasticsearch versions 7.10.x and 7.16.x. +* Update the SRS Docker image version to use v1.28.0, which supports both Elasticsearch versions 7.10.x and 7.16.x. * Complete the version upgrade to 7.17.9. Refer to Elasticsearch version 7.17 documentation. For example, see [Upgrade Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/setup-upgrade.html). 
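
As a quick reference for the migration steps above, the version bumps land in the backingservices values roughly as follows. This is a sketch only: `srsRuntime.srsImage` is assumed here as the key that carries the SRS runtime image, and the image placeholder follows the chart's YOUR_* convention rather than a real repository path.

```yaml
srs:
  srsRuntime:
    # Assumed key for the SRS runtime image; confirm the exact name in your backingservices values.yaml.
    srsImage: "YOUR_SRS_IMAGE:1.28.0"
elasticsearch:
  # Elasticsearch server version certified with SRS 1.28.0 (see the matrix above).
  imageTag: 7.17.9
```
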
### SRS runtime configuration From f453712e6bbe7d3b73291e49f643021383262091 Mon Sep 17 00:00:00 2001 From: Kinga Kowalska <120555574+kingakowalska1@users.noreply.github.com> Date: Fri, 8 Dec 2023 17:04:21 +0100 Subject: [PATCH 12/12] BUG-822023 - Update readme to reflect current image rebuild policy (#678) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index c12efecc3..7033631d5 100644 --- a/README.md +++ b/README.md @@ -146,6 +146,8 @@ Digest: Status: Downloaded pega-docker.downloads.pega.com/platform/pega: ``` +All Docker images for Pega Platform releases that are in Standard Support undergo a nightly rebuild that applies the latest available updates and patches to all third-party components. To take advantage of these updates, you must redeploy your Pega Platform with the latest available images. Pega does not guarantee nightly rebuilds for Pega Platform releases in Extended Support and stops rebuilding images for Pega Platform releases that are out of Extended Support. + For details about downloading and then pushing Docker images to your repository for your deployment, see [Using Pega-provided Docker images](https://docs.pega.com/bundle/platform-88/page/platform/deployment/client-managed-cloud/pega-docker-images-manage.html). From Helm chart versions `2.2.0` and above, update your Pega Platform version to the latest patch version.
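
For reference, once an image has been pulled, re-tagged, and pushed to your own registry, the Pega chart reads the registry credentials and the image reference from the `global.docker` block of the values file. A minimal sketch, using the same placeholder style as the test values earlier in this series (the image path and tag shown are illustrative, not a required naming scheme):

```yaml
global:
  docker:
    registry:
      url: "YOUR_DOCKER_REGISTRY"
      username: "YOUR_DOCKER_REGISTRY_USERNAME"
      password: "YOUR_DOCKER_REGISTRY_PASSWORD"
    pega:
      # Reference the Pega Platform image you re-tagged and pushed to your registry.
      image: "YOUR_DOCKER_REGISTRY/platform/pega:TAG"
```
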