diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index d44ee934d0c9..bddda1e7e655 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -33,12 +33,322 @@ This is the generated reference for the Loki Helm Chart values. Default + + adminApi + object + Configuration for the `admin-api` target +
+{
+  "affinity": {},
+  "annotations": {},
+  "containerSecurityContext": {
+    "allowPrivilegeEscalation": false,
+    "capabilities": {
+      "drop": [
+        "ALL"
+      ]
+    },
+    "readOnlyRootFilesystem": true
+  },
+  "env": [],
+  "extraArgs": {},
+  "extraContainers": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "hostAliases": [],
+  "initContainers": [],
+  "labels": {},
+  "nodeSelector": {},
+  "podSecurityContext": {
+    "runAsGroup": 10001,
+    "runAsNonRoot": true,
+    "runAsUser": 10001
+  },
+  "readinessProbe": {
+    "httpGet": {
+      "path": "/ready",
+      "port": "http-metrics"
+    },
+    "initialDelaySeconds": 45
+  },
+  "replicas": 1,
+  "resources": {},
+  "service": {
+    "annotations": {},
+    "labels": {}
+  },
+  "strategy": {
+    "type": "RollingUpdate"
+  },
+  "terminationGracePeriodSeconds": 60,
+  "tolerations": []
+}
+
+ + + + adminApi.affinity + object + Affinity for admin-api Pods +
+{}
+
+ + + + adminApi.annotations + object + Additional annotations for the `admin-api` Deployment +
+{}
+
+ + + + adminApi.env + list + Configure optional environment variables +
+[]
+
+ + + + adminApi.extraArgs + object + Additional CLI arguments for the `admin-api` target +
+{}
+
+ + + + adminApi.extraContainers + list + Configure optional extraContainers +
+[]
+
+ + + + adminApi.extraVolumeMounts + list + Additional volume mounts for Pods +
+[]
+
+ + + + adminApi.extraVolumes + list + Additional volumes for Pods +
+[]
+
+ + + + adminApi.hostAliases + list + hostAliases to add +
+[]
+
+ + + + adminApi.initContainers + list + Configure optional initContainers +
+[]
+
+ + + + adminApi.labels + object + Additional labels for the `admin-api` Deployment +
+{}
+
+ + + + adminApi.nodeSelector + object + Node selector for admin-api Pods +
+{}
+
+ + + + adminApi.podSecurityContext + object + Run container as user `enterprise-logs(uid=10001)` `fsGroup` must not be specified, because these security options are applied on container level not on Pod level. +
+{
+  "runAsGroup": 10001,
+  "runAsNonRoot": true,
+  "runAsUser": 10001
+}
+
+ + + + adminApi.readinessProbe + object + Readiness probe +
+{
+  "httpGet": {
+    "path": "/ready",
+    "port": "http-metrics"
+  },
+  "initialDelaySeconds": 45
+}
+
+ + + + adminApi.replicas + int + Define the amount of instances +
+1
+
+ + + + adminApi.resources + object + Values are defined in small.yaml and large.yaml +
+{}
+
+ + + + adminApi.service + object + Additional labels and annotations for the `admin-api` Service +
+{
+  "annotations": {},
+  "labels": {}
+}
+
+ + + + adminApi.strategy + object + Update strategy +
+{
+  "type": "RollingUpdate"
+}
+
+ + + + adminApi.terminationGracePeriodSeconds + int + Grace period to allow the admin-api to shutdown before it is killed +
+60
+
+ + + + adminApi.tolerations + list + Tolerations for admin-api Pods +
+[]
+
+ + + + backend + object + Configuration for the backend pod(s) +
+{
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "backend"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "annotations": {},
+  "autoscaling": {
+    "behavior": {},
+    "enabled": false,
+    "maxReplicas": 6,
+    "minReplicas": 3,
+    "targetCPUUtilizationPercentage": 60,
+    "targetMemoryUtilizationPercentage": null
+  },
+  "dnsConfig": {},
+  "extraArgs": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "initContainers": [],
+  "nodeSelector": {},
+  "persistence": {
+    "dataVolumeParameters": {
+      "emptyDir": {}
+    },
+    "enableStatefulSetAutoDeletePVC": true,
+    "selector": null,
+    "size": "10Gi",
+    "storageClass": null,
+    "volumeClaimsEnabled": true
+  },
+  "podAnnotations": {},
+  "podLabels": {},
+  "podManagementPolicy": "Parallel",
+  "priorityClassName": null,
+  "replicas": 3,
+  "resources": {},
+  "selectorLabels": {},
+  "service": {
+    "annotations": {},
+    "labels": {}
+  },
+  "targetModule": "backend",
+  "terminationGracePeriodSeconds": 300,
+  "tolerations": [],
+  "topologySpreadConstraints": []
+}
+
+ + backend.affinity - string - Affinity for backend pods. Passed through `tpl` and, thus, to be configured as string + object + Affinity for backend pods.
-Hard node and soft zone anti-affinity
+Hard node anti-affinity
 
@@ -378,3103 +688,8823 @@ null - clusterLabelOverride - string - Overrides the chart's cluster label + bloomCompactor + object + Configuration for the bloom compactor
-null
+{
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "bloom-compactor"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "appProtocol": {
+    "grpc": ""
+  },
+  "command": null,
+  "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "hostAliases": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "initContainers": [],
+  "livenessProbe": {},
+  "nodeSelector": {},
+  "persistence": {
+    "annotations": {},
+    "claims": [
+      {
+        "name": "data",
+        "size": "10Gi",
+        "storageClass": null
+      }
+    ],
+    "enableStatefulSetAutoDeletePVC": false,
+    "enabled": false,
+    "size": "10Gi",
+    "storageClass": null,
+    "whenDeleted": "Retain",
+    "whenScaled": "Retain"
+  },
+  "podAnnotations": {},
+  "podLabels": {},
+  "priorityClassName": null,
+  "readinessProbe": {},
+  "replicas": 0,
+  "resources": {},
+  "serviceAccount": {
+    "annotations": {},
+    "automountServiceAccountToken": true,
+    "create": false,
+    "imagePullSecrets": [],
+    "name": null
+  },
+  "serviceLabels": {},
+  "terminationGracePeriodSeconds": 30,
+  "tolerations": []
+}
 
- enterprise.adminApi + bloomCompactor.affinity object - If enabled, the correct admin_client storage will be configured. If disabled while running enterprise, make sure auth is set to `type: trust`, or that `auth_enabled` is set to `false`. -
-{
-  "enabled": true
-}
+			Affinity for bloom compactor pods.
+			
+Hard node anti-affinity
 
- enterprise.adminToken.additionalNamespaces - list - Additional namespace to also create the token in. Useful if your Grafana instance is in a different namespace + bloomCompactor.appProtocol + object + Set the optional grpc service protocol. Ex: "grpc", "http2" or "https"
-[]
+{
+  "grpc": ""
+}
 
- enterprise.adminToken.secret + bloomCompactor.command string - Alternative name for admin token secret, needed by tokengen and provisioner jobs + Command to execute instead of defined in Docker image
 null
 
- enterprise.canarySecret - string - Alternative name of the secret to store token for the canary + bloomCompactor.extraArgs + list + Additional CLI args for the bloom compactor
-null
+[]
 
- enterprise.cluster_name - string - Optional name of the GEL cluster, otherwise will use .Release.Name The cluster name must match what is in your GEL license + bloomCompactor.extraContainers + list + Containers to add to the bloom compactor pods
-null
+[]
 
- enterprise.config - string - + bloomCompactor.extraEnv + list + Environment variables to add to the bloom compactor pods
-"{{- if .Values.enterprise.adminApi.enabled }}\n{{- if or .Values.minio.enabled (eq .Values.loki.storage.type \"s3\") (eq .Values.loki.storage.type \"gcs\") (eq .Values.loki.storage.type \"azure\") }}\nadmin_client:\n  storage:\n    s3:\n      bucket_name: {{ .Values.loki.storage.bucketNames.admin }}\n{{- end }}\n{{- end }}\nauth:\n  type: {{ .Values.enterprise.adminApi.enabled | ternary \"enterprise\" \"trust\" }}\nauth_enabled: {{ .Values.loki.auth_enabled }}\ncluster_name: {{ include \"loki.clusterName\" . }}\nlicense:\n  path: /etc/loki/license/license.jwt\n"
+[]
 
- enterprise.enabled - bool - + bloomCompactor.extraEnvFrom + list + Environment variables from secrets or configmaps to add to the bloom compactor pods
-false
+[]
 
- enterprise.externalConfigName - string - Name of the external config secret to use + bloomCompactor.extraVolumeMounts + list + Volume mounts to add to the bloom compactor pods
-""
+[]
 
- enterprise.externalLicenseName - string - Name of external license secret to use + bloomCompactor.extraVolumes + list + Volumes to add to the bloom compactor pods
-null
+[]
 
- enterprise.image.digest + bloomCompactor.hostAliases + list + hostAliases to add +
+[]
+
+ + + + bloomCompactor.image.registry string - Overrides the image tag with an image digest + The Docker registry for the bloom compactor image. Overrides `loki.image.registry`
 null
 
- enterprise.image.pullPolicy + bloomCompactor.image.repository string - Docker image pull policy + Docker image repository for the bloom compactor image. Overrides `loki.image.repository`
-"IfNotPresent"
+null
 
- enterprise.image.registry + bloomCompactor.image.tag string - The Docker registry + Docker image tag for the bloom compactor image. Overrides `loki.image.tag`
-"docker.io"
+null
 
- enterprise.image.repository - string - Docker image repository + bloomCompactor.initContainers + list + Init containers to add to the bloom compactor pods
-"grafana/enterprise-logs"
+[]
 
- enterprise.image.tag - string - Docker image tag + bloomCompactor.livenessProbe + object + liveness probe settings for bloom compactor pods. If empty use `loki.livenessProbe`
-null
+{}
 
- enterprise.license + bloomCompactor.nodeSelector object - Grafana Enterprise Logs license In order to use Grafana Enterprise Logs features, you will need to provide the contents of your Grafana Enterprise Logs license, either by providing the contents of the license.jwt, or the name Kubernetes Secret that contains your license.jwt. To set the license contents, use the flag `--set-file 'enterprise.license.contents=./license.jwt'` + Node selector for bloom compactor pods
-{
-  "contents": "NOTAVALIDLICENSE"
-}
+{}
 
- enterprise.provisioner + bloomCompactor.persistence.annotations object - Configuration for `provisioner` target + Annotations for bloom compactor PVCs
-{
-  "additionalTenants": [],
-  "annotations": {},
-  "enabled": true,
-  "env": [],
-  "extraVolumeMounts": [],
-  "image": {
-    "digest": null,
-    "pullPolicy": "IfNotPresent",
-    "registry": "docker.io",
-    "repository": "grafana/enterprise-logs-provisioner",
-    "tag": null
-  },
-  "labels": {},
-  "priorityClassName": null,
-  "provisionedSecretPrefix": null,
-  "securityContext": {
-    "fsGroup": 10001,
-    "runAsGroup": 10001,
-    "runAsNonRoot": true,
-    "runAsUser": 10001
-  }
-}
+{}
 
- enterprise.provisioner.additionalTenants + bloomCompactor.persistence.claims list - Additional tenants to be created. Each tenant will get a read and write policy and associated token. Tenant must have a name and a namespace for the secret containting the token to be created in. For example additionalTenants: - name: loki secretNamespace: grafana -
-[]
+			List of the bloom compactor PVCs
+			
+
 
- enterprise.provisioner.annotations - object - Additional annotations for the `provisioner` Job + bloomCompactor.persistence.enableStatefulSetAutoDeletePVC + bool + Enable StatefulSetAutoDeletePVC feature
-{}
+false
 
- enterprise.provisioner.enabled + bloomCompactor.persistence.enabled bool - Whether the job should be part of the deployment + Enable creating PVCs for the bloom compactor
-true
+false
 
- enterprise.provisioner.env - list - Additional Kubernetes environment + bloomCompactor.persistence.size + string + Size of persistent disk
-[]
+"10Gi"
 
- enterprise.provisioner.extraVolumeMounts - list - Volume mounts to add to the provisioner pods + bloomCompactor.persistence.storageClass + string + Storage class to be used. If defined, storageClassName: . If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
-[]
+null
 
- enterprise.provisioner.image + bloomCompactor.podAnnotations object - Provisioner image to Utilize + Annotations for bloom compactor pods
-{
-  "digest": null,
-  "pullPolicy": "IfNotPresent",
-  "registry": "docker.io",
-  "repository": "grafana/enterprise-logs-provisioner",
-  "tag": null
-}
+{}
 
- enterprise.provisioner.image.digest - string - Overrides the image tag with an image digest + bloomCompactor.podLabels + object + Labels for bloom compactor pods
-null
+{}
 
- enterprise.provisioner.image.pullPolicy + bloomCompactor.priorityClassName string - Docker image pull policy + The name of the PriorityClass for bloom compactor pods
-"IfNotPresent"
+null
 
- enterprise.provisioner.image.registry - string - The Docker registry + bloomCompactor.readinessProbe + object + readiness probe settings for bloom compactor pods. If empty, use `loki.readinessProbe`
-"docker.io"
+{}
 
- enterprise.provisioner.image.repository - string - Docker image repository + bloomCompactor.replicas + int + Number of replicas for the bloom compactor
-"grafana/enterprise-logs-provisioner"
+0
 
- enterprise.provisioner.image.tag - string - Overrides the image tag whose default is the chart's appVersion + bloomCompactor.resources + object + Resource requests and limits for the bloom compactor
-null
+{}
 
- enterprise.provisioner.labels + bloomCompactor.serviceAccount.annotations object - Additional labels for the `provisioner` Job + Annotations for the bloom compactor service account
 {}
 
- enterprise.provisioner.priorityClassName - string - The name of the PriorityClass for provisioner Job + bloomCompactor.serviceAccount.automountServiceAccountToken + bool + Set this toggle to false to opt out of automounting API credentials for the service account
-null
+true
 
- enterprise.provisioner.provisionedSecretPrefix + bloomCompactor.serviceAccount.imagePullSecrets + list + Image pull secrets for the bloom compactor service account +
+[]
+
+ + + + bloomCompactor.serviceAccount.name string - Name of the secret to store provisioned tokens in + The name of the ServiceAccount to use for the bloom compactor. If not set and create is true, a name is generated by appending "-bloom-compactor" to the common ServiceAccount.
 null
 
- enterprise.provisioner.securityContext + bloomCompactor.serviceLabels object - Run containers as user `enterprise-logs(uid=10001)` + Labels for bloom compactor service
-{
-  "fsGroup": 10001,
-  "runAsGroup": 10001,
-  "runAsNonRoot": true,
-  "runAsUser": 10001
-}
+{}
 
- enterprise.tokengen + bloomCompactor.terminationGracePeriodSeconds + int + Grace period to allow the bloom compactor to shutdown before it is killed +
+30
+
+ + + + bloomCompactor.tolerations + list + Tolerations for bloom compactor pods +
+[]
+
+ + + + bloomGateway object - Configuration for `tokengen` target + Configuration for the bloom gateway
 {
-  "annotations": {},
-  "enabled": true,
-  "env": [],
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "bloom-gateway"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "appProtocol": {
+    "grpc": ""
+  },
+  "command": null,
   "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
   "extraEnvFrom": [],
   "extraVolumeMounts": [],
   "extraVolumes": [],
-  "labels": {},
-  "priorityClassName": "",
-  "securityContext": {
-    "fsGroup": 10001,
-    "runAsGroup": 10001,
-    "runAsNonRoot": true,
-    "runAsUser": 10001
+  "hostAliases": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
   },
-  "targetModule": "tokengen",
+  "initContainers": [],
+  "livenessProbe": {},
+  "nodeSelector": {},
+  "persistence": {
+    "annotations": {},
+    "claims": [
+      {
+        "name": "data",
+        "size": "10Gi",
+        "storageClass": null
+      }
+    ],
+    "enableStatefulSetAutoDeletePVC": false,
+    "enabled": false,
+    "size": "10Gi",
+    "storageClass": null,
+    "whenDeleted": "Retain",
+    "whenScaled": "Retain"
+  },
+  "podAnnotations": {},
+  "podLabels": {},
+  "priorityClassName": null,
+  "readinessProbe": {},
+  "replicas": 0,
+  "resources": {},
+  "serviceAccount": {
+    "annotations": {},
+    "automountServiceAccountToken": true,
+    "create": false,
+    "imagePullSecrets": [],
+    "name": null
+  },
+  "serviceLabels": {},
+  "terminationGracePeriodSeconds": 30,
   "tolerations": []
 }
 
- enterprise.tokengen.annotations + bloomGateway.affinity object - Additional annotations for the `tokengen` Job + Affinity for bloom gateway pods. +
+Hard node anti-affinity
+
+ + + + bloomGateway.appProtocol + object + Set the optional grpc service protocol. Ex: "grpc", "http2" or "https"
-{}
+{
+  "grpc": ""
+}
 
- enterprise.tokengen.enabled - bool - Whether the job should be part of the deployment + bloomGateway.command + string + Command to execute instead of defined in Docker image
-true
+null
 
- enterprise.tokengen.env + bloomGateway.extraArgs list - Additional Kubernetes environment + Additional CLI args for the bloom gateway
 []
 
- enterprise.tokengen.extraArgs + bloomGateway.extraContainers list - Additional CLI arguments for the `tokengen` target + Containers to add to the bloom gateway pods
 []
 
- enterprise.tokengen.extraEnvFrom + bloomGateway.extraEnv list - Environment variables from secrets or configmaps to add to the tokengen pods + Environment variables to add to the bloom gateway pods
 []
 
- enterprise.tokengen.extraVolumeMounts + bloomGateway.extraEnvFrom list - Additional volume mounts for Pods + Environment variables from secrets or configmaps to add to the bloom gateway pods
 []
 
- enterprise.tokengen.extraVolumes + bloomGateway.extraVolumeMounts list - Additional volumes for Pods + Volume mounts to add to the bloom gateway pods
 []
 
- enterprise.tokengen.labels - object - Additional labels for the `tokengen` Job + bloomGateway.extraVolumes + list + Volumes to add to the bloom gateway pods
-{}
+[]
 
- enterprise.tokengen.priorityClassName - string - The name of the PriorityClass for tokengen Pods + bloomGateway.hostAliases + list + hostAliases to add
-""
+[]
 
- enterprise.tokengen.securityContext - object - Run containers as user `enterprise-logs(uid=10001)` + bloomGateway.image.registry + string + The Docker registry for the bloom gateway image. Overrides `loki.image.registry`
-{
-  "fsGroup": 10001,
-  "runAsGroup": 10001,
-  "runAsNonRoot": true,
-  "runAsUser": 10001
-}
+null
 
- enterprise.tokengen.targetModule + bloomGateway.image.repository string - Comma-separated list of Loki modules to load for tokengen + Docker image repository for the bloom gateway image. Overrides `loki.image.repository`
-"tokengen"
+null
 
- enterprise.tokengen.tolerations - list - Tolerations for tokengen Job + bloomGateway.image.tag + string + Docker image tag for the bloom gateway image. Overrides `loki.image.tag`
-[]
+null
 
- enterprise.useExternalLicense - bool - Set to true when providing an external license + bloomGateway.initContainers + list + Init containers to add to the bloom gateway pods
-false
+[]
 
- enterprise.version - string - + bloomGateway.livenessProbe + object + liveness probe settings for bloom gateway pods. If empty use `loki.livenessProbe`
-"v1.8.6"
+{}
 
- extraObjects - list - + bloomGateway.nodeSelector + object + Node selector for bloom gateway pods
-[]
+{}
 
- fullnameOverride - string - Overrides the chart's computed fullname + bloomGateway.persistence.annotations + object + Annotations for bloom gateway PVCs
-null
+{}
 
- gateway.affinity - string - Affinity for gateway pods. Passed through `tpl` and, thus, to be configured as string -
-Hard node and soft zone anti-affinity
+			bloomGateway.persistence.claims
+			list
+			List of the bloom gateway PVCs
+			
+
 
- gateway.annotations - object - Annotations for gateway deployment + bloomGateway.persistence.enableStatefulSetAutoDeletePVC + bool + Enable StatefulSetAutoDeletePVC feature
-{}
+false
 
- gateway.autoscaling.behavior - object - Behavior policies while scaling. + bloomGateway.persistence.enabled + bool + Enable creating PVCs for the bloom gateway
-{}
+false
 
- gateway.autoscaling.enabled - bool - Enable autoscaling for the gateway + bloomGateway.persistence.size + string + Size of persistent disk
-false
+"10Gi"
 
- gateway.autoscaling.maxReplicas - int - Maximum autoscaling replicas for the gateway + bloomGateway.persistence.storageClass + string + Storage class to be used. If defined, storageClassName: . If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
-3
+null
 
- gateway.autoscaling.minReplicas - int - Minimum autoscaling replicas for the gateway + bloomGateway.podAnnotations + object + Annotations for bloom gateway pods
-1
+{}
 
- gateway.autoscaling.targetCPUUtilizationPercentage - int - Target CPU utilisation percentage for the gateway + bloomGateway.podLabels + object + Labels for bloom gateway pods
-60
+{}
 
- gateway.autoscaling.targetMemoryUtilizationPercentage + bloomGateway.priorityClassName string - Target memory utilisation percentage for the gateway + The name of the PriorityClass for bloom gateway pods
 null
 
- gateway.basicAuth.enabled - bool - Enables basic authentication for the gateway + bloomGateway.readinessProbe + object + readiness probe settings for bloom gateway pods. If empty, use `loki.readinessProbe`
-false
+{}
 
- gateway.basicAuth.existingSecret - string - Existing basic auth secret to use. Must contain '.htpasswd' + bloomGateway.replicas + int + Number of replicas for the bloom gateway
-null
+0
 
- gateway.basicAuth.htpasswd - string - Uses the specified users from the `loki.tenants` list to create the htpasswd file if `loki.tenants` is not set, the `gateway.basicAuth.username` and `gateway.basicAuth.password` are used The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes high CPU load. + bloomGateway.resources + object + Resource requests and limits for the bloom gateway
-"{{ if .Values.loki.tenants }}\n\n  {{- range $t := .Values.loki.tenants }}\n{{ htpasswd (required \"All tenants must have a 'name' set\" $t.name) (required \"All tenants must have a 'password' set\" $t.password) }}\n\n  {{- end }}\n{{ else }} {{ htpasswd (required \"'gateway.basicAuth.username' is required\" .Values.gateway.basicAuth.username) (required \"'gateway.basicAuth.password' is required\" .Values.gateway.basicAuth.password) }} {{ end }}"
+{}
 
- gateway.basicAuth.password - string - The basic auth password for the gateway + bloomGateway.serviceAccount.annotations + object + Annotations for the bloom gateway service account
-null
+{}
 
- gateway.basicAuth.username - string - The basic auth username for the gateway + bloomGateway.serviceAccount.automountServiceAccountToken + bool + Set this toggle to false to opt out of automounting API credentials for the service account
-null
+true
 
- gateway.containerSecurityContext - object - The SecurityContext for gateway containers + bloomGateway.serviceAccount.imagePullSecrets + list + Image pull secrets for the bloom gateway service account
-{
-  "allowPrivilegeEscalation": false,
-  "capabilities": {
-    "drop": [
-      "ALL"
-    ]
-  },
-  "readOnlyRootFilesystem": true
-}
+[]
 
- gateway.deploymentStrategy.type + bloomGateway.serviceAccount.name string - + The name of the ServiceAccount to use for the bloom gateway. If not set and create is true, a name is generated by appending "-bloom-gateway" to the common ServiceAccount.
-"RollingUpdate"
+null
 
- gateway.dnsConfig + bloomGateway.serviceLabels object - DNS config for gateway pods + Labels for bloom gateway service
 {}
 
- gateway.enabled - bool - Specifies whether the gateway should be enabled + bloomGateway.terminationGracePeriodSeconds + int + Grace period to allow the bloom gateway to shutdown before it is killed
-true
+30
 
- gateway.extraArgs + bloomGateway.tolerations list - Additional CLI args for the gateway + Tolerations for bloom gateway pods
 []
 
- gateway.extraContainers - list - Containers to add to the gateway pods + chunksCache.affinity + object + Affinity for chunks-cache pods
-[]
+{}
 
- gateway.extraEnv - list - Environment variables to add to the gateway pods + chunksCache.allocatedMemory + int + Amount of memory allocated to chunks-cache for object storage (in MB).
-[]
+8192
 
- gateway.extraEnvFrom - list - Environment variables from secrets or configmaps to add to the gateway pods + chunksCache.annotations + object + Annotations for the chunks-cache pods
-[]
+{}
 
- gateway.extraVolumeMounts - list - Volume mounts to add to the gateway pods + chunksCache.batchSize + int + Batch size for sending and receiving chunks from chunks cache
-[]
+4
 
- gateway.extraVolumes - list - Volumes to add to the gateway pods + chunksCache.connectionLimit + int + Maximum number of connections allowed
-[]
+16384
 
- gateway.image.digest + chunksCache.defaultValidity string - Overrides the gateway image tag with an image digest + Specify how long cached chunks should be stored in the chunks-cache before being expired
-null
+"0s"
 
- gateway.image.pullPolicy - string - The gateway image pull policy + chunksCache.enabled + bool + Specifies whether memcached based chunks-cache should be enabled
-"IfNotPresent"
+true
 
- gateway.image.registry - string - The Docker registry for the gateway image + chunksCache.extraArgs + object + Additional CLI args for chunks-cache
-"docker.io"
+{}
 
- gateway.image.repository - string - The gateway image repository + chunksCache.extraContainers + list + Additional containers to be added to the chunks-cache pod.
-"nginxinc/nginx-unprivileged"
+[]
 
- gateway.image.tag + chunksCache.extraExtendedOptions string - The gateway image tag + Add extended options for chunks-cache memcached container. The format is the same as for the memcached -o/--extend flag. Example: extraExtendedOptions: 'tls,no_hashexpand'
-"1.24-alpine"
+""
 
- gateway.ingress.annotations - object - Annotations for the gateway ingress + chunksCache.extraVolumeMounts + list + Additional volume mounts to be added to the chunks-cache pod (applies to both memcached and exporter containers). Example: extraVolumeMounts: - name: extra-volume mountPath: /etc/extra-volume readOnly: true
-{}
+[]
 
- gateway.ingress.enabled - bool - Specifies whether an ingress for the gateway should be created + chunksCache.extraVolumes + list + Additional volumes to be added to the chunks-cache pod (applies to both memcached and exporter containers). Example: extraVolumes: - name: extra-volume secret: secretName: extra-volume-secret
-false
+[]
 
- gateway.ingress.hosts + chunksCache.initContainers list - Hosts configuration for the gateway ingress, passed through the `tpl` function to allow templating + Extra init containers for chunks-cache pods
-[
-  {
-    "host": "gateway.loki.example.com",
-    "paths": [
-      {
-        "path": "/"
-      }
-    ]
-  }
-]
+[]
 
- gateway.ingress.ingressClassName - string - Ingress Class Name. MAY be required for Kubernetes versions >= 1.18 + chunksCache.maxItemMemory + int + Maximum item memory for chunks-cache (in MB).
-""
+5
 
- gateway.ingress.labels + chunksCache.nodeSelector object - Labels for the gateway ingress + Node selector for chunks-cache pods
 {}
 
- gateway.ingress.tls - list - TLS configuration for the gateway ingress. Hosts passed through the `tpl` function to allow templating + chunksCache.parallelism + int + Parallel threads for sending and receiving chunks from chunks cache
-[
-  {
-    "hosts": [
-      "gateway.loki.example.com"
-    ],
-    "secretName": "loki-gateway-tls"
-  }
-]
+5
 
- gateway.lifecycle + chunksCache.podAnnotations object - Lifecycle for the gateway container + Annotations for chunks-cache pods
 {}
 
- gateway.nginxConfig.customBackendUrl - string - Override Backend URL + chunksCache.podDisruptionBudget + object + Pod Disruption Budget
-null
+{
+  "maxUnavailable": 1
+}
 
- gateway.nginxConfig.customReadUrl - string - Override Read URL + chunksCache.podLabels + object + Labels for chunks-cache pods
-null
+{}
 
- gateway.nginxConfig.customWriteUrl + chunksCache.podManagementPolicy string - Override Write URL -
-null
-
- - - - gateway.nginxConfig.enableIPv6 - bool - Enable listener for IPv6, disable on IPv4-only systems + Management policy for chunks-cache pods
-true
-
- - - - gateway.nginxConfig.file - string - Config file contents for Nginx. Passed through the `tpl` function to allow templating -
-See values.yaml
+"Parallel"
 
- gateway.nginxConfig.httpSnippet - string - Allows appending custom configuration to the http block, passed through the `tpl` function to allow templating + chunksCache.port + int + Port of the chunks-cache service
-"{{ if .Values.loki.tenants }}proxy_set_header X-Scope-OrgID $remote_user;{{ end }}"
+11211
 
- gateway.nginxConfig.logFormat + chunksCache.priorityClassName string - NGINX log format + The name of the PriorityClass for chunks-cache pods
-"main '$remote_addr - $remote_user [$time_local]  $status '\n        '\"$request\" $body_bytes_sent \"$http_referer\" '\n        '\"$http_user_agent\" \"$http_x_forwarded_for\"';"
+null
 
- gateway.nginxConfig.resolver - string - Allows overriding the DNS resolver address nginx will use. + chunksCache.replicas + int + Total number of chunks-cache replicas
-""
+1
 
- gateway.nginxConfig.serverSnippet + chunksCache.resources string - Allows appending custom configuration to the server block -
-""
-
- - - - gateway.nodeSelector - object - Node selector for gateway pods + Resource requests and limits for the chunks-cache By default a safe memory limit will be requested based on allocatedMemory value (floor (* 1.2 allocatedMemory)).
-{}
-
- - - - gateway.podAnnotations - object - Annotations for gateway pods -
-{}
+null
 
- gateway.podLabels + chunksCache.service object - Additional labels for gateway pods + Service annotations and labels
-{}
+{
+  "annotations": {},
+  "labels": {}
+}
 
- gateway.podSecurityContext + chunksCache.statefulStrategy object - The SecurityContext for gateway containers + Stateful chunks-cache strategy
 {
-  "fsGroup": 101,
-  "runAsGroup": 101,
-  "runAsNonRoot": true,
-  "runAsUser": 101
+  "type": "RollingUpdate"
 }
 
- gateway.priorityClassName - string - The name of the PriorityClass for gateway pods + chunksCache.terminationGracePeriodSeconds + int + Grace period to allow the chunks-cache to shutdown before it is killed
-null
+60
 
- gateway.readinessProbe.httpGet.path + chunksCache.timeout string - + Memcached operation timeout
-"/"
+"2000ms"
 
- gateway.readinessProbe.httpGet.port - string - + chunksCache.tolerations + list + Tolerations for chunks-cache pods
-"http"
+[]
 
- gateway.readinessProbe.initialDelaySeconds - int - + chunksCache.topologySpreadConstraints + list + topologySpreadConstraints allows to customize the default topologySpreadConstraints. This can be either a single dict as shown below or a slice of topologySpreadConstraints. labelSelector is taken from the constraint itself (if it exists) or is generated by the chart using the same selectors as for services.
-15
+[]
 
- gateway.readinessProbe.timeoutSeconds + chunksCache.writebackBuffer int - + Max number of objects to use for cache write back
-1
+500000
 
- gateway.replicas + chunksCache.writebackParallelism int - Number of replicas for the gateway + Number of parallel threads for cache write back
 1
 
- gateway.resources - object - Resource requests and limits for the gateway -
-{}
-
- - - - gateway.service.annotations - object - Annotations for the gateway service + chunksCache.writebackSizeLimit + string + Max memory to use for cache write back
-{}
+"500MB"
 
- gateway.service.clusterIP + clusterLabelOverride string - ClusterIP of the gateway service + Overrides the chart's cluster label
 null
 
- gateway.service.labels + compactor object - Labels for gateway service + Configuration for the compactor
-{}
+{
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "compactor"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "appProtocol": {
+    "grpc": ""
+  },
+  "command": null,
+  "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "hostAliases": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "initContainers": [],
+  "livenessProbe": {},
+  "nodeSelector": {},
+  "persistence": {
+    "annotations": {},
+    "claims": [
+      {
+        "name": "data",
+        "size": "10Gi",
+        "storageClass": null
+      }
+    ],
+    "enableStatefulSetAutoDeletePVC": false,
+    "enabled": false,
+    "size": "10Gi",
+    "storageClass": null,
+    "whenDeleted": "Retain",
+    "whenScaled": "Retain"
+  },
+  "podAnnotations": {},
+  "podLabels": {},
+  "priorityClassName": null,
+  "readinessProbe": {},
+  "replicas": 0,
+  "resources": {},
+  "serviceAccount": {
+    "annotations": {},
+    "automountServiceAccountToken": true,
+    "create": false,
+    "imagePullSecrets": [],
+    "name": null
+  },
+  "serviceLabels": {},
+  "terminationGracePeriodSeconds": 30,
+  "tolerations": []
+}
 
- gateway.service.loadBalancerIP - string - Load balancer IPO address if service type is LoadBalancer -
-null
+			compactor.affinity
+			object
+			Affinity for compactor pods.
+			
+Hard node anti-affinity
 
- gateway.service.nodePort - int - Node port if service type is NodePort + compactor.appProtocol + object + Set the optional grpc service protocol. Ex: "grpc", "http2" or "https"
-null
+{
+  "grpc": ""
+}
 
- gateway.service.port - int - Port of the gateway service + compactor.command + string + Command to execute instead of defined in Docker image
-80
+null
 
- gateway.service.type - string - Type of the gateway service + compactor.extraArgs + list + Additional CLI args for the compactor
-"ClusterIP"
+[]
 
- gateway.terminationGracePeriodSeconds - int - Grace period to allow the gateway to shutdown before it is killed + compactor.extraContainers + list + Containers to add to the compactor pods
-30
+[]
 
- gateway.tolerations + compactor.extraEnv list - Tolerations for gateway pods + Environment variables to add to the compactor pods
 []
 
- gateway.topologySpreadConstraints + compactor.extraEnvFrom list - Topology Spread Constraints for gateway pods + Environment variables from secrets or configmaps to add to the compactor pods
 []
 
- gateway.verboseLogging - bool - Enable logging of 2xx and 3xx HTTP requests + compactor.extraVolumeMounts + list + Volume mounts to add to the compactor pods
-true
+[]
 
- global.clusterDomain - string - configures cluster domain ("cluster.local" by default) + compactor.extraVolumes + list + Volumes to add to the compactor pods
-"cluster.local"
+[]
 
- global.dnsNamespace - string - configures DNS service namespace + compactor.hostAliases + list + hostAliases to add
-"kube-system"
+[]
 
- global.dnsService + compactor.image.registry string - configures DNS service name + The Docker registry for the compactor image. Overrides `loki.image.registry`
-"kube-dns"
+null
 
- global.image.registry + compactor.image.repository string - Overrides the Docker registry globally for all images + Docker image repository for the compactor image. Overrides `loki.image.repository`
 null
 
- global.priorityClassName + compactor.image.tag string - Overrides the priorityClassName for all pods + Docker image tag for the compactor image. Overrides `loki.image.tag`
 null
 
- imagePullSecrets + compactor.initContainers list - Image pull secrets for Docker images + Init containers to add to the compactor pods
 []
 
- ingress.annotations + compactor.livenessProbe object - + liveness probe settings for compactor pods. If empty use `loki.livenessProbe`
 {}
 
- ingress.enabled - bool - + compactor.nodeSelector + object + Node selector for compactor pods
-false
+{}
 
- ingress.hosts - list - Hosts configuration for the ingress, passed through the `tpl` function to allow templating + compactor.persistence.annotations + object + Annotations for compactor PVCs
-[
-  "loki.example.com"
-]
+{}
 
- ingress.ingressClassName - string - -
-""
+			compactor.persistence.claims
+			list
+			List of the compactor PVCs
+			
+
 
- ingress.labels - object - + compactor.persistence.enableStatefulSetAutoDeletePVC + bool + Enable StatefulSetAutoDeletePVC feature
-{}
+false
 
- ingress.paths.read[0] - string - + compactor.persistence.enabled + bool + Enable creating PVCs for the compactor
-"/api/prom/tail"
+false
 
- ingress.paths.read[1] + compactor.persistence.size string - + Size of persistent disk
-"/loki/api/v1/tail"
+"10Gi"
 
- ingress.paths.read[2] + compactor.persistence.storageClass string - + Storage class to be used. If defined, storageClassName: . If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
-"/loki/api"
+null
 
- ingress.paths.read[3] - string - + compactor.podAnnotations + object + Annotations for compactor pods
-"/api/prom/rules"
+{}
 
- ingress.paths.read[4] - string - + compactor.podLabels + object + Labels for compactor pods
-"/loki/api/v1/rules"
+{}
 
- ingress.paths.read[5] + compactor.priorityClassName string - + The name of the PriorityClass for compactor pods
-"/prometheus/api/v1/rules"
+null
 
- ingress.paths.read[6] - string - + compactor.readinessProbe + object + readiness probe settings for compactor pods. If empty, use `loki.readinessProbe`
-"/prometheus/api/v1/alerts"
+{}
 
- ingress.paths.singleBinary[0] - string - -
-"/api/prom/push"
-
- - - - ingress.paths.singleBinary[1] - string - + compactor.replicas + int + Number of replicas for the compactor
-"/loki/api/v1/push"
+0
 
- ingress.paths.singleBinary[2] - string - + compactor.resources + object + Resource requests and limits for the compactor
-"/api/prom/tail"
+{}
 
- ingress.paths.singleBinary[3] - string - + compactor.serviceAccount.annotations + object + Annotations for the compactor service account
-"/loki/api/v1/tail"
+{}
 
- ingress.paths.singleBinary[4] - string - + compactor.serviceAccount.automountServiceAccountToken + bool + Set this toggle to false to opt out of automounting API credentials for the service account
-"/loki/api"
+true
 
- ingress.paths.singleBinary[5] - string - + compactor.serviceAccount.imagePullSecrets + list + Image pull secrets for the compactor service account
-"/api/prom/rules"
+[]
 
- ingress.paths.singleBinary[6] + compactor.serviceAccount.name string - + The name of the ServiceAccount to use for the compactor. If not set and create is true, a name is generated by appending "-compactor" to the common ServiceAccount.
-"/loki/api/v1/rules"
+null
 
- ingress.paths.singleBinary[7] - string - + compactor.serviceLabels + object + Labels for compactor service
-"/prometheus/api/v1/rules"
+{}
 
- ingress.paths.singleBinary[8] - string - + compactor.terminationGracePeriodSeconds + int + Grace period to allow the compactor to shutdown before it is killed
-"/prometheus/api/v1/alerts"
+30
 
- ingress.paths.write[0] - string - + compactor.tolerations + list + Tolerations for compactor pods
-"/api/prom/push"
+[]
 
- ingress.paths.write[1] + deploymentMode string - + Deployment mode lets you specify how to deploy Loki. There are 3 options: - SingleBinary: Loki is deployed as a single binary, useful for small installs typically without HA, up to a few tens of GB/day. - SimpleScalable: Loki is deployed as 3 targets: read, write, and backend. Useful for medium installs easier to manage than distributed, up to about 1TB/day. - Distributed: Loki is deployed as individual microservices. The most complicated but most capable, useful for large installs, typically over 1TB/day. There are also 2 additional modes used for migrating between deployment modes: - SingleBinary<->SimpleScalable: Migrate from SingleBinary to SimpleScalable (or vice versa) - SimpleScalable<->Distributed: Migrate from SimpleScalable to Distributed (or vice versa) Note: SimpleScalable and Distributed REQUIRE the use of object storage.
-"/loki/api/v1/push"
+"SimpleScalable"
 
- ingress.tls - list - TLS configuration for the ingress. Hosts passed through the `tpl` function to allow templating + distributor + object + Configuration for the distributor
-[]
+{
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "distributor"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "appProtocol": {
+    "grpc": ""
+  },
+  "autoscaling": {
+    "behavior": {
+      "enabled": false,
+      "scaleDown": {},
+      "scaleUp": {}
+    },
+    "customMetrics": [],
+    "enabled": false,
+    "maxReplicas": 3,
+    "minReplicas": 1,
+    "targetCPUUtilizationPercentage": 60,
+    "targetMemoryUtilizationPercentage": null
+  },
+  "command": null,
+  "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "hostAliases": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "maxSurge": 0,
+  "maxUnavailable": null,
+  "nodeSelector": {},
+  "podAnnotations": {},
+  "podLabels": {},
+  "priorityClassName": null,
+  "replicas": 0,
+  "resources": {},
+  "serviceLabels": {},
+  "terminationGracePeriodSeconds": 30,
+  "tolerations": []
+}
 
- kubectlImage.digest - string - Overrides the image tag with an image digest -
-null
+			distributor.affinity
+			object
+			Affinity for distributor pods.
+			
+Hard node anti-affinity
 
- kubectlImage.pullPolicy - string - Docker image pull policy + distributor.appProtocol + object + Adds the appProtocol field to the distributor service. This allows distributor to work with istio protocol selection.
-"IfNotPresent"
+{
+  "grpc": ""
+}
 
- kubectlImage.registry + distributor.appProtocol.grpc string - The Docker registry + Set the optional grpc service protocol. Ex: "grpc", "http2" or "https"
-"docker.io"
+""
 
- kubectlImage.repository - string - Docker image repository + distributor.autoscaling.behavior.enabled + bool + Enable autoscaling behaviours
-"bitnami/kubectl"
+false
 
- kubectlImage.tag - string - Overrides the image tag whose default is the chart's appVersion + distributor.autoscaling.behavior.scaleDown + object + define scale down policies, must conform to HPAScalingRules
-null
+{}
 
- loki.analytics + distributor.autoscaling.behavior.scaleUp object - Optional analytics configuration + define scale up policies, must conform to HPAScalingRules
 {}
 
- loki.annotations - object - Common annotations for all deployments/StatefulSets + distributor.autoscaling.customMetrics + list + Allows one to define custom metrics using the HPA/v2 schema (for example, Pods, Object or External metrics)
-{}
+[]
 
- loki.auth_enabled + distributor.autoscaling.enabled bool - + Enable autoscaling for the distributor
-true
+false
 
- loki.commonConfig - object - Check https://grafana.com/docs/loki/latest/configuration/#common_config for more info on how to provide a common configuration + distributor.autoscaling.maxReplicas + int + Maximum autoscaling replicas for the distributor
-{
-  "compactor_address": "{{ include \"loki.compactorAddress\" . }}",
-  "path_prefix": "/var/loki",
-  "replication_factor": 3
-}
+3
 
- loki.compactor - object - Optional compactor configuration + distributor.autoscaling.minReplicas + int + Minimum autoscaling replicas for the distributor
-{}
+1
 
- loki.config - string - Config file contents for Loki -
-See values.yaml
+			distributor.autoscaling.targetCPUUtilizationPercentage
+			int
+			Target CPU utilisation percentage for the distributor
+			
+60
 
- loki.configStorageType + distributor.autoscaling.targetMemoryUtilizationPercentage string - Defines what kind of object stores the configuration, a ConfigMap or a Secret. In order to move sensitive information (such as credentials) from the ConfigMap/Secret to a more secure location (e.g. vault), it is possible to use [environment variables in the configuration](https://grafana.com/docs/loki/latest/configuration/#use-environment-variables-in-the-configuration). Such environment variables can be then stored in a separate Secret and injected via the global.extraEnvFrom value. For details about environment injection from a Secret please see [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-as-container-environment-variables). + Target memory utilisation percentage for the distributor
-"ConfigMap"
+null
 
- loki.containerSecurityContext - object - The SecurityContext for Loki containers + distributor.command + string + Command to execute instead of defined in Docker image
-{
-  "allowPrivilegeEscalation": false,
-  "capabilities": {
-    "drop": [
-      "ALL"
-    ]
-  },
-  "readOnlyRootFilesystem": true
-}
+null
 
- loki.distributor - object - Optional distributor configuration + distributor.extraArgs + list + Additional CLI args for the distributor
-{}
+[]
 
- loki.enableServiceLinks - bool - Should enableServiceLinks be enabled. Default to enable + distributor.extraContainers + list + Containers to add to the distributor pods
-true
+[]
 
- loki.existingSecretForConfig - string - Specify an existing secret containing loki configuration. If non-empty, overrides `loki.config` + distributor.extraEnv + list + Environment variables to add to the distributor pods
-""
+[]
 
- loki.externalConfigSecretName - string - Name of the Secret or ConfigMap that contains the configuration (used for naming even if config is internal). + distributor.extraEnvFrom + list + Environment variables from secrets or configmaps to add to the distributor pods
-"{{ include \"loki.name\" . }}"
+[]
 
- loki.extraMemberlistConfig - object - Extra memberlist configuration + distributor.extraVolumeMounts + list + Volume mounts to add to the distributor pods
-{}
+[]
 
- loki.frontend.scheduler_address - string - + distributor.extraVolumes + list + Volumes to add to the distributor pods
-"{{ include \"loki.querySchedulerAddress\" . }}"
+[]
 
- loki.frontend_worker.scheduler_address - string - + distributor.hostAliases + list + hostAliases to add
-"{{ include \"loki.querySchedulerAddress\" . }}"
+[]
 
- loki.image.digest + distributor.image.registry string - Overrides the image tag with an image digest + The Docker registry for the distributor image. Overrides `loki.image.registry`
 null
 
- loki.image.pullPolicy + distributor.image.repository string - Docker image pull policy + Docker image repository for the distributor image. Overrides `loki.image.repository`
-"IfNotPresent"
+null
 
- loki.image.registry + distributor.image.tag string - The Docker registry + Docker image tag for the distributor image. Overrides `loki.image.tag`
-"docker.io"
+null
 
- loki.image.repository - string - Docker image repository + distributor.maxSurge + int + Max Surge for distributor pods
-"grafana/loki"
+0
 
- loki.image.tag + distributor.maxUnavailable string - Overrides the image tag whose default is the chart's appVersion TODO: needed for 3rd target backend functionality revert to null or latest once this behavior is relased + Pod Disruption Budget maxUnavailable
 null
 
- loki.index_gateway + distributor.nodeSelector object - Optional index gateway configuration + Node selector for distributor pods
-{
-  "mode": "ring"
-}
+{}
 
- loki.ingester + distributor.podAnnotations object - Optional ingester configuration + Annotations for distributor pods
 {}
 
- loki.limits_config + distributor.podLabels object - Limits config + Labels for distributor pods
-{
-  "max_cache_freshness_per_query": "10m",
-  "reject_old_samples": true,
-  "reject_old_samples_max_age": "168h",
-  "split_queries_by_interval": "15m"
-}
+{}
 
- loki.memberlistConfig - object - memberlist configuration (overrides embedded default) + distributor.priorityClassName + string + The name of the PriorityClass for distributor pods
-{}
+null
 
- loki.memcached - object - Configure memcached as an external cache for chunk and results cache. Disabled by default must enable and specify a host for each cache you would like to use. + distributor.replicas + int + Number of replicas for the distributor
-{
-  "chunk_cache": {
-    "batch_size": 256,
-    "enabled": false,
-    "host": "",
-    "parallelism": 10,
-    "service": "memcached-client"
-  },
-  "results_cache": {
-    "default_validity": "12h",
-    "enabled": false,
-    "host": "",
-    "service": "memcached-client",
-    "timeout": "500ms"
-  }
-}
+0
 
- loki.podAnnotations + distributor.resources object - Common annotations for all pods + Resource requests and limits for the distributor
 {}
 
- loki.podLabels + distributor.serviceLabels object - Common labels for all pods + Labels for distributor service
 {}
 
- loki.podSecurityContext - object - The SecurityContext for Loki pods + distributor.terminationGracePeriodSeconds + int + Grace period to allow the distributor to shutdown before it is killed
-{
-  "fsGroup": 10001,
-  "runAsGroup": 10001,
-  "runAsNonRoot": true,
-  "runAsUser": 10001
+30
+
+ + + + distributor.tolerations + list + Tolerations for distributor pods +
+[]
+
+ + + + enterprise + object + Configuration for running Enterprise Loki +
+{
+  "adminApi": {
+    "enabled": true
+  },
+  "adminToken": {
+    "additionalNamespaces": [],
+    "secret": null
+  },
+  "canarySecret": null,
+  "cluster_name": null,
+  "config": "{{- if .Values.enterprise.adminApi.enabled }}\n{{- if or .Values.minio.enabled (eq .Values.loki.storage.type \"s3\") (eq .Values.loki.storage.type \"gcs\") (eq .Values.loki.storage.type \"azure\") }}\nadmin_client:\n  storage:\n    s3:\n      bucket_name: {{ .Values.loki.storage.bucketNames.admin }}\n{{- end }}\n{{- end }}\nauth:\n  type: {{ .Values.enterprise.adminApi.enabled | ternary \"enterprise\" \"trust\" }}\nauth_enabled: {{ .Values.loki.auth_enabled }}\ncluster_name: {{ include \"loki.clusterName\" . }}\nlicense:\n  path: /etc/loki/license/license.jwt\n",
+  "enabled": false,
+  "externalConfigName": "",
+  "externalLicenseName": null,
+  "gelGateway": true,
+  "image": {
+    "digest": null,
+    "pullPolicy": "IfNotPresent",
+    "registry": "docker.io",
+    "repository": "grafana/enterprise-logs",
+    "tag": null
+  },
+  "license": {
+    "contents": "NOTAVALIDLICENSE"
+  },
+  "provisioner": {
+    "additionalTenants": [],
+    "annotations": {},
+    "enabled": true,
+    "env": [],
+    "extraVolumeMounts": [],
+    "image": {
+      "digest": null,
+      "pullPolicy": "IfNotPresent",
+      "registry": "docker.io",
+      "repository": "grafana/enterprise-logs-provisioner",
+      "tag": null
+    },
+    "labels": {},
+    "priorityClassName": null,
+    "provisionedSecretPrefix": null,
+    "securityContext": {
+      "fsGroup": 10001,
+      "runAsGroup": 10001,
+      "runAsNonRoot": true,
+      "runAsUser": 10001
+    }
+  },
+  "tokengen": {
+    "annotations": {},
+    "enabled": true,
+    "env": [],
+    "extraArgs": [],
+    "extraEnvFrom": [],
+    "extraVolumeMounts": [],
+    "extraVolumes": [],
+    "labels": {},
+    "priorityClassName": "",
+    "securityContext": {
+      "fsGroup": 10001,
+      "runAsGroup": 10001,
+      "runAsNonRoot": true,
+      "runAsUser": 10001
+    },
+    "targetModule": "tokengen",
+    "tolerations": []
+  },
+  "useExternalLicense": false,
+  "version": "v1.8.6"
+}
+
+ + + + enterprise.adminApi + object + If enabled, the correct admin_client storage will be configured. If disabled while running enterprise, make sure auth is set to `type: trust`, or that `auth_enabled` is set to `false`. +
+{
+  "enabled": true
+}
+
+ + + + enterprise.adminToken.additionalNamespaces + list + Additional namespace to also create the token in. Useful if your Grafana instance is in a different namespace +
+[]
+
+ + + + enterprise.adminToken.secret + string + Alternative name for admin token secret, needed by tokengen and provisioner jobs +
+null
+
+ + + + enterprise.canarySecret + string + Alternative name of the secret to store token for the canary +
+null
+
+ + + + enterprise.cluster_name + string + Optional name of the GEL cluster, otherwise will use .Release.Name The cluster name must match what is in your GEL license +
+null
+
+ + + + enterprise.externalConfigName + string + Name of the external config secret to use +
+""
+
+ + + + enterprise.externalLicenseName + string + Name of external license secret to use +
+null
+
+ + + + enterprise.gelGateway + bool + Use GEL gateway, if false will use the default nginx gateway +
+true
+
+ + + + enterprise.image.digest + string + Overrides the image tag with an image digest +
+null
+
+ + + + enterprise.image.pullPolicy + string + Docker image pull policy +
+"IfNotPresent"
+
+ + + + enterprise.image.registry + string + The Docker registry +
+"docker.io"
+
+ + + + enterprise.image.repository + string + Docker image repository +
+"grafana/enterprise-logs"
+
+ + + + enterprise.image.tag + string + Docker image tag +
+null
+
+ + + + enterprise.license + object + Grafana Enterprise Logs license In order to use Grafana Enterprise Logs features, you will need to provide the contents of your Grafana Enterprise Logs license, either by providing the contents of the license.jwt, or the name Kubernetes Secret that contains your license.jwt. To set the license contents, use the flag `--set-file 'enterprise.license.contents=./license.jwt'` +
+{
+  "contents": "NOTAVALIDLICENSE"
+}
+
+ + + + enterprise.provisioner + object + Configuration for `provisioner` target +
+{
+  "additionalTenants": [],
+  "annotations": {},
+  "enabled": true,
+  "env": [],
+  "extraVolumeMounts": [],
+  "image": {
+    "digest": null,
+    "pullPolicy": "IfNotPresent",
+    "registry": "docker.io",
+    "repository": "grafana/enterprise-logs-provisioner",
+    "tag": null
+  },
+  "labels": {},
+  "priorityClassName": null,
+  "provisionedSecretPrefix": null,
+  "securityContext": {
+    "fsGroup": 10001,
+    "runAsGroup": 10001,
+    "runAsNonRoot": true,
+    "runAsUser": 10001
+  }
+}
+
+ + + + enterprise.provisioner.additionalTenants + list + Additional tenants to be created. Each tenant will get a read and write policy and associated token. Tenant must have a name and a namespace for the secret containing the token to be created in. For example additionalTenants: - name: loki secretNamespace: grafana +
+[]
+
+ + + + enterprise.provisioner.annotations + object + Additional annotations for the `provisioner` Job +
+{}
+
+ + + + enterprise.provisioner.enabled + bool + Whether the job should be part of the deployment +
+true
+
+ + + + enterprise.provisioner.env + list + Additional Kubernetes environment +
+[]
+
+ + + + enterprise.provisioner.extraVolumeMounts + list + Volume mounts to add to the provisioner pods +
+[]
+
+ + + + enterprise.provisioner.image + object + Provisioner image to utilize +
+{
+  "digest": null,
+  "pullPolicy": "IfNotPresent",
+  "registry": "docker.io",
+  "repository": "grafana/enterprise-logs-provisioner",
+  "tag": null
+}
+
+ + + + enterprise.provisioner.image.digest + string + Overrides the image tag with an image digest +
+null
+
+ + + + enterprise.provisioner.image.pullPolicy + string + Docker image pull policy +
+"IfNotPresent"
+
+ + + + enterprise.provisioner.image.registry + string + The Docker registry +
+"docker.io"
+
+ + + + enterprise.provisioner.image.repository + string + Docker image repository +
+"grafana/enterprise-logs-provisioner"
+
+ + + + enterprise.provisioner.image.tag + string + Overrides the image tag whose default is the chart's appVersion +
+null
+
+ + + + enterprise.provisioner.labels + object + Additional labels for the `provisioner` Job +
+{}
+
+ + + + enterprise.provisioner.priorityClassName + string + The name of the PriorityClass for provisioner Job +
+null
+
+ + + + enterprise.provisioner.provisionedSecretPrefix + string + Name of the secret to store provisioned tokens in +
+null
+
+ + + + enterprise.provisioner.securityContext + object + Run containers as user `enterprise-logs(uid=10001)` +
+{
+  "fsGroup": 10001,
+  "runAsGroup": 10001,
+  "runAsNonRoot": true,
+  "runAsUser": 10001
+}
+
+ + + + enterprise.tokengen + object + Configuration for `tokengen` target +
+{
+  "annotations": {},
+  "enabled": true,
+  "env": [],
+  "extraArgs": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "labels": {},
+  "priorityClassName": "",
+  "securityContext": {
+    "fsGroup": 10001,
+    "runAsGroup": 10001,
+    "runAsNonRoot": true,
+    "runAsUser": 10001
+  },
+  "targetModule": "tokengen",
+  "tolerations": []
+}
+
+ + + + enterprise.tokengen.annotations + object + Additional annotations for the `tokengen` Job +
+{}
+
+ + + + enterprise.tokengen.enabled + bool + Whether the job should be part of the deployment +
+true
+
+ + + + enterprise.tokengen.env + list + Additional Kubernetes environment +
+[]
+
+ + + + enterprise.tokengen.extraArgs + list + Additional CLI arguments for the `tokengen` target +
+[]
+
+ + + + enterprise.tokengen.extraEnvFrom + list + Environment variables from secrets or configmaps to add to the tokengen pods +
+[]
+
+ + + + enterprise.tokengen.extraVolumeMounts + list + Additional volume mounts for Pods +
+[]
+
+ + + + enterprise.tokengen.extraVolumes + list + Additional volumes for Pods +
+[]
+
+ + + + enterprise.tokengen.labels + object + Additional labels for the `tokengen` Job +
+{}
+
+ + + + enterprise.tokengen.priorityClassName + string + The name of the PriorityClass for tokengen Pods +
+""
+
+ + + + enterprise.tokengen.securityContext + object + Run containers as user `enterprise-logs(uid=10001)` +
+{
+  "fsGroup": 10001,
+  "runAsGroup": 10001,
+  "runAsNonRoot": true,
+  "runAsUser": 10001
+}
+
+ + + + enterprise.tokengen.targetModule + string + Comma-separated list of Loki modules to load for tokengen +
+"tokengen"
+
+ + + + enterprise.tokengen.tolerations + list + Tolerations for tokengen Job +
+[]
+
+ + + + enterprise.useExternalLicense + bool + Set to true when providing an external license +
+false
+
+ + + + enterpriseGateway + object + If running enterprise and using the default enterprise gateway, configs go here. +
+{
+  "affinity": {},
+  "annotations": {},
+  "containerSecurityContext": {
+    "allowPrivilegeEscalation": false,
+    "capabilities": {
+      "drop": [
+        "ALL"
+      ]
+    },
+    "readOnlyRootFilesystem": true
+  },
+  "env": [],
+  "extraArgs": {},
+  "extraContainers": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "hostAliases": [],
+  "initContainers": [],
+  "labels": {},
+  "nodeSelector": {},
+  "podSecurityContext": {
+    "fsGroup": 10001,
+    "runAsGroup": 10001,
+    "runAsNonRoot": true,
+    "runAsUser": 10001
+  },
+  "readinessProbe": {
+    "httpGet": {
+      "path": "/ready",
+      "port": "http-metrics"
+    },
+    "initialDelaySeconds": 45
+  },
+  "replicas": 1,
+  "resources": {},
+  "service": {
+    "annotations": {},
+    "labels": {},
+    "type": "ClusterIP"
+  },
+  "strategy": {
+    "type": "RollingUpdate"
+  },
+  "terminationGracePeriodSeconds": 60,
+  "tolerations": [],
+  "useDefaultProxyURLs": true
+}
+
+ + + + enterpriseGateway.affinity + object + Affinity for gateway Pods +
+{}
+
+ + + + enterpriseGateway.annotations + object + Additional annotations for the `gateway` Pod +
+{}
+
+ + + + enterpriseGateway.env + list + Configure optional environment variables +
+[]
+
+ + + + enterpriseGateway.extraArgs + object + Additional CLI arguments for the `gateway` target +
+{}
+
+ + + + enterpriseGateway.extraContainers + list + Configure optional extraContainers +
+[]
+
+ + + + enterpriseGateway.extraVolumeMounts + list + Additional volume mounts for Pods +
+[]
+
+ + + + enterpriseGateway.extraVolumes + list + Additional volumes for Pods +
+[]
+
+ + + + enterpriseGateway.hostAliases + list + hostAliases to add +
+[]
+
+ + + + enterpriseGateway.initContainers + list + Configure optional initContainers +
+[]
+
+ + + + enterpriseGateway.labels + object + Additional labels for the `gateway` Pod +
+{}
+
+ + + + enterpriseGateway.nodeSelector + object + Node selector for gateway Pods +
+{}
+
+ + + + enterpriseGateway.podSecurityContext + object + Run container as user `enterprise-logs(uid=10001)` +
+{
+  "fsGroup": 10001,
+  "runAsGroup": 10001,
+  "runAsNonRoot": true,
+  "runAsUser": 10001
+}
+
+ + + + enterpriseGateway.readinessProbe + object + Readiness probe +
+{
+  "httpGet": {
+    "path": "/ready",
+    "port": "http-metrics"
+  },
+  "initialDelaySeconds": 45
+}
+
+ + + + enterpriseGateway.replicas + int + Define the amount of instances +
+1
+
+ + + + enterpriseGateway.resources + object + Values are defined in small.yaml and large.yaml +
+{}
+
+ + + + enterpriseGateway.service + object + Service overriding service type +
+{
+  "annotations": {},
+  "labels": {},
+  "type": "ClusterIP"
+}
+
+ + + + enterpriseGateway.strategy + object + update strategy +
+{
+  "type": "RollingUpdate"
+}
+
+ + + + enterpriseGateway.terminationGracePeriodSeconds + int + Grace period to allow the gateway to shutdown before it is killed +
+60
+
+ + + + enterpriseGateway.tolerations + list + Tolerations for gateway Pods +
+[]
+
+ + + + enterpriseGateway.useDefaultProxyURLs + bool + If you want to use your own proxy URLs, set this to false. +
+true
+
+ + + + extraObjects + list + +
+[]
+
+ + + + fullnameOverride + string + Overrides the chart's computed fullname +
+null
+
+ + + + gateway.affinity + object + Affinity for gateway pods. +
+Hard node anti-affinity
+
+ + + + gateway.annotations + object + Annotations for gateway deployment +
+{}
+
+ + + + gateway.autoscaling.behavior + object + Behavior policies while scaling. +
+{}
+
+ + + + gateway.autoscaling.enabled + bool + Enable autoscaling for the gateway +
+false
+
+ + + + gateway.autoscaling.maxReplicas + int + Maximum autoscaling replicas for the gateway +
+3
+
+ + + + gateway.autoscaling.minReplicas + int + Minimum autoscaling replicas for the gateway +
+1
+
+ + + + gateway.autoscaling.targetCPUUtilizationPercentage + int + Target CPU utilisation percentage for the gateway +
+60
+
+ + + + gateway.autoscaling.targetMemoryUtilizationPercentage + string + Target memory utilisation percentage for the gateway +
+null
+
+ + + + gateway.basicAuth.enabled + bool + Enables basic authentication for the gateway +
+false
+
+ + + + gateway.basicAuth.existingSecret + string + Existing basic auth secret to use. Must contain '.htpasswd' +
+null
+
+ + + + gateway.basicAuth.htpasswd + string + Uses the specified users from the `loki.tenants` list to create the htpasswd file if `loki.tenants` is not set, the `gateway.basicAuth.username` and `gateway.basicAuth.password` are used The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes high CPU load. +
+"{{ if .Values.loki.tenants }}\n\n  {{- range $t := .Values.loki.tenants }}\n{{ htpasswd (required \"All tenants must have a 'name' set\" $t.name) (required \"All tenants must have a 'password' set\" $t.password) }}\n\n  {{- end }}\n{{ else }} {{ htpasswd (required \"'gateway.basicAuth.username' is required\" .Values.gateway.basicAuth.username) (required \"'gateway.basicAuth.password' is required\" .Values.gateway.basicAuth.password) }} {{ end }}"
+
+ + + + gateway.basicAuth.password + string + The basic auth password for the gateway +
+null
+
+ + + + gateway.basicAuth.username + string + The basic auth username for the gateway +
+null
+
+ + + + gateway.containerSecurityContext + object + The SecurityContext for gateway containers +
+{
+  "allowPrivilegeEscalation": false,
+  "capabilities": {
+    "drop": [
+      "ALL"
+    ]
+  },
+  "readOnlyRootFilesystem": true
+}
+
+ + + + gateway.deploymentStrategy.type + string + +
+"RollingUpdate"
+
+ + + + gateway.dnsConfig + object + DNS config for gateway pods +
+{}
+
+ + + + gateway.enabled + bool + Specifies whether the gateway should be enabled +
+true
+
+ + + + gateway.extraArgs + list + Additional CLI args for the gateway +
+[]
+
+ + + + gateway.extraContainers + list + Containers to add to the gateway pods +
+[]
+
+ + + + gateway.extraEnv + list + Environment variables to add to the gateway pods +
+[]
+
+ + + + gateway.extraEnvFrom + list + Environment variables from secrets or configmaps to add to the gateway pods +
+[]
+
+ + + + gateway.extraVolumeMounts + list + Volume mounts to add to the gateway pods +
+[]
+
+ + + + gateway.extraVolumes + list + Volumes to add to the gateway pods +
+[]
+
+ + + + gateway.image.digest + string + Overrides the gateway image tag with an image digest +
+null
+
+ + + + gateway.image.pullPolicy + string + The gateway image pull policy +
+"IfNotPresent"
+
+ + + + gateway.image.registry + string + The Docker registry for the gateway image +
+"docker.io"
+
+ + + + gateway.image.repository + string + The gateway image repository +
+"nginxinc/nginx-unprivileged"
+
+ + + + gateway.image.tag + string + The gateway image tag +
+"1.24-alpine"
+
+ + + + gateway.ingress.annotations + object + Annotations for the gateway ingress +
+{}
+
+ + + + gateway.ingress.enabled + bool + Specifies whether an ingress for the gateway should be created +
+false
+
+ + + + gateway.ingress.hosts + list + Hosts configuration for the gateway ingress, passed through the `tpl` function to allow templating +
+[
+  {
+    "host": "gateway.loki.example.com",
+    "paths": [
+      {
+        "path": "/"
+      }
+    ]
+  }
+]
+
+ + + + gateway.ingress.ingressClassName + string + Ingress Class Name. MAY be required for Kubernetes versions >= 1.18 +
+""
+
+ + + + gateway.ingress.labels + object + Labels for the gateway ingress +
+{}
+
+ + + + gateway.ingress.tls + list + TLS configuration for the gateway ingress. Hosts passed through the `tpl` function to allow templating +
+[
+  {
+    "hosts": [
+      "gateway.loki.example.com"
+    ],
+    "secretName": "loki-gateway-tls"
+  }
+]
+
+ + + + gateway.lifecycle + object + Lifecycle for the gateway container +
+{}
+
+ + + + gateway.nginxConfig.customBackendUrl + string + Override Backend URL +
+null
+
+ + + + gateway.nginxConfig.customReadUrl + string + Override Read URL +
+null
+
+ + + + gateway.nginxConfig.customWriteUrl + string + Override Write URL +
+null
+
+ + + + gateway.nginxConfig.enableIPv6 + bool + Enable listener for IPv6, disable on IPv4-only systems +
+true
+
+ + + + gateway.nginxConfig.file + string + Config file contents for Nginx. Passed through the `tpl` function to allow templating +
+See values.yaml
+
+ + + + gateway.nginxConfig.httpSnippet + string + Allows appending custom configuration to the http block, passed through the `tpl` function to allow templating +
+"{{ if .Values.loki.tenants }}proxy_set_header X-Scope-OrgID $remote_user;{{ end }}"
+
+ + + + gateway.nginxConfig.logFormat + string + NGINX log format +
+"main '$remote_addr - $remote_user [$time_local]  $status '\n        '\"$request\" $body_bytes_sent \"$http_referer\" '\n        '\"$http_user_agent\" \"$http_x_forwarded_for\"';"
+
+ + + + gateway.nginxConfig.resolver + string + Allows overriding the DNS resolver address nginx will use. +
+""
+
+ + + + gateway.nginxConfig.schema + string + Which schema to be used when building URLs. Can be 'http' or 'https'. +
+"http"
+
+ + + + gateway.nginxConfig.serverSnippet + string + Allows appending custom configuration to the server block +
+""
+
+ + + + gateway.nginxConfig.ssl + bool + Whether ssl should be appended to the listen directive of the server block or not. +
+false
+
+ + + + gateway.nodeSelector + object + Node selector for gateway pods +
+{}
+
+ + + + gateway.podAnnotations + object + Annotations for gateway pods +
+{}
+
+ + + + gateway.podLabels + object + Additional labels for gateway pods +
+{}
+
+ + + + gateway.podSecurityContext + object + The SecurityContext for gateway pods +
+{
+  "fsGroup": 101,
+  "runAsGroup": 101,
+  "runAsNonRoot": true,
+  "runAsUser": 101
+}
+
+ + + + gateway.priorityClassName + string + The name of the PriorityClass for gateway pods +
+null
+
+ + + + gateway.readinessProbe.httpGet.path + string + +
+"/"
+
+ + + + gateway.readinessProbe.httpGet.port + string + +
+"http-metrics"
+
+ + + + gateway.readinessProbe.initialDelaySeconds + int + +
+15
+
+ + + + gateway.readinessProbe.timeoutSeconds + int + +
+1
+
+ + + + gateway.replicas + int + Number of replicas for the gateway +
+1
+
+ + + + gateway.resources + object + Resource requests and limits for the gateway +
+{}
+
+ + + + gateway.service.annotations + object + Annotations for the gateway service +
+{}
+
+ + + + gateway.service.clusterIP + string + ClusterIP of the gateway service +
+null
+
+ + + + gateway.service.labels + object + Labels for gateway service +
+{}
+
+ + + + gateway.service.loadBalancerIP + string + Load balancer IP address if service type is LoadBalancer +
+null
+
+ + + + gateway.service.nodePort + int + Node port if service type is NodePort +
+null
+
+ + + + gateway.service.port + int + Port of the gateway service +
+80
+
+ + + + gateway.service.type + string + Type of the gateway service +
+"ClusterIP"
+
+ + + + gateway.terminationGracePeriodSeconds + int + Grace period to allow the gateway to shutdown before it is killed +
+30
+
+ + + + gateway.tolerations + list + Tolerations for gateway pods +
+[]
+
+ + + + gateway.topologySpreadConstraints + list + Topology Spread Constraints for gateway pods +
+[]
+
+ + + + gateway.verboseLogging + bool + Enable logging of 2xx and 3xx HTTP requests +
+true
+
+ + + + global.clusterDomain + string + configures cluster domain ("cluster.local" by default) +
+"cluster.local"
+
+ + + + global.dnsNamespace + string + configures DNS service namespace +
+"kube-system"
+
+ + + + global.dnsService + string + configures DNS service name +
+"kube-dns"
+
+ + + + global.image.registry + string + Overrides the Docker registry globally for all images +
+null
+
+ + + + global.priorityClassName + string + Overrides the priorityClassName for all pods +
+null
+
+ + + + imagePullSecrets + list + Image pull secrets for Docker images +
+[]
+
+ + + + indexGateway + object + Configuration for the index-gateway +
+{
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "index-gateway"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "appProtocol": {
+    "grpc": ""
+  },
+  "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "hostAliases": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "initContainers": [],
+  "joinMemberlist": true,
+  "maxUnavailable": null,
+  "nodeSelector": {},
+  "persistence": {
+    "annotations": {},
+    "enableStatefulSetAutoDeletePVC": false,
+    "enabled": false,
+    "inMemory": false,
+    "size": "10Gi",
+    "storageClass": null,
+    "whenDeleted": "Retain",
+    "whenScaled": "Retain"
+  },
+  "podAnnotations": {},
+  "podLabels": {},
+  "priorityClassName": null,
+  "replicas": 0,
+  "resources": {},
+  "serviceLabels": {},
+  "terminationGracePeriodSeconds": 300,
+  "tolerations": []
+}
+
+ + + + indexGateway.affinity + object + Affinity for index-gateway pods. +
+Hard node anti-affinity
+
+ + + + indexGateway.appProtocol + object + Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" +
+{
+  "grpc": ""
+}
+
+ + + + indexGateway.extraArgs + list + Additional CLI args for the index-gateway +
+[]
+
+ + + + indexGateway.extraContainers + list + Containers to add to the index-gateway pods +
+[]
+
+ + + + indexGateway.extraEnv + list + Environment variables to add to the index-gateway pods +
+[]
+
+ + + + indexGateway.extraEnvFrom + list + Environment variables from secrets or configmaps to add to the index-gateway pods +
+[]
+
+ + + + indexGateway.extraVolumeMounts + list + Volume mounts to add to the index-gateway pods +
+[]
+
+ + + + indexGateway.extraVolumes + list + Volumes to add to the index-gateway pods +
+[]
+
+ + + + indexGateway.hostAliases + list + hostAliases to add +
+[]
+
+ + + + indexGateway.image.registry + string + The Docker registry for the index-gateway image. Overrides `loki.image.registry` +
+null
+
+ + + + indexGateway.image.repository + string + Docker image repository for the index-gateway image. Overrides `loki.image.repository` +
+null
+
+ + + + indexGateway.image.tag + string + Docker image tag for the index-gateway image. Overrides `loki.image.tag` +
+null
+
+ + + + indexGateway.initContainers + list + Init containers to add to the index-gateway pods +
+[]
+
+ + + + indexGateway.joinMemberlist + bool + Whether the index gateway should join the memberlist hashring +
+true
+
+ + + + indexGateway.maxUnavailable + string + Pod Disruption Budget maxUnavailable +
+null
+
+ + + + indexGateway.nodeSelector + object + Node selector for index-gateway pods +
+{}
+
+ + + + indexGateway.persistence.annotations + object + Annotations for index gateway PVCs +
+{}
+
+ + + + indexGateway.persistence.enableStatefulSetAutoDeletePVC + bool + Enable StatefulSetAutoDeletePVC feature +
+false
+
+ + + + indexGateway.persistence.enabled + bool + Enable creating PVCs which is required when using boltdb-shipper +
+false
+
+ + + + indexGateway.persistence.inMemory + bool + Use emptyDir with ramdisk for storage. **Please note that all data in indexGateway will be lost on pod restart** +
+false
+
+ + + + indexGateway.persistence.size + string + Size of persistent or memory disk +
+"10Gi"
+
+ + + + indexGateway.persistence.storageClass + string + Storage class to be used. If defined, storageClassName: &lt;storageClass&gt;. If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). +
+null
+
+ + + + indexGateway.podAnnotations + object + Annotations for index-gateway pods +
+{}
+
+ + + + indexGateway.podLabels + object + Labels for index-gateway pods +
+{}
+
+ + + + indexGateway.priorityClassName + string + The name of the PriorityClass for index-gateway pods +
+null
+
+ + + + indexGateway.replicas + int + Number of replicas for the index-gateway +
+0
+
+ + + + indexGateway.resources + object + Resource requests and limits for the index-gateway +
+{}
+
+ + + + indexGateway.serviceLabels + object + Labels for index-gateway service +
+{}
+
+ + + + indexGateway.terminationGracePeriodSeconds + int + Grace period to allow the index-gateway to shutdown before it is killed. +
+300
+
+ + + + indexGateway.tolerations + list + Tolerations for index-gateway pods +
+[]
+
+ + + + ingester + object + Configuration for the ingester +
+{
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "ingester"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "appProtocol": {
+    "grpc": ""
+  },
+  "autoscaling": {
+    "behavior": {
+      "enabled": false,
+      "scaleDown": {},
+      "scaleUp": {}
+    },
+    "customMetrics": [],
+    "enabled": false,
+    "maxReplicas": 3,
+    "minReplicas": 1,
+    "targetCPUUtilizationPercentage": 60,
+    "targetMemoryUtilizationPercentage": null
+  },
+  "command": null,
+  "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "hostAliases": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "initContainers": [],
+  "lifecycle": {},
+  "livenessProbe": {},
+  "maxUnavailable": 1,
+  "nodeSelector": {},
+  "persistence": {
+    "claims": [
+      {
+        "name": "data",
+        "size": "10Gi",
+        "storageClass": null
+      }
+    ],
+    "enableStatefulSetAutoDeletePVC": false,
+    "enabled": false,
+    "inMemory": false,
+    "whenDeleted": "Retain",
+    "whenScaled": "Retain"
+  },
+  "podAnnotations": {},
+  "podLabels": {},
+  "priorityClassName": null,
+  "readinessProbe": {},
+  "replicas": 0,
+  "resources": {},
+  "serviceLabels": {},
+  "terminationGracePeriodSeconds": 300,
+  "tolerations": [],
+  "topologySpreadConstraints": [
+    {
+      "labelSelector": {
+        "matchLabels": {
+          "app.kubernetes.io/component": "ingester"
+        }
+      },
+      "maxSkew": 1,
+      "topologyKey": "kubernetes.io/hostname",
+      "whenUnsatisfiable": "ScheduleAnyway"
+    }
+  ],
+  "zoneAwareReplication": {
+    "enabled": true,
+    "maxUnavailablePct": 33,
+    "migration": {
+      "enabled": false,
+      "excludeDefaultZone": false,
+      "readPath": false,
+      "writePath": false
+    },
+    "zoneA": {
+      "annotations": {},
+      "extraAffinity": {},
+      "nodeSelector": null,
+      "podAnnotations": {}
+    },
+    "zoneB": {
+      "annotations": {},
+      "extraAffinity": {},
+      "nodeSelector": null,
+      "podAnnotations": {}
+    },
+    "zoneC": {
+      "annotations": {},
+      "extraAffinity": {},
+      "nodeSelector": null,
+      "podAnnotations": {}
+    }
+  }
+}
+
+ + + + ingester.affinity + object + Affinity for ingester pods. Ignored if zoneAwareReplication is enabled. +
+Hard node anti-affinity
+
+ + + + ingester.appProtocol + object + Adds the appProtocol field to the ingester service. This allows ingester to work with istio protocol selection. +
+{
+  "grpc": ""
+}
+
+ + + + ingester.appProtocol.grpc + string + Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" +
+""
+
+ + + + ingester.autoscaling.behavior.enabled + bool + Enable autoscaling behaviours +
+false
+
+ + + + ingester.autoscaling.behavior.scaleDown + object + define scale down policies, must conform to HPAScalingRules +
+{}
+
+ + + + ingester.autoscaling.behavior.scaleUp + object + define scale up policies, must conform to HPAScalingRules +
+{}
+
+ + + + ingester.autoscaling.customMetrics + list + Allows one to define custom metrics using the HPA/v2 schema (for example, Pods, Object or External metrics) +
+[]
+
+ + + + ingester.autoscaling.enabled + bool + Enable autoscaling for the ingester +
+false
+
+ + + + ingester.autoscaling.maxReplicas + int + Maximum autoscaling replicas for the ingester +
+3
+
+ + + + ingester.autoscaling.minReplicas + int + Minimum autoscaling replicas for the ingester +
+1
+
+ + + + ingester.autoscaling.targetCPUUtilizationPercentage + int + Target CPU utilisation percentage for the ingester +
+60
+
+ + + + ingester.autoscaling.targetMemoryUtilizationPercentage + string + Target memory utilisation percentage for the ingester +
+null
+
+ + + + ingester.command + string + Command to execute instead of defined in Docker image +
+null
+
+ + + + ingester.extraArgs + list + Additional CLI args for the ingester +
+[]
+
+ + + + ingester.extraContainers + list + Containers to add to the ingester pods +
+[]
+
+ + + + ingester.extraEnv + list + Environment variables to add to the ingester pods +
+[]
+
+ + + + ingester.extraEnvFrom + list + Environment variables from secrets or configmaps to add to the ingester pods +
+[]
+
+ + + + ingester.extraVolumeMounts + list + Volume mounts to add to the ingester pods +
+[]
+
+ + + + ingester.extraVolumes + list + Volumes to add to the ingester pods +
+[]
+
+ + + + ingester.hostAliases + list + hostAliases to add +
+[]
+
+ + + + ingester.image.registry + string + The Docker registry for the ingester image. Overrides `loki.image.registry` +
+null
+
+ + + + ingester.image.repository + string + Docker image repository for the ingester image. Overrides `loki.image.repository` +
+null
+
+ + + + ingester.image.tag + string + Docker image tag for the ingester image. Overrides `loki.image.tag` +
+null
+
+ + + + ingester.initContainers + list + Init containers to add to the ingester pods +
+[]
+
+ + + + ingester.lifecycle + object + Lifecycle for the ingester container +
+{}
+
+ + + + ingester.livenessProbe + object + liveness probe settings for ingester pods. If empty use `loki.livenessProbe` +
+{}
+
+ + + + ingester.maxUnavailable + int + Pod Disruption Budget maxUnavailable +
+1
+
+ + + + ingester.nodeSelector + object + Node selector for ingester pods +
+{}
+
+ + + + ingester.persistence.claims + list + List of the ingester PVCs +
+
+
+ + + + ingester.persistence.enableStatefulSetAutoDeletePVC + bool + Enable StatefulSetAutoDeletePVC feature +
+false
+
+ + + + ingester.persistence.enabled + bool + Enable creating PVCs which is required when using boltdb-shipper +
+false
+
+ + + + ingester.persistence.inMemory + bool + Use emptyDir with ramdisk for storage. **Please note that all data in ingester will be lost on pod restart** +
+false
+
+ + + + ingester.podAnnotations + object + Annotations for ingester pods +
+{}
+
+ + + + ingester.podLabels + object + Labels for ingester pods +
+{}
+
+ + + + ingester.readinessProbe + object + readiness probe settings for ingester pods. If empty, use `loki.readinessProbe` +
+{}
+
+ + + + ingester.replicas + int + Number of replicas for the ingester, when zoneAwareReplication.enabled is true, the total number of replicas will match this value with each zone having 1/3rd of the total replicas. +
+0
+
+ + + + ingester.resources + object + Resource requests and limits for the ingester +
+{}
+
+ + + + ingester.serviceLabels + object + Labels for ingester service +
+{}
+
+ + + + ingester.terminationGracePeriodSeconds + int + Grace period to allow the ingester to shutdown before it is killed. Especially for the ingester, this must be increased. It must be long enough so ingesters can be gracefully shutdown flushing/transferring all data and to successfully leave the member ring on shutdown. +
+300
+
+ + + + ingester.tolerations + list + Tolerations for ingester pods +
+[]
+
+ + + + ingester.topologySpreadConstraints + list + topologySpread for ingester pods. +
+Defaults to allow skew no more than 1 node
+
+ + + + ingester.zoneAwareReplication + object + Enabling zone awareness on ingesters will create 3 statefulsets where all writes will send a replica to each zone. This is primarily intended to accelerate rollout operations by allowing for multiple ingesters within a single zone to be shutdown and restart simultaneously (the remaining 2 zones will be guaranteed to have at least one copy of the data). Note: This can be used to run Loki over multiple cloud provider availability zones however this is not currently recommended as Loki is not optimized for this and cross zone network traffic costs can become extremely high extremely quickly. Even with zone awareness enabled, it is recommended to run Loki in a single availability zone. +
+{
+  "enabled": true,
+  "maxUnavailablePct": 33,
+  "migration": {
+    "enabled": false,
+    "excludeDefaultZone": false,
+    "readPath": false,
+    "writePath": false
+  },
+  "zoneA": {
+    "annotations": {},
+    "extraAffinity": {},
+    "nodeSelector": null,
+    "podAnnotations": {}
+  },
+  "zoneB": {
+    "annotations": {},
+    "extraAffinity": {},
+    "nodeSelector": null,
+    "podAnnotations": {}
+  },
+  "zoneC": {
+    "annotations": {},
+    "extraAffinity": {},
+    "nodeSelector": null,
+    "podAnnotations": {}
+  }
+}
+
+ + + + ingester.zoneAwareReplication.enabled + bool + Enable zone awareness. +
+true
+
+ + + + ingester.zoneAwareReplication.maxUnavailablePct + int + The percent of replicas in each zone that will be restarted at once. In a value of 0-100 +
+33
+
+ + + + ingester.zoneAwareReplication.migration + object + The migration block allows migrating non zone aware ingesters to zone aware ingesters. +
+{
+  "enabled": false,
+  "excludeDefaultZone": false,
+  "readPath": false,
+  "writePath": false
+}
+
+ + + + ingester.zoneAwareReplication.zoneA + object + zoneA configuration +
+{
+  "annotations": {},
+  "extraAffinity": {},
+  "nodeSelector": null,
+  "podAnnotations": {}
+}
+
+ + + + ingester.zoneAwareReplication.zoneA.annotations + object + Specific annotations to add to zone A statefulset +
+{}
+
+ + + + ingester.zoneAwareReplication.zoneA.extraAffinity + object + optionally define extra affinity rules, by default different zones are not allowed to schedule on the same host +
+{}
+
+ + + + ingester.zoneAwareReplication.zoneA.nodeSelector + string + optionally define a node selector for this zone +
+null
+
+ + + + ingester.zoneAwareReplication.zoneA.podAnnotations + object + Specific annotations to add to zone A pods +
+{}
+
+ + + + ingester.zoneAwareReplication.zoneB.annotations + object + Specific annotations to add to zone B statefulset +
+{}
+
+ + + + ingester.zoneAwareReplication.zoneB.extraAffinity + object + optionally define extra affinity rules, by default different zones are not allowed to schedule on the same host +
+{}
+
+ + + + ingester.zoneAwareReplication.zoneB.nodeSelector + string + optionally define a node selector for this zone +
+null
+
+ + + + ingester.zoneAwareReplication.zoneB.podAnnotations + object + Specific annotations to add to zone B pods +
+{}
+
+ + + + ingester.zoneAwareReplication.zoneC.annotations + object + Specific annotations to add to zone C statefulset +
+{}
+
+ + + + ingester.zoneAwareReplication.zoneC.extraAffinity + object + optionally define extra affinity rules, by default different zones are not allowed to schedule on the same host +
+{}
+
+ + + + ingester.zoneAwareReplication.zoneC.nodeSelector + string + optionally define a node selector for this zone +
+null
+
+ + + + ingester.zoneAwareReplication.zoneC.podAnnotations + object + Specific annotations to add to zone C pods +
+{}
+
+ + + + ingress + object + Ingress configuration Use either this ingress or the gateway, but not both at once. If you enable this, make sure to disable the gateway. You'll need to supply authn configuration for your ingress controller. +
+{
+  "annotations": {},
+  "enabled": false,
+  "hosts": [
+    "loki.example.com"
+  ],
+  "ingressClassName": "",
+  "labels": {},
+  "paths": {
+    "read": [
+      "/api/prom/tail",
+      "/loki/api/v1/tail",
+      "/loki/api",
+      "/api/prom/rules",
+      "/loki/api/v1/rules",
+      "/prometheus/api/v1/rules",
+      "/prometheus/api/v1/alerts"
+    ],
+    "singleBinary": [
+      "/api/prom/push",
+      "/loki/api/v1/push",
+      "/api/prom/tail",
+      "/loki/api/v1/tail",
+      "/loki/api",
+      "/api/prom/rules",
+      "/loki/api/v1/rules",
+      "/prometheus/api/v1/rules",
+      "/prometheus/api/v1/alerts"
+    ],
+    "write": [
+      "/api/prom/push",
+      "/loki/api/v1/push"
+    ]
+  },
+  "tls": []
+}
+
+ + + + ingress.hosts + list + Hosts configuration for the ingress, passed through the `tpl` function to allow templating +
+[
+  "loki.example.com"
+]
+
+ + + + ingress.tls + list + TLS configuration for the ingress. Hosts passed through the `tpl` function to allow templating +
+[]
+
+ + + + kubectlImage + object + kubectlImage is used in the enterprise provisioner and tokengen jobs +
+{
+  "digest": null,
+  "pullPolicy": "IfNotPresent",
+  "registry": "docker.io",
+  "repository": "bitnami/kubectl",
+  "tag": null
+}
+
+ + + + kubectlImage.digest + string + Overrides the image tag with an image digest +
+null
+
+ + + + kubectlImage.pullPolicy + string + Docker image pull policy +
+"IfNotPresent"
+
+ + + + kubectlImage.registry + string + The Docker registry +
+"docker.io"
+
+ + + + kubectlImage.repository + string + Docker image repository +
+"bitnami/kubectl"
+
+ + + + kubectlImage.tag + string + Overrides the image tag whose default is the chart's appVersion +
+null
+
+ + + + loki + object + Configuration for running Loki +
+{
+  "analytics": {},
+  "annotations": {},
+  "auth_enabled": true,
+  "commonConfig": {
+    "compactor_address": "{{ include \"loki.compactorAddress\" . }}",
+    "path_prefix": "/var/loki",
+    "replication_factor": 3
+  },
+  "compactor": {},
+  "config": "{{- if .Values.enterprise.enabled}}\n{{- tpl .Values.enterprise.config . }}\n{{- else }}\nauth_enabled: {{ .Values.loki.auth_enabled }}\n{{- end }}\n\n{{- with .Values.loki.server }}\nserver:\n  {{- toYaml . | nindent 2}}\n{{- end}}\n\nmemberlist:\n{{- if .Values.loki.memberlistConfig }}\n  {{- toYaml .Values.loki.memberlistConfig | nindent 2 }}\n{{- else }}\n{{- if .Values.loki.extraMemberlistConfig}}\n{{- toYaml .Values.loki.extraMemberlistConfig | nindent 2}}\n{{- end }}\n  join_members:\n    - {{ include \"loki.memberlist\" . }}\n    {{- with .Values.migrate.fromDistributed }}\n    {{- if .enabled }}\n    - {{ .memberlistService }}\n    {{- end }}\n    {{- end }}\n{{- end }}\n\n{{- with .Values.loki.ingester }}\ningester:\n  {{- tpl (. | toYaml) $ | nindent 4 }}\n{{- end }}\n\n{{- if .Values.loki.commonConfig}}\ncommon:\n{{- toYaml .Values.loki.commonConfig | nindent 2}}\n  storage:\n  {{- include \"loki.commonStorageConfig\" . | nindent 4}}\n{{- end}}\n\n{{- with .Values.loki.limits_config }}\nlimits_config:\n  {{- tpl (. 
| toYaml) $ | nindent 4 }}\n{{- end }}\n\nruntime_config:\n  file: /etc/loki/runtime-config/runtime-config.yaml\n\n{{- with .Values.chunksCache }}\n{{- if .enabled }}\nchunk_store_config:\n  chunk_cache_config:\n    default_validity: {{ .defaultValidity }}\n    background:\n      writeback_goroutines: {{ .writebackParallelism }}\n      writeback_buffer: {{ .writebackBuffer }}\n      writeback_size_limit: {{ .writebackSizeLimit }}\n    memcached:\n      batch_size: {{ .batchSize }}\n      parallelism: {{ .parallelism }}\n    memcached_client:\n      addresses: dnssrvnoa+_memcached-client._tcp.{{ template \"loki.fullname\" $ }}-chunks-cache.{{ $.Release.Namespace }}.svc\n      consistent_hash: true\n      timeout: {{ .timeout }}\n      max_idle_conns: 72\n{{- end }}\n{{- end }}\n\n{{- if .Values.loki.schemaConfig }}\nschema_config:\n{{- toYaml .Values.loki.schemaConfig | nindent 2}}\n{{- end }}\n\n{{- if .Values.loki.useTestSchema }}\nschema_config:\n{{- toYaml .Values.loki.testSchemaConfig | nindent 2}}\n{{- end }}\n\n{{ include \"loki.rulerConfig\" . }}\n\n{{- if or .Values.tableManager.retention_deletes_enabled .Values.tableManager.retention_period }}\ntable_manager:\n  retention_deletes_enabled: {{ .Values.tableManager.retention_deletes_enabled }}\n  retention_period: {{ .Values.tableManager.retention_period }}\n{{- end }}\n\nquery_range:\n  align_queries_with_step: true\n  {{- with .Values.loki.query_range }}\n  {{- tpl (. 
| toYaml) $ | nindent 4 }}\n  {{- end }}\n  {{- if .Values.resultsCache.enabled }}\n  {{- with .Values.resultsCache }}\n  cache_results: true\n  results_cache:\n    cache:\n      default_validity: {{ .defaultValidity }}\n      background:\n        writeback_goroutines: {{ .writebackParallelism }}\n        writeback_buffer: {{ .writebackBuffer }}\n        writeback_size_limit: {{ .writebackSizeLimit }}\n      memcached_client:\n        consistent_hash: true\n        addresses: dnssrvnoa+_memcached-client._tcp.{{ template \"loki.fullname\" $ }}-results-cache.{{ $.Release.Namespace }}.svc\n        timeout: {{ .timeout }}\n        update_interval: 1m\n  {{- end }}\n  {{- end }}\n\n{{- with .Values.loki.storage_config }}\nstorage_config:\n  {{- tpl (. | toYaml) $ | nindent 4 }}\n{{- end }}\n\n{{- with .Values.loki.query_scheduler }}\nquery_scheduler:\n  {{- tpl (. | toYaml) $ | nindent 4 }}\n{{- end }}\n\n{{- with .Values.loki.compactor }}\ncompactor:\n  {{- tpl (. | toYaml) $ | nindent 4 }}\n{{- end }}\n\n{{- with .Values.loki.analytics }}\nanalytics:\n  {{- tpl (. | toYaml) $ | nindent 4 }}\n{{- end }}\n\n{{- with .Values.loki.querier }}\nquerier:\n  {{- tpl (. | toYaml) $ | nindent 4 }}\n{{- end }}\n\n{{- with .Values.loki.index_gateway }}\nindex_gateway:\n  {{- tpl (. | toYaml) $ | nindent 4 }}\n{{- end }}\n\n{{- with .Values.loki.frontend }}\nfrontend:\n  {{- tpl (. | toYaml) $ | nindent 4 }}\n{{- end }}\n\n{{- with .Values.loki.frontend_worker }}\nfrontend_worker:\n  {{- tpl (. | toYaml) $ | nindent 4 }}\n{{- end }}\n\n{{- with .Values.loki.distributor }}\ndistributor:\n  {{- tpl (. | toYaml) $ | nindent 4 }}\n{{- end }}\n\ntracing:\n  enabled: {{ .Values.loki.tracing.enabled }}\n",
+  "configObjectName": "{{ include \"loki.name\" . }}",
+  "configStorageType": "ConfigMap",
+  "containerSecurityContext": {
+    "allowPrivilegeEscalation": false,
+    "capabilities": {
+      "drop": [
+        "ALL"
+      ]
+    },
+    "readOnlyRootFilesystem": true
+  },
+  "distributor": {},
+  "enableServiceLinks": true,
+  "extraMemberlistConfig": {},
+  "frontend": {
+    "scheduler_address": "{{ include \"loki.querySchedulerAddress\" . }}",
+    "tail_proxy_url": "{{ include \"loki.querierAddress\" . }}"
+  },
+  "frontend_worker": {
+    "scheduler_address": "{{ include \"loki.querySchedulerAddress\" . }}"
+  },
+  "generatedConfigObjectName": "{{ include \"loki.name\" . }}",
+  "image": {
+    "digest": null,
+    "pullPolicy": "IfNotPresent",
+    "registry": "docker.io",
+    "repository": "grafana/loki",
+    "tag": null
+  },
+  "index_gateway": {
+    "mode": "simple"
+  },
+  "ingester": {},
+  "limits_config": {
+    "max_cache_freshness_per_query": "10m",
+    "query_timeout": "300s",
+    "reject_old_samples": true,
+    "reject_old_samples_max_age": "168h",
+    "split_queries_by_interval": "15m"
+  },
+  "memberlistConfig": {},
+  "memcached": {
+    "chunk_cache": {
+      "batch_size": 256,
+      "enabled": false,
+      "host": "",
+      "parallelism": 10,
+      "service": "memcached-client"
+    },
+    "results_cache": {
+      "default_validity": "12h",
+      "enabled": false,
+      "host": "",
+      "service": "memcached-client",
+      "timeout": "500ms"
+    }
+  },
+  "podAnnotations": {},
+  "podLabels": {},
+  "podSecurityContext": {
+    "fsGroup": 10001,
+    "runAsGroup": 10001,
+    "runAsNonRoot": true,
+    "runAsUser": 10001
+  },
+  "querier": {},
+  "query_range": {},
+  "query_scheduler": {},
+  "readinessProbe": {
+    "httpGet": {
+      "path": "/ready",
+      "port": "http-metrics"
+    },
+    "initialDelaySeconds": 30,
+    "timeoutSeconds": 1
+  },
+  "revisionHistoryLimit": 10,
+  "rulerConfig": {},
+  "runtimeConfig": {},
+  "schemaConfig": {},
+  "server": {
+    "grpc_listen_port": 9095,
+    "http_listen_port": 3100,
+    "http_server_read_timeout": "600s",
+    "http_server_write_timeout": "600s"
+  },
+  "serviceAnnotations": {},
+  "serviceLabels": {},
+  "storage": {
+    "azure": {
+      "accountKey": null,
+      "accountName": null,
+      "connectionString": null,
+      "endpointSuffix": null,
+      "requestTimeout": null,
+      "useFederatedToken": false,
+      "useManagedIdentity": false,
+      "userAssignedId": null
+    },
+    "bucketNames": {
+      "admin": "admin",
+      "chunks": "chunks",
+      "ruler": "ruler"
+    },
+    "filesystem": {
+      "chunks_directory": "/var/loki/chunks",
+      "rules_directory": "/var/loki/rules"
+    },
+    "gcs": {
+      "chunkBufferSize": 0,
+      "enableHttp2": true,
+      "requestTimeout": "0s"
+    },
+    "s3": {
+      "accessKeyId": null,
+      "backoff_config": {},
+      "endpoint": null,
+      "http_config": {},
+      "insecure": false,
+      "region": null,
+      "s3": null,
+      "s3ForcePathStyle": false,
+      "secretAccessKey": null,
+      "signatureVersion": null
+    },
+    "swift": {
+      "auth_url": null,
+      "auth_version": null,
+      "connect_timeout": null,
+      "container_name": null,
+      "domain_id": null,
+      "domain_name": null,
+      "internal": null,
+      "max_retries": null,
+      "password": null,
+      "project_domain_id": null,
+      "project_domain_name": null,
+      "project_id": null,
+      "project_name": null,
+      "region_name": null,
+      "request_timeout": null,
+      "user_domain_id": null,
+      "user_domain_name": null,
+      "user_id": null,
+      "username": null
+    },
+    "type": "s3"
+  },
+  "storage_config": {
+    "boltdb_shipper": {
+      "index_gateway_client": {
+        "server_address": "{{ include \"loki.indexGatewayAddress\" . }}"
+      }
+    },
+    "hedging": {
+      "at": "250ms",
+      "max_per_second": 20,
+      "up_to": 3
+    },
+    "tsdb_shipper": {
+      "index_gateway_client": {
+        "server_address": "{{ include \"loki.indexGatewayAddress\" . }}"
+      }
+    }
+  },
+  "structuredConfig": {},
+  "tenants": [],
+  "testSchemaConfig": {
+    "configs": [
+      {
+        "from": "2024-04-01",
+        "index": {
+          "period": "24h",
+          "prefix": "index_"
+        },
+        "object_store": "filesystem",
+        "schema": "v13",
+        "store": "tsdb"
+      }
+    ]
+  },
+  "tracing": {
+    "enabled": false
+  },
+  "useTestSchema": false
+}
+
+ + + + loki.analytics + object + Optional analytics configuration +
+{}
+
+ + + + loki.annotations + object + Common annotations for all deployments/StatefulSets +
+{}
+
+ + + + loki.commonConfig + object + Check https://grafana.com/docs/loki/latest/configuration/#common_config for more info on how to provide a common configuration +
+{
+  "compactor_address": "{{ include \"loki.compactorAddress\" . }}",
+  "path_prefix": "/var/loki",
+  "replication_factor": 3
+}
+
+ + + + loki.compactor + object + Optional compactor configuration +
+{}
+
+ + + + loki.config + string + Config file contents for Loki +
+See values.yaml
+
+ + + + loki.configObjectName + string + The name of the object which Loki will mount as a volume containing the config. If the configStorageType is Secret, this will be the name of the Secret, if it is ConfigMap, this will be the name of the ConfigMap. The value will be passed through tpl. +
+"{{ include \"loki.name\" . }}"
+
+ + + + loki.configStorageType + string + Defines what kind of object stores the configuration, a ConfigMap or a Secret. In order to move sensitive information (such as credentials) from the ConfigMap/Secret to a more secure location (e.g. vault), it is possible to use [environment variables in the configuration](https://grafana.com/docs/loki/latest/configuration/#use-environment-variables-in-the-configuration). Such environment variables can be then stored in a separate Secret and injected via the global.extraEnvFrom value. For details about environment injection from a Secret please see [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-as-container-environment-variables). +
+"ConfigMap"
+
+ + + + loki.containerSecurityContext + object + The SecurityContext for Loki containers +
+{
+  "allowPrivilegeEscalation": false,
+  "capabilities": {
+    "drop": [
+      "ALL"
+    ]
+  },
+  "readOnlyRootFilesystem": true
+}
+
+ + + + loki.distributor + object + Optional distributor configuration +
+{}
+
+ + + + loki.enableServiceLinks + bool + Should enableServiceLinks be enabled. Default to enable +
+true
+
+ + + + loki.extraMemberlistConfig + object + Extra memberlist configuration +
+{}
+
+ + + + loki.generatedConfigObjectName + string + The name of the Secret or ConfigMap that will be created by this chart. If empty, no configmap or secret will be created. The value will be passed through tpl. +
+"{{ include \"loki.name\" . }}"
+
+ + + + loki.image.digest + string + Overrides the image tag with an image digest +
+null
+
+ + + + loki.image.pullPolicy + string + Docker image pull policy +
+"IfNotPresent"
+
+ + + + loki.image.registry + string + The Docker registry +
+"docker.io"
+
+ + + + loki.image.repository + string + Docker image repository +
+"grafana/loki"
+
+ + + + loki.image.tag + string + Overrides the image tag whose default is the chart's appVersion TODO: needed for 3rd target backend functionality revert to null or latest once this behavior is released +
+null
+
+ + + + loki.index_gateway + object + Optional index gateway configuration +
+{
+  "mode": "simple"
+}
+
+ + + + loki.ingester + object + Optional ingester configuration +
+{}
+
+ + + + loki.limits_config + object + Limits config +
+{
+  "max_cache_freshness_per_query": "10m",
+  "query_timeout": "300s",
+  "reject_old_samples": true,
+  "reject_old_samples_max_age": "168h",
+  "split_queries_by_interval": "15m"
+}
+
+ + + + loki.memberlistConfig + object + memberlist configuration (overrides embedded default) +
+{}
+
+ + + + loki.memcached + object + Configure memcached as an external cache for chunk and results cache. Disabled by default; must enable and specify a host for each cache you would like to use. +
+{
+  "chunk_cache": {
+    "batch_size": 256,
+    "enabled": false,
+    "host": "",
+    "parallelism": 10,
+    "service": "memcached-client"
+  },
+  "results_cache": {
+    "default_validity": "12h",
+    "enabled": false,
+    "host": "",
+    "service": "memcached-client",
+    "timeout": "500ms"
+  }
+}
+
+ + + + loki.podAnnotations + object + Common annotations for all pods +
+{}
+
+ + + + loki.podLabels + object + Common labels for all pods +
+{}
+
+ + + + loki.podSecurityContext + object + The SecurityContext for Loki pods +
+{
+  "fsGroup": 10001,
+  "runAsGroup": 10001,
+  "runAsNonRoot": true,
+  "runAsUser": 10001
+}
+
+ + + + loki.querier + object + Optional querier configuration +
+{}
+
+ + + + loki.query_range + object + Optional query_range configuration +
+{}
+
+ + + + loki.query_scheduler + object + Additional query scheduler config +
+{}
+
+ + + + loki.revisionHistoryLimit + int + The number of old ReplicaSets to retain to allow rollback +
+10
+
+ + + + loki.rulerConfig + object + Check https://grafana.com/docs/loki/latest/configuration/#ruler for more info on configuring ruler +
+{}
+
+ + + + loki.runtimeConfig + object + Provides a reloadable runtime configuration file for some specific configuration +
+{}
+
+ + + + loki.schemaConfig + object + Check https://grafana.com/docs/loki/latest/configuration/#schema_config for more info on how to configure schemas +
+{}
+
+ + + + loki.server + object + Check https://grafana.com/docs/loki/latest/configuration/#server for more info on the server configuration. +
+{
+  "grpc_listen_port": 9095,
+  "http_listen_port": 3100,
+  "http_server_read_timeout": "600s",
+  "http_server_write_timeout": "600s"
+}
+
+ + + + loki.serviceAnnotations + object + Common annotations for all services +
+{}
+
+ + + + loki.serviceLabels + object + Common labels for all services +
+{}
+
+ + + + loki.storage + object + Storage config. Providing this will automatically populate all necessary storage configs in the templated config. +
+{
+  "azure": {
+    "accountKey": null,
+    "accountName": null,
+    "connectionString": null,
+    "endpointSuffix": null,
+    "requestTimeout": null,
+    "useFederatedToken": false,
+    "useManagedIdentity": false,
+    "userAssignedId": null
+  },
+  "bucketNames": {
+    "admin": "admin",
+    "chunks": "chunks",
+    "ruler": "ruler"
+  },
+  "filesystem": {
+    "chunks_directory": "/var/loki/chunks",
+    "rules_directory": "/var/loki/rules"
+  },
+  "gcs": {
+    "chunkBufferSize": 0,
+    "enableHttp2": true,
+    "requestTimeout": "0s"
+  },
+  "s3": {
+    "accessKeyId": null,
+    "backoff_config": {},
+    "endpoint": null,
+    "http_config": {},
+    "insecure": false,
+    "region": null,
+    "s3": null,
+    "s3ForcePathStyle": false,
+    "secretAccessKey": null,
+    "signatureVersion": null
+  },
+  "swift": {
+    "auth_url": null,
+    "auth_version": null,
+    "connect_timeout": null,
+    "container_name": null,
+    "domain_id": null,
+    "domain_name": null,
+    "internal": null,
+    "max_retries": null,
+    "password": null,
+    "project_domain_id": null,
+    "project_domain_name": null,
+    "project_id": null,
+    "project_name": null,
+    "region_name": null,
+    "request_timeout": null,
+    "user_domain_id": null,
+    "user_domain_name": null,
+    "user_id": null,
+    "username": null
+  },
+  "type": "s3"
+}
+
+ + + + loki.storage.s3.backoff_config + object + Check https://grafana.com/docs/loki/latest/configure/#s3_storage_config for more info on how to provide a backoff_config +
+{}
+
+ + + + loki.storage_config + object + Additional storage config +
+{
+  "boltdb_shipper": {
+    "index_gateway_client": {
+      "server_address": "{{ include \"loki.indexGatewayAddress\" . }}"
+    }
+  },
+  "hedging": {
+    "at": "250ms",
+    "max_per_second": 20,
+    "up_to": 3
+  },
+  "tsdb_shipper": {
+    "index_gateway_client": {
+      "server_address": "{{ include \"loki.indexGatewayAddress\" . }}"
+    }
+  }
+}
+
+ + + + loki.structuredConfig + object + Structured loki configuration, takes precedence over `loki.config`, `loki.schemaConfig`, `loki.storageConfig` +
+{}
+
+ + + + loki.tenants + list + Tenants list to be created on nginx htpasswd file, with name and password keys +
+[]
+
+ + + + loki.tracing + object + Enable tracing +
+{
+  "enabled": false
+}
+
+ + + + loki.useTestSchema + bool + a real Loki install requires a proper schemaConfig defined above this, however for testing or playing around you can enable useTestSchema +
+false
+
+ + + + lokiCanary.annotations + object + Additional annotations for the `loki-canary` Daemonset +
+{}
+
+ + + + lokiCanary.dnsConfig + object + DNS config for canary pods +
+{}
+
+ + + + lokiCanary.enabled + bool + +
+true
+
+ + + + lokiCanary.extraArgs + list + Additional CLI arguments for the `loki-canary` command +
+[]
+
+ + + + lokiCanary.extraEnv + list + Environment variables to add to the canary pods +
+[]
+
+ + + + lokiCanary.extraEnvFrom + list + Environment variables from secrets or configmaps to add to the canary pods +
+[]
+
+ + + + lokiCanary.extraVolumeMounts + list + Volume mounts to add to the canary pods +
+[]
+
+ + + + lokiCanary.extraVolumes + list + Volumes to add to the canary pods +
+[]
+
+ + + + lokiCanary.image + object + Image to use for loki canary +
+{
+  "digest": null,
+  "pullPolicy": "IfNotPresent",
+  "registry": "docker.io",
+  "repository": "grafana/loki-canary",
+  "tag": null
+}
+
+ + + + lokiCanary.image.digest + string + Overrides the image tag with an image digest +
+null
+
+ + + + lokiCanary.image.pullPolicy + string + Docker image pull policy +
+"IfNotPresent"
+
+ + + + lokiCanary.image.registry + string + The Docker registry +
+"docker.io"
+
+ + + + lokiCanary.image.repository + string + Docker image repository +
+"grafana/loki-canary"
+
+ + + + lokiCanary.image.tag + string + Overrides the image tag whose default is the chart's appVersion +
+null
+
+ + + + lokiCanary.labelname + string + The name of the label to look for at loki when doing the checks. +
+"pod"
+
+ + + + lokiCanary.nodeSelector + object + Node selector for canary pods +
+{}
+
+ + + + lokiCanary.podLabels + object + Additional labels for each `loki-canary` pod +
+{}
+
+ + + + lokiCanary.priorityClassName + string + The name of the PriorityClass for loki-canary pods +
+null
+
+ + + + lokiCanary.push + bool + +
+true
+
+ + + + lokiCanary.resources + object + Resource requests and limits for the canary +
+{}
+
+ + + + lokiCanary.service.annotations + object + Annotations for loki-canary Service +
+{}
+
+ + + + lokiCanary.service.labels + object + Additional labels for loki-canary Service +
+{}
+
+ + + + lokiCanary.tolerations + list + Tolerations for canary pods +
+[]
+
+ + + + lokiCanary.updateStrategy + object + Update strategy for the `loki-canary` Daemonset pods +
+{
+  "rollingUpdate": {
+    "maxUnavailable": 1
+  },
+  "type": "RollingUpdate"
+}
+
+ + + + memberlist.service.publishNotReadyAddresses + bool + +
+false
+
+ + + + memcached.containerSecurityContext + object + The SecurityContext for memcached containers +
+{
+  "allowPrivilegeEscalation": false,
+  "capabilities": {
+    "drop": [
+      "ALL"
+    ]
+  },
+  "readOnlyRootFilesystem": true
+}
+
+ + + + memcached.image.pullPolicy + string + Memcached Docker image pull policy +
+"IfNotPresent"
+
+ + + + memcached.image.repository + string + Memcached Docker image repository +
+"memcached"
+
+ + + + memcached.image.tag + string + Memcached Docker image tag +
+"1.6.23-alpine"
+
+ + + + memcached.podSecurityContext + object + The SecurityContext override for memcached pods +
+{}
+
+ + + + memcached.priorityClassName + string + The name of the PriorityClass for memcached pods +
+null
+
+ + + + memcachedExporter.containerSecurityContext + object + The SecurityContext for memcached exporter containers +
+{
+  "allowPrivilegeEscalation": false,
+  "capabilities": {
+    "drop": [
+      "ALL"
+    ]
+  },
+  "readOnlyRootFilesystem": true
+}
+
+ + + + memcachedExporter.enabled + bool + Whether memcached metrics should be exported +
+true
+
+ + + + memcachedExporter.extraArgs + object + Extra args to add to the exporter container. Example: extraArgs: memcached.tls.enable: true memcached.tls.cert-file: /certs/cert.crt memcached.tls.key-file: /certs/cert.key memcached.tls.ca-file: /certs/ca.crt memcached.tls.insecure-skip-verify: false memcached.tls.server-name: memcached +
+{}
+
+ + + + memcachedExporter.image.pullPolicy + string + +
+"IfNotPresent"
+
+ + + + memcachedExporter.image.repository + string + +
+"prom/memcached-exporter"
+
+ + + + memcachedExporter.image.tag + string + +
+"v0.14.2"
+
+ + + + memcachedExporter.resources.limits + object + +
+{}
+
+ + + + memcachedExporter.resources.requests + object + +
+{}
+
+ + + + migrate + object + Options that may be necessary when performing a migration from another helm chart +
+{
+  "fromDistributed": {
+    "enabled": false,
+    "memberlistService": ""
+  }
+}
+
+ + + + migrate.fromDistributed + object + When migrating from a distributed chart like loki-distributed or enterprise-logs +
+{
+  "enabled": false,
+  "memberlistService": ""
+}
+
+ + + + migrate.fromDistributed.enabled + bool + Set to true if migrating from a distributed helm chart +
+false
+
+ + + + migrate.fromDistributed.memberlistService + string + If migrating from a distributed service, provide the distributed deployment's memberlist service DNS so the new deployment can join its ring. +
+""
+
+ + + + minio + object + Configuration for the minio subchart +
+{
+  "buckets": [
+    {
+      "name": "chunks",
+      "policy": "none",
+      "purge": false
+    },
+    {
+      "name": "ruler",
+      "policy": "none",
+      "purge": false
+    },
+    {
+      "name": "admin",
+      "policy": "none",
+      "purge": false
+    }
+  ],
+  "drivesPerNode": 2,
+  "enabled": false,
+  "persistence": {
+    "size": "5Gi"
+  },
+  "replicas": 1,
+  "resources": {
+    "requests": {
+      "cpu": "100m",
+      "memory": "128Mi"
+    }
+  },
+  "rootPassword": "supersecret",
+  "rootUser": "enterprise-logs"
+}
+
+ + + + monitoring + object + DEPRECATED Monitoring section determines which monitoring features to enable, this section is being replaced by https://github.com/grafana/meta-monitoring-chart +
+{
+  "dashboards": {
+    "annotations": {},
+    "enabled": false,
+    "labels": {
+      "grafana_dashboard": "1"
+    },
+    "namespace": null
+  },
+  "rules": {
+    "additionalGroups": [],
+    "additionalRuleLabels": {},
+    "alerting": true,
+    "annotations": {},
+    "disabled": {},
+    "enabled": false,
+    "labels": {},
+    "namespace": null
+  },
+  "selfMonitoring": {
+    "enabled": false,
+    "grafanaAgent": {
+      "annotations": {},
+      "enableConfigReadAPI": false,
+      "installOperator": false,
+      "labels": {},
+      "priorityClassName": null,
+      "resources": {},
+      "tolerations": []
+    },
+    "logsInstance": {
+      "annotations": {},
+      "clients": null,
+      "labels": {}
+    },
+    "podLogs": {
+      "additionalPipelineStages": [],
+      "annotations": {},
+      "apiVersion": "monitoring.grafana.com/v1alpha1",
+      "labels": {},
+      "relabelings": []
+    },
+    "tenant": {
+      "name": "self-monitoring",
+      "secretNamespace": "{{ .Release.Namespace }}"
+    }
+  },
+  "serviceMonitor": {
+    "annotations": {},
+    "enabled": false,
+    "interval": "15s",
+    "labels": {},
+    "metricRelabelings": [],
+    "metricsInstance": {
+      "annotations": {},
+      "enabled": true,
+      "labels": {},
+      "remoteWrite": null
+    },
+    "namespaceSelector": {},
+    "relabelings": [],
+    "scheme": "http",
+    "scrapeTimeout": null,
+    "tlsConfig": null
+  }
+}
+
+ + + + monitoring.dashboards.annotations + object + Additional annotations for the dashboards ConfigMap +
+{}
+
+ + + + monitoring.dashboards.enabled + bool + If enabled, create configmap with dashboards for monitoring Loki +
+false
+
+ + + + monitoring.dashboards.labels + object + Labels for the dashboards ConfigMap +
+{
+  "grafana_dashboard": "1"
+}
+
+ + + + monitoring.dashboards.namespace + string + Alternative namespace to create dashboards ConfigMap in +
+null
+
+ + + + monitoring.rules.additionalGroups + list + Additional groups to add to the rules file +
+[]
+
+ + + + monitoring.rules.additionalRuleLabels + object + Additional labels for PrometheusRule alerts +
+{}
+
+ + + + monitoring.rules.alerting + bool + Include alerting rules +
+true
+
+ + + + monitoring.rules.annotations + object + Additional annotations for the rules PrometheusRule resource +
+{}
+
+ + + + monitoring.rules.disabled + object + If you disable all the alerts and keep .monitoring.rules.alerting set to true, the chart will fail to render. +
+{}
+
+ + + + monitoring.rules.enabled + bool + If enabled, create PrometheusRule resource with Loki recording rules +
+false
+
+ + + + monitoring.rules.labels + object + Additional labels for the rules PrometheusRule resource +
+{}
+
+ + + + monitoring.rules.namespace + string + Alternative namespace to create PrometheusRule resources in +
+null
+
+ + + + monitoring.selfMonitoring.grafanaAgent.annotations + object + Grafana Agent annotations +
+{}
+
+ + + + monitoring.selfMonitoring.grafanaAgent.enableConfigReadAPI + bool + Enable the config read api on port 8080 of the agent +
+false
+
+ + + + monitoring.selfMonitoring.grafanaAgent.installOperator + bool + Controls whether to install the Grafana Agent Operator and its CRDs. Note that helm will not install CRDs if this flag is enabled during an upgrade. In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds +
+false
+
+ + + + monitoring.selfMonitoring.grafanaAgent.labels + object + Additional Grafana Agent labels +
+{}
+
+ + + + monitoring.selfMonitoring.grafanaAgent.priorityClassName + string + The name of the PriorityClass for GrafanaAgent pods +
+null
+
+ + + + monitoring.selfMonitoring.grafanaAgent.resources + object + Resource requests and limits for the grafanaAgent pods +
+{}
+
+ + + + monitoring.selfMonitoring.grafanaAgent.tolerations + list + Tolerations for GrafanaAgent pods +
+[]
+
+ + + + monitoring.selfMonitoring.logsInstance.annotations + object + LogsInstance annotations +
+{}
+
+ + + + monitoring.selfMonitoring.logsInstance.clients + string + Additional clients for remote write +
+null
+
+ + + + monitoring.selfMonitoring.logsInstance.labels + object + Additional LogsInstance labels +
+{}
+
+ + + + monitoring.selfMonitoring.podLogs.additionalPipelineStages + list + Additional pipeline stages to process logs after scraping https://grafana.com/docs/agent/latest/operator/api/#pipelinestagespec-a-namemonitoringgrafanacomv1alpha1pipelinestagespeca +
+[]
+
+ + + + monitoring.selfMonitoring.podLogs.annotations + object + PodLogs annotations +
+{}
+
+ + + + monitoring.selfMonitoring.podLogs.apiVersion + string + PodLogs version +
+"monitoring.grafana.com/v1alpha1"
+
+ + + + monitoring.selfMonitoring.podLogs.labels + object + Additional PodLogs labels +
+{}
+
+ + + + monitoring.selfMonitoring.podLogs.relabelings + list + PodLogs relabel configs to apply to samples before scraping https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig +
+[]
+
+ + + + monitoring.selfMonitoring.tenant + object + Tenant to use for self monitoring +
+{
+  "name": "self-monitoring",
+  "secretNamespace": "{{ .Release.Namespace }}"
+}
+
+ + + + monitoring.selfMonitoring.tenant.name + string + Name of the tenant +
+"self-monitoring"
+
+ + + + monitoring.selfMonitoring.tenant.secretNamespace + string + Namespace to create additional tenant token secret in. Useful if your Grafana instance is in a separate namespace. Token will still be created in the canary namespace. +
+"{{ .Release.Namespace }}"
+
+ + + + monitoring.serviceMonitor.annotations + object + ServiceMonitor annotations +
+{}
+
+ + + + monitoring.serviceMonitor.enabled + bool + If enabled, ServiceMonitor resources for Prometheus Operator are created +
+false
+
+ + + + monitoring.serviceMonitor.interval + string + ServiceMonitor scrape interval Default is 15s because included recording rules use a 1m rate, and scrape interval needs to be at least 1/4 rate interval. +
+"15s"
+
+ + + + monitoring.serviceMonitor.labels + object + Additional ServiceMonitor labels +
+{}
+
+ + + + monitoring.serviceMonitor.metricRelabelings + list + ServiceMonitor metric relabel configs to apply to samples before ingestion https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint +
+[]
+
+ + + + monitoring.serviceMonitor.metricsInstance + object + If defined, will create a MetricsInstance for the Grafana Agent Operator. +
+{
+  "annotations": {},
+  "enabled": true,
+  "labels": {},
+  "remoteWrite": null
+}
+
+ + + + monitoring.serviceMonitor.metricsInstance.annotations + object + MetricsInstance annotations +
+{}
+
+ + + + monitoring.serviceMonitor.metricsInstance.enabled + bool + If enabled, MetricsInstance resources for Grafana Agent Operator are created +
+true
+
+ + + + monitoring.serviceMonitor.metricsInstance.labels + object + Additional MetricsInstance labels +
+{}
+
+ + + + monitoring.serviceMonitor.metricsInstance.remoteWrite + string + If defined a MetricsInstance will be created to remote write metrics. +
+null
+
+ + + + monitoring.serviceMonitor.namespaceSelector + object + Namespace selector for ServiceMonitor resources +
+{}
+
+ + + + monitoring.serviceMonitor.relabelings + list + ServiceMonitor relabel configs to apply to samples before scraping https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig +
+[]
+
+ + + + monitoring.serviceMonitor.scheme + string + ServiceMonitor will use http by default, but you can pick https as well +
+"http"
+
+ + + + monitoring.serviceMonitor.scrapeTimeout + string + ServiceMonitor scrape timeout in Go duration format (e.g. 15s) +
+null
+
+ + + + monitoring.serviceMonitor.tlsConfig + string + ServiceMonitor will use these tlsConfig settings to make the health check requests +
+null
+
+ + + + nameOverride + string + Overrides the chart's name +
+null
+
+ + + + networkPolicy.alertmanager.namespaceSelector + object + Specifies the namespace the alertmanager is running in +
+{}
+
+ + + + networkPolicy.alertmanager.podSelector + object + Specifies the alertmanager Pods. As this is cross-namespace communication, you also need the namespaceSelector. +
+{}
+
+ + + + networkPolicy.alertmanager.port + int + Specify the alertmanager port used for alerting +
+9093
+
+ + + + networkPolicy.discovery.namespaceSelector + object + Specifies the namespace the discovery Pods are running in +
+{}
+
+ + + + networkPolicy.discovery.podSelector + object + Specifies the Pods labels used for discovery. As this is cross-namespace communication, you also need the namespaceSelector. +
+{}
+
+ + + + networkPolicy.discovery.port + int + Specify the port used for discovery +
+null
+
+ + + + networkPolicy.egressKubeApiserver.enabled + bool + Enable additional cilium egress rules to kube-apiserver for backend. +
+false
+
+ + + + networkPolicy.egressWorld.enabled + bool + Enable additional cilium egress rules to external world for write, read and backend. +
+false
+
+ + + + networkPolicy.enabled + bool + Specifies whether Network Policies should be created +
+false
+
+ + + + networkPolicy.externalStorage.cidrs + list + Specifies specific network CIDRs you want to limit access to +
+[]
+
+ + + + networkPolicy.externalStorage.ports + list + Specify the port used for external storage, e.g. AWS S3 +
+[]
+
+ + + + networkPolicy.flavor + string + Specifies whether the policies created will be standard Network Policies (flavor: kubernetes) or Cilium Network Policies (flavor: cilium) +
+"kubernetes"
+
+ + + + networkPolicy.ingress.namespaceSelector + object + Specifies the namespaces which are allowed to access the http port +
+{}
+
+ + + + networkPolicy.ingress.podSelector + object + Specifies the Pods which are allowed to access the http port. As this is cross-namespace communication, you also need the namespaceSelector. +
+{}
+
+ + + + networkPolicy.metrics.cidrs + list + Specifies specific network CIDRs which are allowed to access the metrics port. In case you use namespaceSelector, you also have to specify your kubelet networks here. The metrics ports are also used for probes. +
+[]
+
+ + + + networkPolicy.metrics.namespaceSelector + object + Specifies the namespaces which are allowed to access the metrics port +
+{}
+
+ + + + networkPolicy.metrics.podSelector + object + Specifies the Pods which are allowed to access the metrics port. As this is cross-namespace communication, you also need the namespaceSelector. +
+{}
+
+ + + + patternIngester + object + Configuration for the pattern ingester +
+{
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "pattern-ingester"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "appProtocol": {
+    "grpc": ""
+  },
+  "command": null,
+  "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "hostAliases": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "initContainers": [],
+  "livenessProbe": {},
+  "nodeSelector": {},
+  "persistence": {
+    "annotations": {},
+    "claims": [
+      {
+        "name": "data",
+        "size": "10Gi",
+        "storageClass": null
+      }
+    ],
+    "enableStatefulSetAutoDeletePVC": false,
+    "enabled": false,
+    "size": "10Gi",
+    "storageClass": null,
+    "whenDeleted": "Retain",
+    "whenScaled": "Retain"
+  },
+  "podAnnotations": {},
+  "podLabels": {},
+  "priorityClassName": null,
+  "readinessProbe": {},
+  "replicas": 0,
+  "resources": {},
+  "serviceAccount": {
+    "annotations": {},
+    "automountServiceAccountToken": true,
+    "create": false,
+    "imagePullSecrets": [],
+    "name": null
+  },
+  "serviceLabels": {},
+  "terminationGracePeriodSeconds": 30,
+  "tolerations": []
+}
+
+ + + + patternIngester.affinity + object + Affinity for pattern ingester pods. +
+Hard node anti-affinity
+
+ + + + patternIngester.appProtocol + object + Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" +
+{
+  "grpc": ""
+}
+
+ + + + patternIngester.command + string + Command to execute instead of defined in Docker image +
+null
+
+ + + + patternIngester.extraArgs + list + Additional CLI args for the pattern ingester +
+[]
+
+ + + + patternIngester.extraContainers + list + Containers to add to the pattern ingester pods +
+[]
+
+ + + + patternIngester.extraEnv + list + Environment variables to add to the pattern ingester pods +
+[]
+
+ + + + patternIngester.extraEnvFrom + list + Environment variables from secrets or configmaps to add to the pattern ingester pods +
+[]
+
+ + + + patternIngester.extraVolumeMounts + list + Volume mounts to add to the pattern ingester pods +
+[]
+
+ + + + patternIngester.extraVolumes + list + Volumes to add to the pattern ingester pods +
+[]
+
+ + + + patternIngester.hostAliases + list + hostAliases to add +
+[]
+
+ + + + patternIngester.image.registry + string + The Docker registry for the pattern ingester image. Overrides `loki.image.registry` +
+null
+
+ + + + patternIngester.image.repository + string + Docker image repository for the pattern ingester image. Overrides `loki.image.repository` +
+null
+
+ + + + patternIngester.image.tag + string + Docker image tag for the pattern ingester image. Overrides `loki.image.tag` +
+null
+
+ + + + patternIngester.initContainers + list + Init containers to add to the pattern ingester pods +
+[]
+
+ + + + patternIngester.livenessProbe + object + liveness probe settings for pattern ingester pods. If empty, use `loki.livenessProbe` +
+{}
+
+ + + + patternIngester.nodeSelector + object + Node selector for pattern ingester pods +
+{}
+
+ + + + patternIngester.persistence.annotations + object + Annotations for pattern ingester PVCs +
+{}
+
+ + + + patternIngester.persistence.claims + list + List of the pattern ingester PVCs +
+
+
+ + + + patternIngester.persistence.enableStatefulSetAutoDeletePVC + bool + Enable StatefulSetAutoDeletePVC feature +
+false
+
+ + + + patternIngester.persistence.enabled + bool + Enable creating PVCs for the pattern ingester +
+false
+
+ + + + patternIngester.persistence.size + string + Size of persistent disk +
+"10Gi"
+
+ + + + patternIngester.persistence.storageClass + string + Storage class to be used. If defined, storageClassName: . If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). +
+null
+
+ + + + patternIngester.podAnnotations + object + Annotations for pattern ingester pods +
+{}
+
+ + + + patternIngester.podLabels + object + Labels for pattern ingester pods +
+{}
+
+ + + + patternIngester.priorityClassName + string + The name of the PriorityClass for pattern ingester pods +
+null
+
+ + + + patternIngester.readinessProbe + object + readiness probe settings for pattern ingester pods. If empty, use `loki.readinessProbe` +
+{}
+
+ + + + patternIngester.replicas + int + Number of replicas for the pattern ingester +
+0
+
+ + + + patternIngester.resources + object + Resource requests and limits for the pattern ingester +
+{}
+
+ + + + patternIngester.serviceAccount.annotations + object + Annotations for the pattern ingester service account +
+{}
+
+ + + + patternIngester.serviceAccount.automountServiceAccountToken + bool + Set this toggle to false to opt out of automounting API credentials for the service account +
+true
+
+ + + + patternIngester.serviceAccount.imagePullSecrets + list + Image pull secrets for the pattern ingester service account +
+[]
+
+ + + + patternIngester.serviceAccount.name + string + The name of the ServiceAccount to use for the pattern ingester. If not set and create is true, a name is generated by appending "-pattern-ingester" to the common ServiceAccount. +
+null
+
+ + + + patternIngester.serviceLabels + object + Labels for pattern ingester service +
+{}
+
+ + + + patternIngester.terminationGracePeriodSeconds + int + Grace period to allow the pattern ingester to shutdown before it is killed +
+30
+
+ + + + patternIngester.tolerations + list + Tolerations for pattern ingester pods +
+[]
+
+ + + + querier + object + Configuration for the querier +
+{
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "querier"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "appProtocol": {
+    "grpc": ""
+  },
+  "autoscaling": {
+    "behavior": {
+      "enabled": false,
+      "scaleDown": {},
+      "scaleUp": {}
+    },
+    "customMetrics": [],
+    "enabled": false,
+    "maxReplicas": 3,
+    "minReplicas": 1,
+    "targetCPUUtilizationPercentage": 60,
+    "targetMemoryUtilizationPercentage": null
+  },
+  "command": null,
+  "dnsConfig": {},
+  "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "hostAliases": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "initContainers": [],
+  "maxSurge": 0,
+  "maxUnavailable": null,
+  "nodeSelector": {},
+  "persistence": {
+    "annotations": {},
+    "enabled": false,
+    "size": "10Gi",
+    "storageClass": null
+  },
+  "podAnnotations": {},
+  "podLabels": {},
+  "priorityClassName": null,
+  "replicas": 0,
+  "resources": {},
+  "serviceLabels": {},
+  "terminationGracePeriodSeconds": 30,
+  "tolerations": [],
+  "topologySpreadConstraints": [
+    {
+      "labelSelector": {
+        "matchLabels": {
+          "app.kubernetes.io/component": "querier"
+        }
+      },
+      "maxSkew": 1,
+      "topologyKey": "kubernetes.io/hostname",
+      "whenUnsatisfiable": "ScheduleAnyway"
+    }
+  ]
+}
+
+ + + + querier.affinity + object + Affinity for querier pods. +
+Hard node anti-affinity
+
+ + + + querier.appProtocol + object + Adds the appProtocol field to the querier service. This allows querier to work with istio protocol selection. +
+{
+  "grpc": ""
+}
+
+ + + + querier.appProtocol.grpc + string + Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" +
+""
+
+ + + + querier.autoscaling.behavior.enabled + bool + Enable autoscaling behaviours +
+false
+
+ + + + querier.autoscaling.behavior.scaleDown + object + define scale down policies, must conform to HPAScalingRules +
+{}
+
+ + + + querier.autoscaling.behavior.scaleUp + object + define scale up policies, must conform to HPAScalingRules +
+{}
+
+ + + + querier.autoscaling.customMetrics + list + Allows one to define custom metrics using the HPA/v2 schema (for example, Pods, Object or External metrics) +
+[]
+
+ + + + querier.autoscaling.enabled + bool + Enable autoscaling for the querier, this is only used if `indexGateway.enabled: true` +
+false
+
+ + + + querier.autoscaling.maxReplicas + int + Maximum autoscaling replicas for the querier +
+3
+
+ + + + querier.autoscaling.minReplicas + int + Minimum autoscaling replicas for the querier +
+1
+
+ + + + querier.autoscaling.targetCPUUtilizationPercentage + int + Target CPU utilisation percentage for the querier +
+60
+
+ + + + querier.autoscaling.targetMemoryUtilizationPercentage + string + Target memory utilisation percentage for the querier +
+null
+
+ + + + querier.command + string + Command to execute instead of defined in Docker image +
+null
+
+ + + + querier.dnsConfig + object + DNSConfig for querier pods +
+{}
+
+ + + + querier.extraArgs + list + Additional CLI args for the querier +
+[]
+
+ + + + querier.extraContainers + list + Containers to add to the querier pods +
+[]
+
+ + + + querier.extraEnv + list + Environment variables to add to the querier pods +
+[]
+
+ + + + querier.extraEnvFrom + list + Environment variables from secrets or configmaps to add to the querier pods +
+[]
+
+ + + + querier.extraVolumeMounts + list + Volume mounts to add to the querier pods +
+[]
+
+ + + + querier.extraVolumes + list + Volumes to add to the querier pods +
+[]
+
+ + + + querier.hostAliases + list + hostAliases to add +
+[]
+
+ + + + querier.image.registry + string + The Docker registry for the querier image. Overrides `loki.image.registry` +
+null
+
+ + + + querier.image.repository + string + Docker image repository for the querier image. Overrides `loki.image.repository` +
+null
+
+ + + + querier.image.tag + string + Docker image tag for the querier image. Overrides `loki.image.tag` +
+null
+
+ + + + querier.initContainers + list + Init containers to add to the querier pods +
+[]
+
+ + + + querier.maxSurge + int + Max Surge for querier pods +
+0
+
+ + + + querier.maxUnavailable + string + Pod Disruption Budget maxUnavailable +
+null
+
+ + + + querier.nodeSelector + object + Node selector for querier pods +
+{}
+
+ + + + querier.persistence.annotations + object + Annotations for querier PVCs +
+{}
+
+ + + + querier.persistence.enabled + bool + Enable creating PVCs for the querier cache +
+false
+
+ + + + querier.persistence.size + string + Size of persistent disk +
+"10Gi"
+
+ + + + querier.persistence.storageClass + string + Storage class to be used. If defined, storageClassName: . If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). +
+null
+
+ + + + querier.podAnnotations + object + Annotations for querier pods +
+{}
+
+ + + + querier.podLabels + object + Labels for querier pods +
+{}
+
+ + + + querier.priorityClassName + string + The name of the PriorityClass for querier pods +
+null
+
+ + + + querier.replicas + int + Number of replicas for the querier +
+0
+
+ + + + querier.resources + object + Resource requests and limits for the querier +
+{}
+
+ + + + querier.serviceLabels + object + Labels for querier service +
+{}
+
+ + + + querier.terminationGracePeriodSeconds + int + Grace period to allow the querier to shutdown before it is killed +
+30
+
+ + + + querier.tolerations + list + Tolerations for querier pods +
+[]
+
+ + + + querier.topologySpreadConstraints + list + topologySpread for querier pods. +
+Defaults to allow skew no more than 1 node
+
+ + + + queryFrontend + object + Configuration for the query-frontend +
+{
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "query-frontend"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "appProtocol": {
+    "grpc": ""
+  },
+  "autoscaling": {
+    "behavior": {
+      "enabled": false,
+      "scaleDown": {},
+      "scaleUp": {}
+    },
+    "customMetrics": [],
+    "enabled": false,
+    "maxReplicas": 3,
+    "minReplicas": 1,
+    "targetCPUUtilizationPercentage": 60,
+    "targetMemoryUtilizationPercentage": null
+  },
+  "command": null,
+  "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "hostAliases": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "maxUnavailable": null,
+  "nodeSelector": {},
+  "podAnnotations": {},
+  "podLabels": {},
+  "priorityClassName": null,
+  "replicas": 0,
+  "resources": {},
+  "serviceLabels": {},
+  "terminationGracePeriodSeconds": 30,
+  "tolerations": []
 }
 
- loki.querier + queryFrontend.affinity object - Optional querier configuration + Affinity for query-frontend pods. +
+Hard node anti-affinity
+
+ + + + queryFrontend.appProtocol + object + Adds the appProtocol field to the queryFrontend service. This allows queryFrontend to work with istio protocol selection. +
+{
+  "grpc": ""
+}
+
+ + + + queryFrontend.appProtocol.grpc + string + Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" +
+""
+
+ + + + queryFrontend.autoscaling.behavior.enabled + bool + Enable autoscaling behaviours +
+false
+
+ + + + queryFrontend.autoscaling.behavior.scaleDown + object + define scale down policies, must conform to HPAScalingRules
 {}
 
- loki.query_scheduler + queryFrontend.autoscaling.behavior.scaleUp object - Additional query scheduler config + define scale up policies, must conform to HPAScalingRules
 {}
 
- loki.readinessProbe.httpGet.path + queryFrontend.autoscaling.customMetrics + list + Allows one to define custom metrics using the HPA/v2 schema (for example, Pods, Object or External metrics) +
+[]
+
+ + + + queryFrontend.autoscaling.enabled + bool + Enable autoscaling for the query-frontend +
+false
+
+ + + + queryFrontend.autoscaling.maxReplicas + int + Maximum autoscaling replicas for the query-frontend +
+3
+
+ + + + queryFrontend.autoscaling.minReplicas + int + Minimum autoscaling replicas for the query-frontend +
+1
+
+ + + + queryFrontend.autoscaling.targetCPUUtilizationPercentage + int + Target CPU utilisation percentage for the query-frontend +
+60
+
+ + + + queryFrontend.autoscaling.targetMemoryUtilizationPercentage + string + Target memory utilisation percentage for the query-frontend +
+null
+
+ + + + queryFrontend.command + string + Command to execute instead of defined in Docker image +
+null
+
+ + + + queryFrontend.extraArgs + list + Additional CLI args for the query-frontend +
+[]
+
+ + + + queryFrontend.extraContainers + list + Containers to add to the query-frontend pods +
+[]
+
+ + + + queryFrontend.extraEnv + list + Environment variables to add to the query-frontend pods +
+[]
+
+ + + + queryFrontend.extraEnvFrom + list + Environment variables from secrets or configmaps to add to the query-frontend pods +
+[]
+
+ + + + queryFrontend.extraVolumeMounts + list + Volume mounts to add to the query-frontend pods +
+[]
+
+ + + + queryFrontend.extraVolumes + list + Volumes to add to the query-frontend pods +
+[]
+
+ + + + queryFrontend.hostAliases + list + hostAliases to add +
+[]
+
+ + + + queryFrontend.image.registry string - + The Docker registry for the query-frontend image. Overrides `loki.image.registry`
-"/ready"
+null
 
- loki.readinessProbe.httpGet.port + queryFrontend.image.repository string - + Docker image repository for the query-frontend image. Overrides `loki.image.repository`
-"http-metrics"
+null
 
- loki.readinessProbe.initialDelaySeconds - int - + queryFrontend.image.tag + string + Docker image tag for the query-frontend image. Overrides `loki.image.tag`
-30
+null
 
- loki.readinessProbe.timeoutSeconds - int - + queryFrontend.maxUnavailable + string + Pod Disruption Budget maxUnavailable
-1
+null
 
- loki.revisionHistoryLimit - int - The number of old ReplicaSets to retain to allow rollback + queryFrontend.nodeSelector + object + Node selector for query-frontend pods
-10
+{}
 
- loki.rulerConfig + queryFrontend.podAnnotations object - Check https://grafana.com/docs/loki/latest/configuration/#ruler for more info on configuring ruler + Annotations for query-frontend pods
 {}
 
- loki.runtimeConfig + queryFrontend.podLabels object - Provides a reloadable runtime configuration file for some specific configuration + Labels for query-frontend pods
 {}
 
- loki.schemaConfig - object - Check https://grafana.com/docs/loki/latest/configuration/#schema_config for more info on how to configure schemas + queryFrontend.priorityClassName + string + The name of the PriorityClass for query-frontend pods
-{}
+null
 
- loki.server - object - Check https://grafana.com/docs/loki/latest/configuration/#server for more info on the server configuration. + queryFrontend.replicas + int + Number of replicas for the query-frontend
-{
-  "grpc_listen_port": 9095,
-  "http_listen_port": 3100
-}
+0
 
- loki.serviceAnnotations + queryFrontend.resources object - Common annotations for all services + Resource requests and limits for the query-frontend
 {}
 
- loki.serviceLabels + queryFrontend.serviceLabels object - Common labels for all services + Labels for query-frontend service
 {}
 
- loki.storage - object - Storage config. Providing this will automatically populate all necessary storage configs in the templated config. + queryFrontend.terminationGracePeriodSeconds + int + Grace period to allow the query-frontend to shutdown before it is killed
-{
-  "azure": {
-    "accountKey": null,
-    "accountName": null,
-    "connectionString": null,
-    "endpointSuffix": null,
-    "requestTimeout": null,
-    "useFederatedToken": false,
-    "useManagedIdentity": false,
-    "userAssignedId": null
-  },
-  "bucketNames": {
-    "admin": "admin",
-    "chunks": "chunks",
-    "ruler": "ruler"
-  },
-  "filesystem": {
-    "chunks_directory": "/var/loki/chunks",
-    "rules_directory": "/var/loki/rules"
-  },
-  "gcs": {
-    "chunkBufferSize": 0,
-    "enableHttp2": true,
-    "requestTimeout": "0s"
-  },
-  "s3": {
-    "accessKeyId": null,
-    "backoff_config": {},
-    "endpoint": null,
-    "http_config": {},
-    "insecure": false,
-    "region": null,
-    "s3": null,
-    "s3ForcePathStyle": false,
-    "secretAccessKey": null,
-    "signatureVersion": null
-  },
-  "swift": {
-    "auth_url": null,
-    "auth_version": null,
-    "connect_timeout": null,
-    "container_name": null,
-    "domain_id": null,
-    "domain_name": null,
-    "internal": null,
-    "max_retries": null,
-    "password": null,
-    "project_domain_id": null,
-    "project_domain_name": null,
-    "project_id": null,
-    "project_name": null,
-    "region_name": null,
-    "request_timeout": null,
-    "user_domain_id": null,
-    "user_domain_name": null,
-    "user_id": null,
-    "username": null
-  },
-  "type": "s3"
-}
+30
 
- loki.storage.s3.backoff_config - object - Check https://grafana.com/docs/loki/latest/configure/#s3_storage_config for more info on how to provide a backoff_config + queryFrontend.tolerations + list + Tolerations for query-frontend pods
-{}
+[]
 
- loki.storage_config + queryScheduler object - Additional storage config + Configuration for the query-scheduler
 {
-  "hedging": {
-    "at": "250ms",
-    "max_per_second": 20,
-    "up_to": 3
-  }
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "query-scheduler"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "appProtocol": {
+    "grpc": ""
+  },
+  "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "hostAliases": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "maxUnavailable": 1,
+  "nodeSelector": {},
+  "podAnnotations": {},
+  "podLabels": {},
+  "priorityClassName": null,
+  "replicas": 0,
+  "resources": {},
+  "serviceLabels": {},
+  "terminationGracePeriodSeconds": 30,
+  "tolerations": []
 }
 
- loki.structuredConfig + queryScheduler.affinity object - Structured loki configuration, takes precedence over `loki.config`, `loki.schemaConfig`, `loki.storageConfig` -
-{}
-
- - - - loki.tenants - list - Tenants list to be created on nginx htpasswd file, with name and password keys -
-[]
+			Affinity for query-scheduler pods.
+			
+Hard node anti-affinity
 
- loki.tracing + queryScheduler.appProtocol object - Enable tracing + Set the optional grpc service protocol. Ex: "grpc", "http2" or "https"
 {
-  "enabled": false
+  "grpc": ""
 }
 
- memberlist.service.publishNotReadyAddresses - bool - + queryScheduler.extraArgs + list + Additional CLI args for the query-scheduler
-false
+[]
 
- migrate - object - Options that may be necessary when performing a migration from another helm chart + queryScheduler.extraContainers + list + Containers to add to the query-scheduler pods
-{
-  "fromDistributed": {
-    "enabled": false,
-    "memberlistService": ""
-  }
-}
+[]
 
- migrate.fromDistributed - object - When migrating from a distributed chart like loki-distributed or enterprise-logs + queryScheduler.extraEnv + list + Environment variables to add to the query-scheduler pods
-{
-  "enabled": false,
-  "memberlistService": ""
-}
+[]
 
- migrate.fromDistributed.enabled - bool - Set to true if migrating from a distributed helm chart + queryScheduler.extraEnvFrom + list + Environment variables from secrets or configmaps to add to the query-scheduler pods
-false
+[]
 
- migrate.fromDistributed.memberlistService - string - If migrating from a distributed service, provide the distributed deployment's memberlist service DNS so the new deployment can join its ring. + queryScheduler.extraVolumeMounts + list + Volume mounts to add to the query-scheduler pods
-""
+[]
 
- minio - object - ----------------------------------- + queryScheduler.extraVolumes + list + Volumes to add to the query-scheduler pods
-{
-  "buckets": [
-    {
-      "name": "chunks",
-      "policy": "none",
-      "purge": false
-    },
-    {
-      "name": "ruler",
-      "policy": "none",
-      "purge": false
-    },
-    {
-      "name": "admin",
-      "policy": "none",
-      "purge": false
-    }
-  ],
-  "drivesPerNode": 2,
-  "enabled": false,
-  "persistence": {
-    "size": "5Gi"
-  },
-  "replicas": 1,
-  "resources": {
-    "requests": {
-      "cpu": "100m",
-      "memory": "128Mi"
-    }
-  },
-  "rootPassword": "supersecret",
-  "rootUser": "enterprise-logs"
-}
+[]
 
- monitoring.dashboards.annotations - object - Additional annotations for the dashboards ConfigMap + queryScheduler.hostAliases + list + hostAliases to add
-{}
+[]
 
- monitoring.dashboards.enabled - bool - If enabled, create configmap with dashboards for monitoring Loki + queryScheduler.image.registry + string + The Docker registry for the query-scheduler image. Overrides `loki.image.registry`
-true
+null
 
- monitoring.dashboards.labels - object - Labels for the dashboards ConfigMap + queryScheduler.image.repository + string + Docker image repository for the query-scheduler image. Overrides `loki.image.repository`
-{
-  "grafana_dashboard": "1"
-}
+null
 
- monitoring.dashboards.namespace + queryScheduler.image.tag string - Alternative namespace to create dashboards ConfigMap in + Docker image tag for the query-scheduler image. Overrides `loki.image.tag`
 null
 
- monitoring.lokiCanary.annotations + queryScheduler.maxUnavailable + int + Pod Disruption Budget maxUnavailable +
+1
+
+ + + + queryScheduler.nodeSelector object - Additional annotations for the `loki-canary` Daemonset + Node selector for query-scheduler pods
 {}
 
- monitoring.lokiCanary.dnsConfig + queryScheduler.podAnnotations object - DNS config for canary pods + Annotations for query-scheduler pods
 {}
 
- monitoring.lokiCanary.enabled - bool - + queryScheduler.podLabels + object + Labels for query-scheduler pods
-true
+{}
 
- monitoring.lokiCanary.extraArgs - list - Additional CLI arguments for the `loki-canary' command + queryScheduler.priorityClassName + string + The name of the PriorityClass for query-scheduler pods
-[]
+null
 
- monitoring.lokiCanary.extraEnv - list - Environment variables to add to the canary pods + queryScheduler.replicas + int + Number of replicas for the query-scheduler. It should be lower than `-querier.max-concurrent` to avoid generating back-pressure in queriers; it's also recommended that this value evenly divides the latter
-[]
+0
 
- monitoring.lokiCanary.extraEnvFrom - list - Environment variables from secrets or configmaps to add to the canary pods + queryScheduler.resources + object + Resource requests and limits for the query-scheduler
-[]
+{}
 
- monitoring.lokiCanary.image + queryScheduler.serviceLabels object - Image to use for loki canary + Labels for query-scheduler service
-{
-  "digest": null,
-  "pullPolicy": "IfNotPresent",
-  "registry": "docker.io",
-  "repository": "grafana/loki-canary",
-  "tag": null
-}
+{}
 
- monitoring.lokiCanary.image.digest - string - Overrides the image tag with an image digest + queryScheduler.terminationGracePeriodSeconds + int + Grace period to allow the query-scheduler to shutdown before it is killed
-null
+30
 
- monitoring.lokiCanary.image.pullPolicy - string - Docker image pull policy + queryScheduler.tolerations + list + Tolerations for query-scheduler pods
-"IfNotPresent"
+[]
 
- monitoring.lokiCanary.image.registry - string - The Docker registry + rbac.namespaced + bool + Whether to install RBAC in the namespace only or cluster-wide. Useful if you want to watch ConfigMap globally.
-"docker.io"
+false
 
- monitoring.lokiCanary.image.repository - string - Docker image repository + rbac.pspAnnotations + object + Specify PSP annotations Ref: https://kubernetes.io/docs/reference/access-authn-authz/psp-to-pod-security-standards/#podsecuritypolicy-annotations
-"grafana/loki-canary"
+{}
 
- monitoring.lokiCanary.image.tag - string - Overrides the image tag whose default is the chart's appVersion + rbac.pspEnabled + bool + If pspEnabled true, a PodSecurityPolicy is created for K8s that use psp.
-null
+false
 
- monitoring.lokiCanary.labelname - string - The name of the label to look for at loki when doing the checks. + rbac.sccEnabled + bool + For OpenShift set pspEnabled to 'false' and sccEnabled to 'true' to use the SecurityContextConstraints.
-"pod"
+false
 
- monitoring.lokiCanary.nodeSelector + read object - Node selector for canary pods + Configuration for the read pod(s)
-{}
+{
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "read"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "annotations": {},
+  "autoscaling": {
+    "behavior": {},
+    "enabled": false,
+    "maxReplicas": 6,
+    "minReplicas": 2,
+    "targetCPUUtilizationPercentage": 60,
+    "targetMemoryUtilizationPercentage": null
+  },
+  "dnsConfig": {},
+  "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "legacyReadTarget": false,
+  "lifecycle": {},
+  "nodeSelector": {},
+  "persistence": {
+    "enableStatefulSetAutoDeletePVC": true,
+    "selector": null,
+    "size": "10Gi",
+    "storageClass": null
+  },
+  "podAnnotations": {},
+  "podLabels": {},
+  "podManagementPolicy": "Parallel",
+  "priorityClassName": null,
+  "replicas": 3,
+  "resources": {},
+  "selectorLabels": {},
+  "service": {
+    "annotations": {},
+    "labels": {}
+  },
+  "targetModule": "read",
+  "terminationGracePeriodSeconds": 30,
+  "tolerations": [],
+  "topologySpreadConstraints": []
+}
 
- monitoring.lokiCanary.podLabels + read.affinity object - Additional labels for each `loki-canary` pod -
-{}
+			Affinity for read pods.
+			
+Hard node anti-affinity
 
- monitoring.lokiCanary.priorityClassName - string - The name of the PriorityClass for loki-canary pods + read.annotations + object + Annotations for read deployment
-null
+{}
 
- monitoring.lokiCanary.resources + read.autoscaling.behavior object - Resource requests and limits for the canary + Behavior policies while scaling.
 {}
 
- monitoring.lokiCanary.service.annotations - object - Annotations for loki-canary Service + read.autoscaling.enabled + bool + Enable autoscaling for the read, this is only used if `queryIndex.enabled: true`
-{}
+false
 
- monitoring.lokiCanary.service.labels - object - Additional labels for loki-canary Service + read.autoscaling.maxReplicas + int + Maximum autoscaling replicas for the read
-{}
+6
 
- monitoring.lokiCanary.tolerations - list - Tolerations for canary pods + read.autoscaling.minReplicas + int + Minimum autoscaling replicas for the read
-[]
+2
 
- monitoring.lokiCanary.updateStrategy - object - Update strategy for the `loki-canary` Daemonset pods + read.autoscaling.targetCPUUtilizationPercentage + int + Target CPU utilisation percentage for the read
-{
-  "rollingUpdate": {
-    "maxUnavailable": 1
-  },
-  "type": "RollingUpdate"
-}
+60
 
- monitoring.rules.additionalGroups - list - Additional groups to add to the rules file + read.autoscaling.targetMemoryUtilizationPercentage + string + Target memory utilisation percentage for the read
-[]
+null
 
- monitoring.rules.additionalRuleLabels + read.dnsConfig object - Additional labels for PrometheusRule alerts + DNS config for read pods
 {}
 
- monitoring.rules.alerting - bool - Include alerting rules + read.extraArgs + list + Additional CLI args for the read
-true
+[]
 
- monitoring.rules.annotations - object - Additional annotations for the rules PrometheusRule resource + read.extraContainers + list + Containers to add to the read pods
-{}
+[]
 
- monitoring.rules.disabled - object - If you disable all the alerts and keep .monitoring.rules.alerting set to true, the chart will fail to render. + read.extraEnv + list + Environment variables to add to the read pods
-{}
+[]
 
- monitoring.rules.enabled - bool - If enabled, create PrometheusRule resource with Loki recording rules + read.extraEnvFrom + list + Environment variables from secrets or configmaps to add to the read pods
-true
+[]
 
- monitoring.rules.labels - object - Additional labels for the rules PrometheusRule resource + read.extraVolumeMounts + list + Volume mounts to add to the read pods
-{}
+[]
 
- monitoring.rules.namespace - string - Alternative namespace to create PrometheusRule resources in + read.extraVolumes + list + Volumes to add to the read pods
-null
+[]
 
- monitoring.selfMonitoring.enabled - bool - + read.image.registry + string + The Docker registry for the read image. Overrides `loki.image.registry`
-true
+null
 
- monitoring.selfMonitoring.grafanaAgent.annotations - object - Grafana Agent annotations + read.image.repository + string + Docker image repository for the read image. Overrides `loki.image.repository`
-{}
+null
 
- monitoring.selfMonitoring.grafanaAgent.enableConfigReadAPI - bool - Enable the config read api on port 8080 of the agent + read.image.tag + string + Docker image tag for the read image. Overrides `loki.image.tag`
-false
+null
 
- monitoring.selfMonitoring.grafanaAgent.installOperator + read.legacyReadTarget bool - Controls whether to install the Grafana Agent Operator and its CRDs. Note that helm will not install CRDs if this flag is enabled during an upgrade. In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds + Whether or not to use the 2 target type simple scalable mode (read, write) or the 3 target type (read, write, backend). Legacy refers to the 2 target type, so true will run two targets, false will run 3 targets.
-true
+false
 
- monitoring.selfMonitoring.grafanaAgent.labels + read.lifecycle object - Additional Grafana Agent labels + Lifecycle for the read container
 {}
 
- monitoring.selfMonitoring.grafanaAgent.priorityClassName - string - The name of the PriorityClass for GrafanaAgent pods + read.nodeSelector + object + Node selector for read pods
-null
+{}
 
- monitoring.selfMonitoring.grafanaAgent.resources - object - Resource requests and limits for the grafanaAgent pods + read.persistence.enableStatefulSetAutoDeletePVC + bool + Enable StatefulSetAutoDeletePVC feature
-{}
+true
 
- monitoring.selfMonitoring.grafanaAgent.tolerations - list - Tolerations for GrafanaAgent pods + read.persistence.selector + string + Selector for persistent disk
-[]
+null
 
- monitoring.selfMonitoring.logsInstance.annotations - object - LogsInstance annotations + read.persistence.size + string + Size of persistent disk
-{}
+"10Gi"
 
- monitoring.selfMonitoring.logsInstance.clients + read.persistence.storageClass string - Additional clients for remote write + Storage class to be used. If defined, storageClassName: . If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
 null
 
- monitoring.selfMonitoring.logsInstance.labels + read.podAnnotations object - Additional LogsInstance labels + Annotations for read pods
 {}
 
- monitoring.selfMonitoring.podLogs.additionalPipelineStages - list - Additional pipeline stages to process logs after scraping https://grafana.com/docs/agent/latest/operator/api/#pipelinestagespec-a-namemonitoringgrafanacomv1alpha1pipelinestagespeca -
-[]
-
- - - - monitoring.selfMonitoring.podLogs.annotations + read.podLabels object - PodLogs annotations + Additional labels for each `read` pod
 {}
 
- monitoring.selfMonitoring.podLogs.apiVersion + read.podManagementPolicy string - PodLogs version + The default is to deploy all pods in parallel.
-"monitoring.grafana.com/v1alpha1"
+"Parallel"
 
- monitoring.selfMonitoring.podLogs.labels - object - Additional PodLogs labels + read.priorityClassName + string + The name of the PriorityClass for read pods
-{}
+null
 
- monitoring.selfMonitoring.podLogs.relabelings - list - PodLogs relabel configs to apply to samples before scraping https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + read.replicas + int + Number of replicas for the read
-[]
+3
 
- monitoring.selfMonitoring.tenant + read.resources object - Tenant to use for self monitoring + Resource requests and limits for the read
-{
-  "name": "self-monitoring",
-  "secretNamespace": "{{ .Release.Namespace }}"
-}
+{}
 
- monitoring.selfMonitoring.tenant.name - string - Name of the tenant + read.selectorLabels + object + Additional selector labels for each `read` pod
-"self-monitoring"
+{}
 
- monitoring.selfMonitoring.tenant.secretNamespace - string - Namespace to create additional tenant token secret in. Useful if your Grafana instance is in a separate namespace. Token will still be created in the canary namespace. + read.service.annotations + object + Annotations for read Service
-"{{ .Release.Namespace }}"
+{}
 
- monitoring.serviceMonitor.annotations + read.service.labels object - ServiceMonitor annotations + Additional labels for read Service
 {}
 
- monitoring.serviceMonitor.enabled - bool - If enabled, ServiceMonitor resources for Prometheus Operator are created + read.targetModule + string + Comma-separated list of Loki modules to load for the read
-true
+"read"
 
- monitoring.serviceMonitor.interval - string - ServiceMonitor scrape interval Default is 15s because included recording rules use a 1m rate, and scrape interval needs to be at least 1/4 rate interval. + read.terminationGracePeriodSeconds + int + Grace period to allow the read to shutdown before it is killed
-"15s"
+30
 
- monitoring.serviceMonitor.labels - object - Additional ServiceMonitor labels + read.tolerations + list + Tolerations for read pods
-{}
+[]
 
- monitoring.serviceMonitor.metricRelabelings + read.topologySpreadConstraints list - ServiceMonitor metric relabel configs to apply to samples before ingestion https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint + Topology Spread Constraints for read pods
 []
 
- monitoring.serviceMonitor.metricsInstance + resultsCache.affinity object - If defined, will create a MetricsInstance for the Grafana Agent Operator. + Affinity for results-cache pods
-{
-  "annotations": {},
-  "enabled": true,
-  "labels": {},
-  "remoteWrite": null
-}
+{}
 
- monitoring.serviceMonitor.metricsInstance.annotations + resultsCache.allocatedMemory + int + Amount of memory allocated to results-cache for object storage (in MB). +
+1024
+
+ + + + resultsCache.annotations object - MetricsInstance annotations + Annotations for the results-cache pods
 {}
 
- monitoring.serviceMonitor.metricsInstance.enabled - bool - If enabled, MetricsInstance resources for Grafana Agent Operator are created + resultsCache.connectionLimit + int + Maximum number of connections allowed
-true
+16384
 
- monitoring.serviceMonitor.metricsInstance.labels - object - Additional MetricsInstance labels + resultsCache.defaultValidity + string + Specify how long cached results should be stored in the results-cache before being expired
-{}
+"12h"
 
- monitoring.serviceMonitor.metricsInstance.remoteWrite - string - If defined a MetricsInstance will be created to remote write metrics. + resultsCache.enabled + bool + Specifies whether memcached based results-cache should be enabled
-null
+true
 
- monitoring.serviceMonitor.namespaceSelector + resultsCache.extraArgs object - Namespace selector for ServiceMonitor resources + Additional CLI args for results-cache
 {}
 
- monitoring.serviceMonitor.relabelings + resultsCache.extraContainers list - ServiceMonitor relabel configs to apply to samples before scraping https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + Additional containers to be added to the results-cache pod.
 []
 
- monitoring.serviceMonitor.scheme + resultsCache.extraExtendedOptions string - ServiceMonitor will use http by default, but you can pick https as well + Add extended options for results-cache memcached container. The format is the same as for the memcached -o/--extend flag. Example: extraExtendedOptions: 'tls,modern,track_sizes'
-"http"
+""
 
- monitoring.serviceMonitor.scrapeTimeout - string - ServiceMonitor scrape timeout in Go duration format (e.g. 15s) + resultsCache.extraVolumeMounts + list + Additional volume mounts to be added to the results-cache pod (applies to both memcached and exporter containers). Example: extraVolumeMounts: - name: extra-volume mountPath: /etc/extra-volume readOnly: true
-null
+[]
 
- monitoring.serviceMonitor.tlsConfig - string - ServiceMonitor will use these tlsConfig settings to make the health check requests + resultsCache.extraVolumes + list + Additional volumes to be added to the results-cache pod (applies to both memcached and exporter containers). Example: extraVolumes: - name: extra-volume secret: secretName: extra-volume-secret
-null
+[]
 
- nameOverride - string - Overrides the chart's name + resultsCache.initContainers + list + Extra init containers for results-cache pods
-null
+[]
 
- networkPolicy.alertmanager.namespaceSelector - object - Specifies the namespace the alertmanager is running in + resultsCache.maxItemMemory + int + Maximum item results-cache for memcached (in MB).
-{}
+5
 
- networkPolicy.alertmanager.podSelector + resultsCache.nodeSelector object - Specifies the alertmanager Pods. As this is cross-namespace communication, you also need the namespaceSelector. + Node selector for results-cache pods
 {}
 
- networkPolicy.alertmanager.port - int - Specify the alertmanager port used for alerting + resultsCache.podAnnotations + object + Annotations for results-cache pods
-9093
+{}
 
- networkPolicy.discovery.namespaceSelector + resultsCache.podDisruptionBudget object - Specifies the namespace the discovery Pods are running in + Pod Disruption Budget
-{}
+{
+  "maxUnavailable": 1
+}
 
- networkPolicy.discovery.podSelector + resultsCache.podLabels object - Specifies the Pods labels used for discovery. As this is cross-namespace communication, you also need the namespaceSelector. + Labels for results-cache pods
 {}
 
- networkPolicy.discovery.port - int - Specify the port used for discovery + resultsCache.podManagementPolicy + string + Management policy for results-cache pods
-null
+"Parallel"
 
- networkPolicy.egressKubeApiserver.enabled - bool - Enable additional cilium egress rules to kube-apiserver for backend. + resultsCache.port + int + Port of the results-cache service
-false
+11211
 
- networkPolicy.egressWorld.enabled - bool - Enable additional cilium egress rules to external world for write, read and backend. + resultsCache.priorityClassName + string + The name of the PriorityClass for results-cache pods
-false
+null
 
- networkPolicy.enabled - bool - Specifies whether Network Policies should be created + resultsCache.replicas + int + Total number of results-cache replicas
-false
+1
 
- networkPolicy.externalStorage.cidrs - list - Specifies specific network CIDRs you want to limit access to + resultsCache.resources + string + Resource requests and limits for the results-cache By default a safe memory limit will be requested based on allocatedMemory value (floor (* 1.2 allocatedMemory)).
-[]
+null
 
- networkPolicy.externalStorage.ports - list - Specify the port used for external storage, e.g. AWS S3 + resultsCache.service + object + Service annotations and labels
-[]
+{
+  "annotations": {},
+  "labels": {}
+}
 
- networkPolicy.flavor - string - Specifies whether the policies created will be standard Network Policies (flavor: kubernetes) or Cilium Network Policies (flavor: cilium) + resultsCache.statefulStrategy + object + Stateful results-cache strategy
-"kubernetes"
+{
+  "type": "RollingUpdate"
+}
 
- networkPolicy.ingress.namespaceSelector - object - Specifies the namespaces which are allowed to access the http port + resultsCache.terminationGracePeriodSeconds + int + Grace period to allow the results-cache to shutdown before it is killed
-{}
+60
 
- networkPolicy.ingress.podSelector - object - Specifies the Pods which are allowed to access the http port. As this is cross-namespace communication, you also need the namespaceSelector. + resultsCache.timeout + string + Memcached operation timeout
-{}
+"500ms"
 
- networkPolicy.metrics.cidrs + resultsCache.tolerations list - Specifies specific network CIDRs which are allowed to access the metrics port. In case you use namespaceSelector, you also have to specify your kubelet networks here. The metrics ports are also used for probes. + Tolerations for results-cache pods
 []
 
- networkPolicy.metrics.namespaceSelector - object - Specifies the namespaces which are allowed to access the metrics port -
-{}
-
- - - - networkPolicy.metrics.podSelector - object - Specifies the Pods which are allowed to access the metrics port. As this is cross-namespace communication, you also need the namespaceSelector. + resultsCache.topologySpreadConstraints + list + topologySpreadConstraints allows to customize the default topologySpreadConstraints. This can be either a single dict as shown below or a slice of topologySpreadConstraints. labelSelector is taken from the constraint itself (if it exists) or is generated by the chart using the same selectors as for services.
-{}
+[]
 
- rbac.namespaced - bool - Whether to install RBAC in the namespace only or cluster-wide. Useful if you want to watch ConfigMap globally. + resultsCache.writebackBuffer + int + Max number of objects to use for cache write back
-false
+500000
 
- rbac.pspAnnotations - object - Specify PSP annotations Ref: https://kubernetes.io/docs/reference/access-authn-authz/psp-to-pod-security-standards/#podsecuritypolicy-annotations + resultsCache.writebackParallelism + int + Number of parallel threads for cache write back
-{}
+1
 
- rbac.pspEnabled - bool - If pspEnabled true, a PodSecurityPolicy is created for K8s that use psp. + resultsCache.writebackSizeLimit + string + Max memory to use for cache write back
-false
+"500MB"
 
- rbac.sccEnabled - bool - For OpenShift set pspEnabled to 'false' and sccEnabled to 'true' to use the SecurityContextConstraints. + rollout_operator + object + Setting for the Grafana Rollout Operator https://github.com/grafana/helm-charts/tree/main/charts/rollout-operator
-false
+{
+  "enabled": false,
+  "podSecurityContext": {
+    "fsGroup": 10001,
+    "runAsGroup": 10001,
+    "runAsNonRoot": true,
+    "runAsUser": 10001,
+    "seccompProfile": {
+      "type": "RuntimeDefault"
+    }
+  },
+  "securityContext": {
+    "allowPrivilegeEscalation": false,
+    "capabilities": {
+      "drop": [
+        "ALL"
+      ]
+    },
+    "readOnlyRootFilesystem": true
+  }
+}
 
- read.affinity - string - Affinity for read pods. Passed through `tpl` and, thus, to be configured as string -
-Hard node and soft zone anti-affinity
+			rollout_operator.podSecurityContext
+			object
+			podSecurityContext is the pod security context for the rollout operator. When installing on OpenShift, override podSecurityContext settings with  rollout_operator:   podSecurityContext:     fsGroup: null     runAsGroup: null     runAsUser: null
+			
+{
+  "fsGroup": 10001,
+  "runAsGroup": 10001,
+  "runAsNonRoot": true,
+  "runAsUser": 10001,
+  "seccompProfile": {
+    "type": "RuntimeDefault"
+  }
+}
 
- read.annotations + ruler object - Annotations for read deployment + Configuration for the ruler
-{}
+{
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "ruler"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "appProtocol": {
+    "grpc": ""
+  },
+  "command": null,
+  "directories": {},
+  "dnsConfig": {},
+  "enabled": true,
+  "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "hostAliases": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "initContainers": [],
+  "maxUnavailable": null,
+  "nodeSelector": {},
+  "persistence": {
+    "annotations": {},
+    "enabled": false,
+    "size": "10Gi",
+    "storageClass": null
+  },
+  "podAnnotations": {},
+  "podLabels": {},
+  "priorityClassName": null,
+  "replicas": 0,
+  "resources": {},
+  "serviceLabels": {},
+  "terminationGracePeriodSeconds": 300,
+  "tolerations": []
+}
 
- read.autoscaling.behavior + ruler.affinity object - Behavior policies while scaling. -
-{}
+			Affinity for ruler pods.
+			
+Hard node anti-affinity
 
- read.autoscaling.enabled - bool - Enable autoscaling for the read, this is only used if `queryIndex.enabled: true` + ruler.appProtocol + object + Set the optional grpc service protocol. Ex: "grpc", "http2" or "https"
-false
+{
+  "grpc": ""
+}
 
- read.autoscaling.maxReplicas - int - Maximum autoscaling replicas for the read + ruler.command + string + Command to execute instead of defined in Docker image
-6
+null
 
- read.autoscaling.minReplicas - int - Minimum autoscaling replicas for the read + ruler.directories + object + Directories containing rules files
-2
+{}
 
- read.autoscaling.targetCPUUtilizationPercentage - int - Target CPU utilisation percentage for the read + ruler.dnsConfig + object + DNSConfig for ruler pods
-60
+{}
 
- read.autoscaling.targetMemoryUtilizationPercentage - string - Target memory utilisation percentage for the read + ruler.enabled + bool + The ruler component is optional and can be disabled if desired.
-null
+true
 
- read.dnsConfig - object - DNS config for read pods + ruler.extraArgs + list + Additional CLI args for the ruler
-{}
+[]
 
- read.extraArgs + ruler.extraContainers list - Additional CLI args for the read + Containers to add to the ruler pods
 []
 
- read.extraContainers + ruler.extraEnv list - Containers to add to the read pods + Environment variables to add to the ruler pods
 []
 
- read.extraEnv + ruler.extraEnvFrom list - Environment variables to add to the read pods + Environment variables from secrets or configmaps to add to the ruler pods
 []
 
- read.extraEnvFrom + ruler.extraVolumeMounts list - Environment variables from secrets or configmaps to add to the read pods + Volume mounts to add to the ruler pods
 []
 
- read.extraVolumeMounts + ruler.extraVolumes list - Volume mounts to add to the read pods + Volumes to add to the ruler pods
 []
 
- read.extraVolumes + ruler.hostAliases list - Volumes to add to the read pods + hostAliases to add
 []
 
- read.image.registry + ruler.image.registry string - The Docker registry for the read image. Overrides `loki.image.registry` + The Docker registry for the ruler image. Overrides `loki.image.registry`
 null
 
- read.image.repository + ruler.image.repository string - Docker image repository for the read image. Overrides `loki.image.repository` + Docker image repository for the ruler image. Overrides `loki.image.repository`
 null
 
- read.image.tag + ruler.image.tag string - Docker image tag for the read image. Overrides `loki.image.tag` + Docker image tag for the ruler image. Overrides `loki.image.tag`
 null
 
- read.legacyReadTarget - bool - Whether or not to use the 2 target type simple scalable mode (read, write) or the 3 target type (read, write, backend). Legacy refers to the 2 target type, so true will run two targets, false will run 3 targets. + ruler.initContainers + list + Init containers to add to the ruler pods
-false
+[]
 
- read.lifecycle - object - Lifecycle for the read container + ruler.maxUnavailable + string + Pod Disruption Budget maxUnavailable
-{}
+null
 
- read.nodeSelector + ruler.nodeSelector object - Node selector for read pods + Node selector for ruler pods
 {}
 
- read.persistence.enableStatefulSetAutoDeletePVC - bool - Enable StatefulSetAutoDeletePVC feature + ruler.persistence.annotations + object + Annotations for ruler PVCs
-true
+{}
 
- read.persistence.selector - string - Selector for persistent disk + ruler.persistence.enabled + bool + Enable creating PVCs which is required when using recording rules
-null
+false
 
- read.persistence.size + ruler.persistence.size string Size of persistent disk
@@ -3483,7 +9513,7 @@ null
 
 		
 		
-			read.persistence.storageClass
+			ruler.persistence.storageClass
 			string
 			Storage class to be used. If defined, storageClassName: . If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
 			
@@ -3492,117 +9522,72 @@ null
 
 		
 		
-			read.podAnnotations
+			ruler.podAnnotations
 			object
-			Annotations for read pods
+			Annotations for ruler pods
 			
 {}
 
- read.podLabels + ruler.podLabels object - Additional labels for each `read` pod + Labels for ruler pods
 {}
 
- read.podManagementPolicy - string - The default is to deploy all pods in parallel. -
-"Parallel"
-
- - - - read.priorityClassName + ruler.priorityClassName string - The name of the PriorityClass for read pods + The name of the PriorityClass for ruler pods
 null
 
- read.replicas + ruler.replicas int - Number of replicas for the read -
-3
-
- - - - read.resources - object - Resource requests and limits for the read -
-{}
-
- - - - read.selectorLabels - object - Additional selector labels for each `read` pod + Number of replicas for the ruler
-{}
+0
 
- read.service.annotations + ruler.resources object - Annotations for read Service + Resource requests and limits for the ruler
 {}
 
- read.service.labels + ruler.serviceLabels object - Additional labels for read Service + Labels for ruler service
 {}
 
- read.targetModule - string - Comma-separated list of Loki modules to load for the read -
-"read"
-
- - - - read.terminationGracePeriodSeconds + ruler.terminationGracePeriodSeconds int - Grace period to allow the read to shutdown before it is killed -
-30
-
- - - - read.tolerations - list - Tolerations for read pods + Grace period to allow the ruler to shutdown before it is killed
-[]
+300
 
- read.topologySpreadConstraints + ruler.tolerations list - Topology Spread Constraints for read pods + Tolerations for ruler pods
 []
 
@@ -3853,10 +9838,10 @@ false singleBinary.affinity - string - Affinity for single binary pods. Passed through `tpl` and, thus, to be configured as string + object + Affinity for single binary pods.
-Hard node and soft zone anti-affinity
+Hard node anti-affinity
 
@@ -4164,14 +10149,66 @@ null
 []
 
+ + + + tableManager + object + DEPRECATED Configuration for the table-manager. The table-manager is only necessary when using a deprecated index type such as Cassandra, Bigtable, or DynamoDB; it has not been necessary since Loki introduced self-contained index types like 'boltdb-shipper' and 'tsdb'. This will be removed in a future helm chart. +
+{
+  "affinity": {
+    "podAntiAffinity": {
+      "requiredDuringSchedulingIgnoredDuringExecution": [
+        {
+          "labelSelector": {
+            "matchLabels": {
+              "app.kubernetes.io/component": "table-manager"
+            }
+          },
+          "topologyKey": "kubernetes.io/hostname"
+        }
+      ]
+    }
+  },
+  "annotations": {},
+  "command": null,
+  "dnsConfig": {},
+  "enabled": false,
+  "extraArgs": [],
+  "extraContainers": [],
+  "extraEnv": [],
+  "extraEnvFrom": [],
+  "extraVolumeMounts": [],
+  "extraVolumes": [],
+  "image": {
+    "registry": null,
+    "repository": null,
+    "tag": null
+  },
+  "nodeSelector": {},
+  "podAnnotations": {},
+  "podLabels": {},
+  "priorityClassName": null,
+  "resources": {},
+  "retention_deletes_enabled": false,
+  "retention_period": 0,
+  "service": {
+    "annotations": {},
+    "labels": {}
+  },
+  "terminationGracePeriodSeconds": 30,
+  "tolerations": []
+}
+
tableManager.affinity - string - Affinity for table-manager pods. Passed through `tpl` and, thus, to be configured as string + object + Affinity for table-manager pods.
-Hard node and soft zone anti-affinity
+Hard node anti-affinity
 
@@ -4398,16 +10435,17 @@ false
 {
   "annotations": {},
+  "canaryServiceAddress": "http://loki-canary:3500/metrics",
   "enabled": true,
   "image": {
     "digest": null,
     "pullPolicy": "IfNotPresent",
     "registry": "docker.io",
     "repository": "grafana/loki-helm-test",
-    "tag": null
+    "tag": "ewelch-distributed-helm-chart-17db5ee"
   },
   "labels": {},
-  "prometheusAddress": "http://prometheus:9090",
+  "prometheusAddress": "",
   "timeout": "1m"
 }
 
@@ -4420,6 +10458,15 @@ false
 {}
 
+ + + + test.canaryServiceAddress + string + Used to directly query the metrics endpoint of the canary for testing, this approach avoids needing prometheus for testing. This is a newer approach to using prometheusAddress such that tests do not have a dependency on prometheus +
+"http://loki-canary:3500/metrics"
+
@@ -4432,7 +10479,7 @@ false "pullPolicy": "IfNotPresent", "registry": "docker.io", "repository": "grafana/loki-helm-test", - "tag": null + "tag": "ewelch-distributed-helm-chart-17db5ee" }
@@ -4478,7 +10525,7 @@ null string Overrides the image tag whose default is the chart's appVersion
-null
+"ewelch-distributed-helm-chart-17db5ee"
 
@@ -4494,9 +10541,9 @@ null test.prometheusAddress string - Address of the prometheus server to query for the test + Address of the prometheus server to query for the test. This overrides any value set for canaryServiceAddress. This is kept for backward compatibility and may be removed in future releases. Previous value was 'http://prometheus:9090'
-"http://prometheus:9090"
+""
 
@@ -4511,10 +10558,10 @@ null write.affinity - string - Affinity for write pods. Passed through `tpl` and, thus, to be configured as string + object + Affinity for write pods.
-Hard node and soft zone anti-affinity
+Hard node anti-affinity
 
diff --git a/docs/sources/setup/upgrade/upgrade-to-6x/index.md b/docs/sources/setup/upgrade/upgrade-to-6x/index.md new file mode 100644 index 000000000000..48f4fde89062 --- /dev/null +++ b/docs/sources/setup/upgrade/upgrade-to-6x/index.md @@ -0,0 +1,89 @@ +--- +title: Upgrade the Helm chart to 6.0 +menuTitle: Upgrade the Helm chart to 6.0 +description: Upgrade the Helm chart from 5.x to 6.0. +weight: 800 +keywords: + - upgrade +--- + +## Upgrading to v6.x + +v6.x of this chart introduces distributed mode but also introduces breaking changes from v5.x. + +### Changes + +#### BREAKING: `deploymentMode` setting + +This only breaks you if you are running the chart in Single Binary mode; you will need to set + +``` +deploymentMode: SingleBinary +``` + +#### BREAKING: `lokiCanary` section was moved + +This section was moved from within the `monitoring` section to the root level of the values file. + +#### BREAKING: `topologySpreadConstraints` and `podAffinity` converted to objects + +Previously they were strings which were passed through `tpl`; now they are normal objects which will be added to deployments. + +Also we removed the soft constraint on zone. + +#### BREAKING: `externalConfigSecretName` was removed and replaced. + +Instead you can now provide `configObjectName` which is used by Loki components for loading the config. + +`generatedConfigObjectName` also can be used to control the name of the config object created by the chart. + +This gives greater flexibility in using the chart to still generate a config object but allowing for another process to load and mutate this config into a new object which can be loaded by Loki and `configObjectName` + +#### Monitoring + +After some consideration of how this chart works with other charts provided by Grafana, we decided to deprecate the monitoring sections of this chart and take a new approach entirely to monitoring Loki, Mimir and Tempo with the [Meta Monitoring Chart](https://github.com/grafana/meta-monitoring-chart). 
+ +Reasons: + * There were conflicts with this chart and the Mimir chart both installing the Agent Operator. + * The Agent Operator is deprecated. + * The dependency on the Prometheus operator is not one we are able to support well. + +The [Meta Monitoring Chart](https://github.com/grafana/meta-monitoring-chart) is an improvement over the previous approach because it allows for installing a clustered Grafana Agent which can send metrics, logs, and traces to Grafana Cloud, or letting you install a monitoring-only local installation of Loki, Mimir, Tempo, and Grafana. + +The monitoring sections of this chart still exist but are disabled by default. + +If you wish to continue using the self monitoring features you should use the following configuration, but please do note a future version of this chart will remove this capability completely: + +``` +monitoring: + enabled: true + selfMonitoring: + enabled: true + grafanaAgent: + installOperator: true +``` + +#### Memcached is included and enabled by default + +Caching is crucial to the proper operation of Loki and Memcached is now included in this chart and enabled by default for the `chunksCache` and `resultsCache`. + +If you are already running Memcached separately you can remove your existing installation and use the Memcached deployments built into this chart. + +##### Single Binary + +Memcached also deploys for the Single Binary, but this may not be desired in resource constrained environments. + +You can disable it with the following configuration: + +``` +chunksCache: + enabled: false +resultsCache: + enabled: false +``` + +With these caches disabled, Loki will return to defaults which enables an in-memory results and chunks cache, so you will still get some caching. + +#### Distributed mode + +This chart introduces the ability to run Loki in distributed, or [microservices mode](https://grafana.com/docs/loki/latest/get-started/deployment-modes/#microservices-mode). 
Separate instructions on how to enable this as well as how to migrate from the existing community chart will be coming shortly. diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 60ff91c766e1..891b1e80e6cf 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,12 @@ Entries should include a reference to the pull request that introduced the chang [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 6.0.0 + +- [CHANGE] the lokiCanary section was moved from under monitoring to be under the root of the file. +- [CHANGE] the definitions for topologySpreadConstraints and podAffinity were converted from string templates to objects. Also removed the soft constraint on zone. +- [CHANGE] the externalConfigSecretName was replaced with more generic configs + ## 5.47.2 - [ENHANCEMENT] Allow for additional pipeline stages to be configured on the `selfMonitoring` `Podlogs` resource. 
diff --git a/production/helm/loki/Chart.lock b/production/helm/loki/Chart.lock index e8c779c50330..5d6d29141b12 100644 --- a/production/helm/loki/Chart.lock +++ b/production/helm/loki/Chart.lock @@ -5,5 +5,8 @@ dependencies: - name: grafana-agent-operator repository: https://grafana.github.io/helm-charts version: 0.3.15 -digest: sha256:b7a42cd0e56544f6168a586fde03e26c801bb20cf69bc004a8f6000d93b98100 -generated: "2024-01-27T21:57:28.190462917+05:30" +- name: rollout-operator + repository: https://grafana.github.io/helm-charts + version: 0.13.0 +digest: sha256:d0e60c2879039ee5e8b7b10530f0e8790d6d328ee8afca71f01128627e921587 +generated: "2024-04-07T14:12:43.317329844-04:00" diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 21972fe00709..b51eefcb2936 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: loki description: Helm chart for Grafana Loki in simple, scalable mode type: application -appVersion: 2.9.6 -version: 5.47.2 +appVersion: 3.0.0 +version: 6.0.0 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki @@ -21,6 +21,11 @@ dependencies: version: 0.3.15 repository: https://grafana.github.io/helm-charts condition: monitoring.selfMonitoring.grafanaAgent.installOperator + - name: rollout-operator + alias: rollout_operator + repository: https://grafana.github.io/helm-charts + version: 0.13.0 + condition: rollout_operator.enabled maintainers: - name: trevorwhitney - name: jeschkies diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 942498d47147..4d732d39e6ee 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 5.47.2](https://img.shields.io/badge/Version-5.47.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 
2.9.6](https://img.shields.io/badge/AppVersion-2.9.6-informational?style=flat-square) +![Version: 6.0.0](https://img.shields.io/badge/Version-6.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) Helm chart for Grafana Loki in simple, scalable mode @@ -16,5 +16,6 @@ Helm chart for Grafana Loki in simple, scalable mode |------------|------|---------| | https://charts.min.io/ | minio(minio) | 4.0.15 | | https://grafana.github.io/helm-charts | grafana-agent-operator(grafana-agent-operator) | 0.3.15 | +| https://grafana.github.io/helm-charts | rollout_operator(rollout-operator) | 0.13.0 | Find more information in the Loki Helm Chart [documentation](https://grafana.com/docs/loki/next/installation/helm). diff --git a/production/helm/loki/ci/default-single-binary-values.yaml b/production/helm/loki/ci/default-single-binary-values.yaml new file mode 100644 index 000000000000..9447810cfa3c --- /dev/null +++ b/production/helm/loki/ci/default-single-binary-values.yaml @@ -0,0 +1,14 @@ +--- +loki: + commonConfig: + replication_factor: 1 + useTestSchema: true +deploymentMode: SingleBinary +singleBinary: + replicas: 1 +read: + replicas: 0 +write: + replicas: 0 +backend: + replicas: 0 diff --git a/production/helm/loki/ci/default-values.yaml b/production/helm/loki/ci/default-values.yaml index c143b416be47..9e5b90bfc273 100644 --- a/production/helm/loki/ci/default-values.yaml +++ b/production/helm/loki/ci/default-values.yaml @@ -2,17 +2,10 @@ loki: commonConfig: replication_factor: 1 - image: - tag: "main-5e53303" + useTestSchema: true read: replicas: 1 write: replicas: 1 backend: replicas: 1 -monitoring: - serviceMonitor: - labels: - release: "prometheus" -test: - prometheusAddress: "http://prometheus-kube-prometheus-prometheus.prometheus.svc.cluster.local.:9090" diff --git 
a/production/helm/loki/ci/distributed-disabled.yaml b/production/helm/loki/ci/distributed-disabled.yaml new file mode 100644 index 000000000000..c986c8903ee3 --- /dev/null +++ b/production/helm/loki/ci/distributed-disabled.yaml @@ -0,0 +1,32 @@ +--- +loki: + commonConfig: + replication_factor: 1 + useTestSchema: true +deploymentMode: Distributed +backend: + replicas: 0 +read: + replicas: 0 +write: + replicas: 0 +ingester: + replicas: 3 # Kind seems to be a single node for testing so the anti-affinity rules fail here with zone awareness +querier: + replicas: 1 +queryFrontend: + replicas: 1 +queryScheduler: + replicas: 1 +distributor: + replicas: 1 +compactor: + replicas: 1 +indexGateway: + replicas: 1 +bloomCompactor: + replicas: 0 +bloomGateway: + replicas: 0 +minio: + enabled: true diff --git a/production/helm/loki/ci/ingress-values.yaml b/production/helm/loki/ci/ingress-values.yaml index adff785167fe..2ca4119a8f4d 100644 --- a/production/helm/loki/ci/ingress-values.yaml +++ b/production/helm/loki/ci/ingress-values.yaml @@ -11,8 +11,7 @@ gateway: loki: commonConfig: replication_factor: 1 - image: - tag: "main-5e53303" + useTestSchema: true read: replicas: 1 write: diff --git a/production/helm/loki/ci/legacy-monitoring-values.yaml b/production/helm/loki/ci/legacy-monitoring-values.yaml new file mode 100644 index 000000000000..a398ab7b6592 --- /dev/null +++ b/production/helm/loki/ci/legacy-monitoring-values.yaml @@ -0,0 +1,22 @@ +--- +loki: + commonConfig: + replication_factor: 1 + useTestSchema: true +read: + replicas: 1 +write: + replicas: 1 +backend: + replicas: 1 +monitoring: + enabled: true + selfMonitoring: + enabled: true + grafanaAgent: + installOperator: true + serviceMonitor: + labels: + release: "prometheus" +test: + prometheusAddress: "http://prometheus-kube-prometheus-prometheus.prometheus.svc.cluster.local.:9090" diff --git a/production/helm/loki/distributed-values.yaml b/production/helm/loki/distributed-values.yaml new file mode 100644 index 
000000000000..0016b724ce57 --- /dev/null +++ b/production/helm/loki/distributed-values.yaml @@ -0,0 +1,70 @@ +--- +loki: + schemaConfig: + configs: + - from: 2024-04-01 + store: tsdb + object_store: s3 + schema: v13 + index: + prefix: loki_index_ + period: 24h + ingester: + chunk_encoding: snappy + tracing: + enabled: true + querier: + # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing + max_concurrent: 4 + +#gateway: +# ingress: +# enabled: true +# hosts: +# - host: FIXME +# paths: +# - path: / +# pathType: Prefix + +deploymentMode: Distributed + +ingester: + replicas: 3 +querier: + replicas: 3 + maxUnavailable: 2 +queryFrontend: + replicas: 2 + maxUnavailable: 1 +queryScheduler: + replicas: 2 +distributor: + replicas: 3 + maxUnavailable: 2 +compactor: + replicas: 1 +indexGateway: + replicas: 2 + maxUnavailable: 1 + +bloomCompactor: + replicas: 0 +bloomGateway: + replicas: 0 + +# Enable minio for storage +minio: + enabled: true + +# Zero out replica counts of other deployment modes +backend: + replicas: 0 +read: + replicas: 0 +write: + replicas: 0 + +singleBinary: + replicas: 0 + + diff --git a/production/helm/loki/simple-scalable-values.yaml b/production/helm/loki/simple-scalable-values.yaml new file mode 100644 index 000000000000..78132b6d965e --- /dev/null +++ b/production/helm/loki/simple-scalable-values.yaml @@ -0,0 +1,63 @@ +--- +loki: + schemaConfig: + configs: + - from: 2024-04-01 + store: tsdb + object_store: s3 + schema: v13 + index: + prefix: loki_index_ + period: 24h + ingester: + chunk_encoding: snappy + tracing: + enabled: true + querier: + # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing + max_concurrent: 4 + +#gateway: +# ingress: +# enabled: true +# hosts: +# - host: FIXME +# paths: +# - path: / +# pathType: Prefix + +deploymentMode: SimpleScalable + +backend: + replicas: 3 +read: + replicas: 3 +write: + replicas: 3 + +# Enable minio for storage +minio: + enabled: true + +# Zero 
out replica counts of other deployment modes +singleBinary: + replicas: 0 + +ingester: + replicas: 0 +querier: + replicas: 0 +queryFrontend: + replicas: 0 +queryScheduler: + replicas: 0 +distributor: + replicas: 0 +compactor: + replicas: 0 +indexGateway: + replicas: 0 +bloomCompactor: + replicas: 0 +bloomGateway: + replicas: 0 diff --git a/production/helm/loki/single-binary-values.yaml b/production/helm/loki/single-binary-values.yaml new file mode 100644 index 000000000000..584f0fba1c46 --- /dev/null +++ b/production/helm/loki/single-binary-values.yaml @@ -0,0 +1,79 @@ +--- +loki: + commonConfig: + replication_factor: 1 + schemaConfig: + configs: + - from: 2024-04-01 + store: tsdb + object_store: s3 + schema: v13 + index: + prefix: loki_index_ + period: 24h + ingester: + chunk_encoding: snappy + tracing: + enabled: true + querier: + # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing + max_concurrent: 2 + +#gateway: +# ingress: +# enabled: true +# hosts: +# - host: FIXME +# paths: +# - path: / +# pathType: Prefix + +deploymentMode: SingleBinary +singleBinary: + replicas: 1 + resources: + limits: + cpu: 3 + memory: 4Gi + requests: + cpu: 2 + memory: 2Gi + extraEnv: + # Keep a little bit lower than memory limits + - name: GOMEMLIMIT + value: 3750MiB + +chunksCache: + # default is 500MB, with limited memory keep this smaller + writebackSizeLimit: 10MB + +# Enable minio for storage +minio: + enabled: true + +# Zero out replica counts of other deployment modes +backend: + replicas: 0 +read: + replicas: 0 +write: + replicas: 0 + +ingester: + replicas: 0 +querier: + replicas: 0 +queryFrontend: + replicas: 0 +queryScheduler: + replicas: 0 +distributor: + replicas: 0 +compactor: + replicas: 0 +indexGateway: + replicas: 0 +bloomCompactor: + replicas: 0 +bloomGateway: + replicas: 0 diff --git a/production/helm/loki/src/helm-test/canary_test.go b/production/helm/loki/src/helm-test/canary_test.go index 24e9d6d0184f..dd874a89bab3 100644 --- 
a/production/helm/loki/src/helm-test/canary_test.go +++ b/production/helm/loki/src/helm-test/canary_test.go @@ -7,19 +7,41 @@ import ( "context" "errors" "fmt" + "io" + "net/http" "os" "testing" "time" "github.com/prometheus/client_golang/api" v1 "github.com/prometheus/client_golang/api/prometheus/v1" + promConfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" "github.com/stretchr/testify/require" ) +type testResultFunc func(t *testing.T, ctx context.Context, metric string, test func(model.SampleValue) bool, msg string) error + func TestCanary(t *testing.T) { - totalEntriesQuery := "sum(loki_canary_entries_total)" - totalEntriesMissingQuery := "sum(loki_canary_missing_entries_total)" + + var testResult testResultFunc + + // Default to directly querying a canary and looking for specific metrics. + testResult = testResultCanary + totalEntries := "loki_canary_entries_total" + totalEntriesMissing := "loki_canary_missing_entries_total" + + // For backwards compatibility and also for anyone who wants to validate with prometheus instead of querying + // a canary directly, if the CANARY_PROMETHEUS_ADDRESS is specified we will use prometheus to validate. + address := os.Getenv("CANARY_PROMETHEUS_ADDRESS") + if address != "" { + testResult = testResultPrometheus + // Use the sum function to aggregate the results from multiple canaries. 
+ totalEntries = "sum(loki_canary_entries_total)" + totalEntriesMissing = "sum(loki_canary_missing_entries_total)" + } timeout := getEnv("CANARY_TEST_TIMEOUT", "1m") timeoutDuration, err := time.ParseDuration(timeout) @@ -32,30 +54,18 @@ func TestCanary(t *testing.T) { }) t.Run("Canary should have entries", func(t *testing.T) { - client := newClient(t) - eventually(t, func() error { - result, _, err := client.Query(ctx, totalEntriesQuery, time.Now(), v1.WithTimeout(timeoutDuration)) - if err != nil { - return err - } - return testResult(t, result, totalEntriesQuery, func(v model.SampleValue) bool { + return testResult(t, ctx, totalEntries, func(v model.SampleValue) bool { return v > 0 - }, fmt.Sprintf("Expected %s to be greater than 0", totalEntriesQuery)) + }, fmt.Sprintf("Expected %s to be greater than 0", totalEntries)) }, timeoutDuration, "Expected Loki Canary to have entries") }) t.Run("Canary should not have missed any entries", func(t *testing.T) { - client := newClient(t) - eventually(t, func() error { - result, _, err := client.Query(ctx, totalEntriesMissingQuery, time.Now(), v1.WithTimeout(timeoutDuration)) - if err != nil { - return err - } - return testResult(t, result, totalEntriesMissingQuery, func(v model.SampleValue) bool { + return testResult(t, ctx, totalEntriesMissing, func(v model.SampleValue) bool { return v == 0 - }, fmt.Sprintf("Expected %s to equal 0", totalEntriesMissingQuery)) + }, fmt.Sprintf("Expected %s to equal 0", totalEntriesMissing)) }, timeoutDuration, "Expected Loki Canary to not have any missing entries") }) } @@ -67,7 +77,13 @@ func getEnv(key, fallback string) string { return fallback } -func testResult(t *testing.T, result model.Value, query string, test func(model.SampleValue) bool, msg string) error { +func testResultPrometheus(t *testing.T, ctx context.Context, query string, test func(model.SampleValue) bool, msg string) error { + // TODO (ewelch): if we did a lot of these, we'd want to reuse the client but right now we 
only run a couple tests + client := newClient(t) + result, _, err := client.Query(ctx, query, time.Now()) + if err != nil { + return err + } if v, ok := result.(model.Vector); ok { for _, s := range v { t.Logf("%s => %v\n", query, s.Value) @@ -75,7 +91,6 @@ func testResult(t *testing.T, result model.Value, query string, test func(model. return errors.New(msg) } } - return nil } @@ -94,6 +109,64 @@ func newClient(t *testing.T) v1.API { return v1.NewAPI(client) } +func testResultCanary(t *testing.T, ctx context.Context, metric string, test func(model.SampleValue) bool, msg string) error { + address := os.Getenv("CANARY_SERVICE_ADDRESS") + require.NotEmpty(t, address, "CANARY_SERVICE_ADDRESS must be set to a valid kubernetes service for the Loki canaries") + + // TODO (ewelch): if we did a lot of these, we'd want to reuse the client but right now we only run a couple tests + client, err := promConfig.NewClientFromConfig(promConfig.HTTPClientConfig{}, "canary-test") + require.NoError(t, err, "Failed to create Prometheus client") + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, address, nil) + require.NoError(t, err, "Failed to create request") + + rsp, err := client.Do(req) + if rsp != nil { + defer rsp.Body.Close() + } + require.NoError(t, err, "Failed to scrape metrics") + + body, err := io.ReadAll(rsp.Body) + require.NoError(t, err, "Failed to read response body") + + p, err := textparse.New(body, rsp.Header.Get("Content-Type"), true) + require.NoError(t, err, "Failed to create Prometheus parser") + + for { + e, err := p.Next() + if err == io.EOF { + return errors.New("metric not found") + } + + if e != textparse.EntrySeries { + continue + } + + l := labels.Labels{} + p.Metric(&l) + + // Currently we aren't validating any labels, just the metric name, however this could be extended to do so. 
+ name := l.Get(model.MetricNameLabel) + if name != metric { + continue + } + + _, _, val := p.Series() + t.Logf("%s => %v\n", metric, val) + + // Note: SampleValue has functions for comparing the equality of two floats which is + // why we convert this back to a SampleValue here for easier use intests. + if !test(model.SampleValue(val)) { + return errors.New(msg) + } + + // Returning here will only validate that one series was found matching the label name that met the condition + // it could be possible since we don't validate the rest of the labels that there is mulitple series + // but currently this meets the spirit of the test. + return nil + } +} + func eventually(t *testing.T, test func() error, timeoutDuration time.Duration, msg string) { require.Eventually(t, func() bool { queryError := test() diff --git a/production/helm/loki/templates/NOTES.txt b/production/helm/loki/templates/NOTES.txt index ad192e764325..6551a427000f 100644 --- a/production/helm/loki/templates/NOTES.txt +++ b/production/helm/loki/templates/NOTES.txt @@ -17,9 +17,20 @@ Installed components: {{- if .Values.minio.enabled }} * minio {{- end }} +{{- if eq (include "loki.deployment.isScalable" .) "true" }} * read * write {{- if not .Values.read.legacyReadTarget }} * backend {{- end }} +{{- else }} +* compactor +* index gateway +* query scheduler +* ruler +* distributor +* ingester +* querier +* query frontend +{{- end }} {{- end }} diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl index 14fe80006608..ac7e9717e1f2 100644 --- a/production/helm/loki/templates/_helpers.tpl +++ b/production/helm/loki/templates/_helpers.tpl @@ -50,17 +50,24 @@ Params: Return if deployment mode is simple scalable */}} {{- define "loki.deployment.isScalable" -}} - {{- and (eq (include "loki.isUsingObjectStorage" . ) "true") (eq (int .Values.singleBinary.replicas) 0) }} + {{- and (eq (include "loki.isUsingObjectStorage" . 
) "true") (or (eq .Values.deploymentMode "SingleBinary<->SimpleScalable") (eq .Values.deploymentMode "SimpleScalable") (eq .Values.deploymentMode "SimpleScalable<->Distributed")) }} {{- end -}} {{/* Return if deployment mode is single binary */}} {{- define "loki.deployment.isSingleBinary" -}} - {{- $nonZeroReplicas := gt (int .Values.singleBinary.replicas) 0 }} - {{- or (eq (include "loki.isUsingObjectStorage" . ) "false") ($nonZeroReplicas) }} + {{- or (eq .Values.deploymentMode "SingleBinary") (eq .Values.deploymentMode "SingleBinary<->SimpleScalable") }} {{- end -}} +{{/* +Return if deployment mode is distributed +*/}} +{{- define "loki.deployment.isDistributed" -}} + {{- and (eq (include "loki.isUsingObjectStorage" . ) "true") (or (eq .Values.deploymentMode "Distributed") (eq .Values.deploymentMode "SimpleScalable<->Distributed")) }} +{{- end -}} + + {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). @@ -448,7 +455,7 @@ ruler: {{- end }} {{/* -Calculate the config from structured and unstructred text input +Calculate the config from structured and unstructured text input */}} {{- define "loki.calculatedConfig" -}} {{ tpl (mergeOverwrite (tpl .Values.loki.config . | fromYaml) .Values.loki.structuredConfig | toYaml) . }} @@ -460,10 +467,10 @@ The volume to mount for loki configuration {{- define "loki.configVolume" -}} {{- if eq .Values.loki.configStorageType "Secret" -}} secret: - secretName: {{ tpl .Values.loki.externalConfigSecretName . }} -{{- else if eq .Values.loki.configStorageType "ConfigMap" -}} + secretName: {{ tpl .Values.loki.configObjectName . }} +{{- else -}} configMap: - name: {{ tpl .Values.loki.externalConfigSecretName . }} + name: {{ tpl .Values.loki.configObjectName . 
}} items: - key: "config.yaml" path: "config.yaml" @@ -697,10 +704,17 @@ http { {{- end }} server { + {{- if (.Values.gateway.nginxConfig.ssl) }} + listen 8080 ssl; + {{- if .Values.gateway.nginxConfig.enableIPv6 }} + listen [::]:8080 ssl; + {{- end }} + {{- else }} listen 8080; {{- if .Values.gateway.nginxConfig.enableIPv6 }} listen [::]:8080; {{- end }} + {{- end }} {{- if .Values.gateway.basicAuth.enabled }} auth_basic "Loki"; @@ -712,6 +726,9 @@ http { auth_basic off; } + ######################################################## + # Configure backend targets + {{- $backendHost := include "loki.backendFullname" .}} {{- $readHost := include "loki.readFullname" .}} {{- $writeHost := include "loki.writeFullname" .}} @@ -720,15 +737,11 @@ http { {{- $backendHost = include "loki.readFullname" . }} {{- end }} - {{- if gt (int .Values.singleBinary.replicas) 0 }} - {{- $backendHost = include "loki.singleBinaryFullname" . }} - {{- $readHost = include "loki.singleBinaryFullname" .}} - {{- $writeHost = include "loki.singleBinaryFullname" .}} - {{- end }} + {{- $httpSchema := .Values.gateway.nginxConfig.schema }} - {{- $writeUrl := printf "http://%s.%s.svc.%s:%s" $writeHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} - {{- $readUrl := printf "http://%s.%s.svc.%s:%s" $readHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} - {{- $backendUrl := printf "http://%s.%s.svc.%s:%s" $backendHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} + {{- $writeUrl := printf "%s://%s.%s.svc.%s:%s" $httpSchema $writeHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} + {{- $readUrl := printf "%s://%s.%s.svc.%s:%s" $httpSchema $readHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} + {{- $backendUrl := printf 
"%s://%s.%s.svc.%s:%s" $httpSchema $backendHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} {{- if .Values.gateway.nginxConfig.customWriteUrl }} {{- $writeUrl = .Values.gateway.nginxConfig.customWriteUrl }} @@ -740,24 +753,61 @@ http { {{- $backendUrl = .Values.gateway.nginxConfig.customBackendUrl }} {{- end }} + {{- $singleBinaryHost := include "loki.singleBinaryFullname" . }} + {{- $singleBinaryUrl := printf "%s://%s.%s.svc.%s:%s" $httpSchema $singleBinaryHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} + + {{- $distributorHost := include "loki.distributorFullname" .}} + {{- $ingesterHost := include "loki.ingesterFullname" .}} + {{- $queryFrontendHost := include "loki.queryFrontendFullname" .}} + {{- $indexGatewayHost := include "loki.indexGatewayFullname" .}} + {{- $rulerHost := include "loki.rulerFullname" .}} + {{- $compactorHost := include "loki.compactorFullname" .}} + {{- $schedulerHost := include "loki.querySchedulerFullname" .}} + + + {{- $distributorUrl := printf "%s://%s.%s.svc.%s:%s" $httpSchema $distributorHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) -}} + {{- $ingesterUrl := printf "%s://%s.%s.svc.%s:%s" $httpSchema $ingesterHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} + {{- $queryFrontendUrl := printf "%s://%s.%s.svc.%s:%s" $httpSchema $queryFrontendHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} + {{- $indexGatewayUrl := printf "%s://%s.%s.svc.%s:%s" $httpSchema $indexGatewayHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} + {{- $rulerUrl := printf "%s://%s.%s.svc.%s:%s" $httpSchema $rulerHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} + {{- 
$compactorUrl := printf "%s://%s.%s.svc.%s:%s" $httpSchema $compactorHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} + {{- $schedulerUrl := printf "%s://%s.%s.svc.%s:%s" $httpSchema $schedulerHost .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.http_listen_port | toString) }} + + {{- if eq (include "loki.deployment.isSingleBinary" .) "true"}} + {{- $distributorUrl = $singleBinaryUrl }} + {{- $ingesterUrl = $singleBinaryUrl }} + {{- $queryFrontendUrl = $singleBinaryUrl }} + {{- $indexGatewayUrl = $singleBinaryUrl }} + {{- $rulerUrl = $singleBinaryUrl }} + {{- $compactorUrl = $singleBinaryUrl }} + {{- $schedulerUrl = $singleBinaryUrl }} + {{- else if eq (include "loki.deployment.isScalable" .) "true"}} + {{- $distributorUrl = $writeUrl }} + {{- $ingesterUrl = $writeUrl }} + {{- $queryFrontendUrl = $readUrl }} + {{- $indexGatewayUrl = $backendUrl }} + {{- $rulerUrl = $backendUrl }} + {{- $compactorUrl = $backendUrl }} + {{- $schedulerUrl = $backendUrl }} + {{- end -}} # Distributor location = /api/prom/push { - proxy_pass {{ $writeUrl }}$request_uri; + proxy_pass {{ $distributorUrl }}$request_uri; } location = /loki/api/v1/push { - proxy_pass {{ $writeUrl }}$request_uri; + proxy_pass {{ $distributorUrl }}$request_uri; } location = /distributor/ring { - proxy_pass {{ $writeUrl }}$request_uri; + proxy_pass {{ $distributorUrl }}$request_uri; } # Ingester location = /flush { - proxy_pass {{ $writeUrl }}$request_uri; + proxy_pass {{ $ingesterUrl }}$request_uri; } location ^~ /ingester/ { - proxy_pass {{ $writeUrl }}$request_uri; + proxy_pass {{ $ingesterUrl }}$request_uri; } location = /ingester { internal; # to suppress 301 @@ -765,62 +815,61 @@ http { # Ring location = /ring { - proxy_pass {{ $writeUrl }}$request_uri; + proxy_pass {{ $ingesterUrl }}$request_uri; } # MemberListKV location = /memberlist { - proxy_pass {{ $writeUrl }}$request_uri; + proxy_pass {{ $ingesterUrl }}$request_uri; } - 
# Ruler location = /ruler/ring { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $rulerUrl }}$request_uri; } location = /api/prom/rules { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $rulerUrl }}$request_uri; } location ^~ /api/prom/rules/ { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $rulerUrl }}$request_uri; } location = /loki/api/v1/rules { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $rulerUrl }}$request_uri; } location ^~ /loki/api/v1/rules/ { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $rulerUrl }}$request_uri; } location = /prometheus/api/v1/alerts { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $rulerUrl }}$request_uri; } location = /prometheus/api/v1/rules { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $rulerUrl }}$request_uri; } # Compactor location = /compactor/ring { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $compactorUrl }}$request_uri; } location = /loki/api/v1/delete { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $compactorUrl }}$request_uri; } location = /loki/api/v1/cache/generation_numbers { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $compactorUrl }}$request_uri; } # IndexGateway location = /indexgateway/ring { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $indexGatewayUrl }}$request_uri; } # QueryScheduler location = /scheduler/ring { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $schedulerUrl }}$request_uri; } # Config location = /config { - proxy_pass {{ $backendUrl }}$request_uri; + proxy_pass {{ $ingesterUrl }}$request_uri; } {{- if and .Values.enterprise.enabled .Values.enterprise.adminApi.enabled }} @@ -836,29 +885,28 @@ http { # QueryFrontend, Querier location = /api/prom/tail { - proxy_pass {{ $readUrl }}$request_uri; + proxy_pass {{ $queryFrontendUrl }}$request_uri; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; } location = 
/loki/api/v1/tail { - proxy_pass {{ $readUrl }}$request_uri; + proxy_pass {{ $queryFrontendUrl }}$request_uri; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; } location ^~ /api/prom/ { - proxy_pass {{ $readUrl }}$request_uri; + proxy_pass {{ $queryFrontendUrl }}$request_uri; } location = /api/prom { internal; # to suppress 301 } location ^~ /loki/api/v1/ { - proxy_pass {{ $readUrl }}$request_uri; + proxy_pass {{ $queryFrontendUrl }}$request_uri; } location = /loki/api/v1 { internal; # to suppress 301 } - {{- with .Values.gateway.nginxConfig.serverSnippet }} {{ . | nindent 4 }} {{- end }} @@ -893,10 +941,50 @@ enableServiceLinks: false {{/* Determine query-scheduler address */}} {{- define "loki.querySchedulerAddress" -}} -{{- $isSimpleScalable := eq (include "loki.deployment.isScalable" .) "true" -}} {{- $schedulerAddress := ""}} -{{- if and $isSimpleScalable (not .Values.read.legacyReadTarget ) -}} -{{- $schedulerAddress = printf "query-scheduler-discovery.%s.svc.%s.:%s" .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.grpc_listen_port | toString) -}} +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed -}} +{{- $schedulerAddress = printf "%s.%s.svc.%s:%s" (include "loki.querySchedulerFullname" .) 
.Release.Namespace .Values.global.clusterDomain (.Values.loki.server.grpc_listen_port | toString) -}} {{- end -}} {{- printf "%s" $schedulerAddress }} {{- end }} + +{{/* Determine querier address */}} +{{- define "loki.querierAddress" -}} +{{- $querierAddress := "" }} +{{- if eq (include "loki.deployment.isDistributed" .) "true" }} +{{- $querierHost := include "loki.querierFullname" .}} +{{- $querierUrl := printf "http://%s.%s.svc.%s:3100" $querierHost .Release.Namespace .Values.global.clusterDomain }} +{{- $querierAddress = $querierUrl }} +{{- end -}} +{{- printf "%s" $querierAddress }} +{{- end }} + +{{/* Determine index-gateway address */}} +{{- define "loki.indexGatewayAddress" -}} +{{- $idxGatewayAddress := ""}} +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- $isScalable := eq (include "loki.deployment.isScalable" .) "true" -}} +{{- if $isDistributed -}} +{{- $idxGatewayAddress = printf "dns+%s-headless.%s.svc.%s:%s" (include "loki.indexGatewayFullname" .) .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.grpc_listen_port | toString) -}} +{{- end -}} +{{- if $isScalable -}} +{{- $idxGatewayAddress = printf "dns+%s-headless.%s.svc.%s:%s" (include "loki.backendFullname" .) .Release.Namespace .Values.global.clusterDomain (.Values.loki.server.grpc_listen_port | toString) -}} +{{- end -}} +{{- printf "%s" $idxGatewayAddress }} +{{- end }} + +{{- define "loki.config.checksum" -}} +checksum/config: {{ include (print .Template.BasePath "/config.yaml") . | sha256sum }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodDisruptionBudget. 
+*/}} +{{- define "loki.pdb.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">=1.21-0" .Capabilities.KubeVersion.Version) -}} + {{- print "policy/v1" -}} + {{- else -}} + {{- print "policy/v1beta1" -}} + {{- end -}} +{{- end -}} diff --git a/production/helm/loki/templates/admin-api/_helpers.yaml b/production/helm/loki/templates/admin-api/_helpers.yaml new file mode 100644 index 000000000000..e13ff8adbcc3 --- /dev/null +++ b/production/helm/loki/templates/admin-api/_helpers.yaml @@ -0,0 +1,24 @@ +{{/* +adminApi fullname +*/}} +{{- define "enterprise-logs.adminApiFullname" -}} +{{ include "loki.fullname" . }}-admin-api +{{- end }} + +{{/* +adminApi common labels +*/}} +{{- define "enterprise-logs.adminApiLabels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: admin-api +target: admin-api +{{- end }} + +{{/* +adminApi selector labels +*/}} +{{- define "enterprise-logs.adminApiSelectorLabels" -}} +{{ include "loki.selectorLabels" . }} +app.kubernetes.io/component: admin-api +target: admin-api +{{- end }} \ No newline at end of file diff --git a/production/helm/loki/templates/admin-api/deployment-admin-api.yaml b/production/helm/loki/templates/admin-api/deployment-admin-api.yaml new file mode 100644 index 000000000000..15391665ca77 --- /dev/null +++ b/production/helm/loki/templates/admin-api/deployment-admin-api.yaml @@ -0,0 +1,168 @@ +{{- if .Values.enterprise.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "enterprise-logs.adminApiFullname" . }} + labels: + {{- include "enterprise-logs.adminApiLabels" . | nindent 4 }} + {{- with .Values.adminApi.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- with .Values.adminApi.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.adminApi.replicas }} + selector: + matchLabels: + {{- include "enterprise-logs.adminApiSelectorLabels" . 
| nindent 6 }} + strategy: + {{- toYaml .Values.adminApi.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "enterprise-logs.adminApiSelectorLabels" . | nindent 8 }} + {{- with .Values.adminApi.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + app.kubernetes.io/part-of: memberlist + annotations: + {{- if .Values.useExternalConfig }} + checksum/config: {{ .Values.externalConfigVersion }} + {{- else }} + checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }} + {{- end}} + {{- with .Values.adminApi.annotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "loki.serviceAccountName" . }} + {{- if .Values.adminApi.priorityClassName }} + priorityClassName: {{ .Values.adminApi.priorityClassName }} + {{- end }} + securityContext: + {{- toYaml .Values.adminApi.podSecurityContext | nindent 8 }} + initContainers: + # Taken from + # https://github.com/minio/charts/blob/a5c84bcbad884728bff5c9c23541f936d57a13b3/minio/templates/post-install-create-bucket-job.yaml + {{- if .Values.minio.enabled }} + - name: minio-mc + image: "{{ .Values.minio.mcImage.repository }}:{{ .Values.minio.mcImage.tag }}" + imagePullPolicy: {{ .Values.minio.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/initialize"] + env: + - name: MINIO_ENDPOINT + value: {{ .Release.Name }}-minio + - name: MINIO_PORT + value: {{ .Values.minio.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.minio.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.minio.configPathmc }}certs + {{ end }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + {{- with .Values.adminApi.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: admin-api + image: "{{ template "loki.image" . 
}}" + imagePullPolicy: {{ .Values.enterprise.image.pullPolicy }} + args: + - -target=admin-api + - -config.file=/etc/loki/config/config.yaml + {{- if .Values.minio.enabled }} + - -admin.client.backend-type=s3 + - -admin.client.s3.endpoint={{ template "loki.minio" . }} + - -admin.client.s3.bucket-name=enterprise-logs-admin + - -admin.client.s3.access-key-id={{ .Values.minio.accessKey }} + - -admin.client.s3.secret-access-key={{ .Values.minio.secretKey }} + - -admin.client.s3.insecure=true + {{- end }} + {{- range $key, $value := .Values.adminApi.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/loki/config + - name: license + mountPath: /etc/loki/license + - name: storage + mountPath: /data + {{- if .Values.adminApi.extraVolumeMounts }} + {{ toYaml .Values.adminApi.extraVolumeMounts | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + readinessProbe: + {{- toYaml .Values.adminApi.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.adminApi.resources | nindent 12 }} + securityContext: + {{- toYaml .Values.adminApi.containerSecurityContext | nindent 12 }} + env: + {{- if .Values.adminApi.env }} + {{ toYaml .Values.adminApi.env | nindent 12 }} + {{- end }} + {{- with .Values.adminApi.extraContainers }} + {{ toYaml . | nindent 8 }} + {{- end }} + nodeSelector: + {{- toYaml .Values.adminApi.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.adminApi.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.adminApi.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.adminApi.terminationGracePeriodSeconds }} + volumes: + - name: config + {{- include "loki.configVolume" . 
| nindent 10 }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + - name: storage + emptyDir: {} + {{- if .Values.adminApi.extraVolumes }} + {{ toYaml .Values.adminApi.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.minio.enabled }} + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ .Release.Name }}-minio + - secret: + name: {{ .Release.Name }}-minio + {{- if .Values.minio.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.minio.tls.certSecret }} + items: + - key: {{ .Values.minio.tls.publicCrt }} + path: CAs/public.crt + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/production/helm/loki/templates/admin-api/service-admin-api.yaml b/production/helm/loki/templates/admin-api/service-admin-api.yaml new file mode 100644 index 000000000000..c7daa2790a12 --- /dev/null +++ b/production/helm/loki/templates/admin-api/service-admin-api.yaml @@ -0,0 +1,28 @@ +{{- if .Values.enterprise.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "enterprise-logs.adminApiFullname" . }} + labels: + {{- include "enterprise-logs.adminApiLabels" . | nindent 4 }} + {{- with .Values.adminApi.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- with .Values.adminApi.service.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 3100 + protocol: TCP + targetPort: http-metrics + - name: grpc + port: 9095 + protocol: TCP + targetPort: grpc + selector: + {{- include "enterprise-logs.adminApiSelectorLabels" . 
| nindent 4 }} +{{- end }} \ No newline at end of file diff --git a/production/helm/loki/templates/backend/clusterrole.yaml b/production/helm/loki/templates/backend/clusterrole.yaml index 176ada056626..e8631c35a501 100644 --- a/production/helm/loki/templates/backend/clusterrole.yaml +++ b/production/helm/loki/templates/backend/clusterrole.yaml @@ -1,4 +1,5 @@ -{{- if and (not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) }} +{{- $isSimpleScalable := eq (include "loki.deployment.isScalable" .) "true" -}} +{{- if and $isSimpleScalable (not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) }} kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -17,4 +18,4 @@ rules: {{- else }} rules: [] {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/production/helm/loki/templates/backend/clusterrolebinding.yaml b/production/helm/loki/templates/backend/clusterrolebinding.yaml index 1021fd008980..619b70260cd4 100644 --- a/production/helm/loki/templates/backend/clusterrolebinding.yaml +++ b/production/helm/loki/templates/backend/clusterrolebinding.yaml @@ -1,4 +1,5 @@ -{{- if and (not .Values.rbac.namespaced) }} +{{- $isSimpleScalable := eq (include "loki.deployment.isScalable" .) 
"true" -}} +{{- if and $isSimpleScalable (not .Values.rbac.namespaced) }} kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -21,4 +22,4 @@ roleRef: name: {{ .Values.rbac.useExistingRole }} {{- end }} apiGroup: rbac.authorization.k8s.io -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/production/helm/loki/templates/backend/statefulset-backend.yaml b/production/helm/loki/templates/backend/statefulset-backend.yaml index 97e110ea2d54..1b7ead3ae1ad 100644 --- a/production/helm/loki/templates/backend/statefulset-backend.yaml +++ b/production/helm/loki/templates/backend/statefulset-backend.yaml @@ -205,7 +205,7 @@ spec: {{- toYaml .Values.backend.resources | nindent 12 }} {{- with .Values.backend.affinity }} affinity: - {{- tpl . $ | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.backend.dnsConfig }} dnsConfig: @@ -231,12 +231,7 @@ spec: {{- toYaml .Values.backend.persistence.dataVolumeParameters | nindent 10 }} {{- end}} - name: config - {{- if .Values.loki.existingSecretForConfig }} - secret: - secretName: {{ .Values.loki.existingSecretForConfig }} - {{- else }} {{- include "loki.configVolume" . | nindent 10 }} - {{- end }} - name: runtime-config configMap: name: {{ template "loki.name" . }}-runtime diff --git a/production/helm/loki/templates/bloom-compactor/_helpers-bloom-compactor.tpl b/production/helm/loki/templates/bloom-compactor/_helpers-bloom-compactor.tpl new file mode 100644 index 000000000000..193a8f883b12 --- /dev/null +++ b/production/helm/loki/templates/bloom-compactor/_helpers-bloom-compactor.tpl @@ -0,0 +1,58 @@ +{{/* +bloom compactor fullname +*/}} +{{- define "loki.bloomCompactorFullname" -}} +{{ include "loki.fullname" . }}-bloom-compactor +{{- end }} + +{{/* +bloom compactor common labels +*/}} +{{- define "loki.bloomCompactorLabels" -}} +{{ include "loki.labels" . 
}} +app.kubernetes.io/component: bloom-compactor +{{- end }} + +{{/* +bloom compactor selector labels +*/}} +{{- define "loki.bloomCompactorSelectorLabels" -}} +{{ include "loki.selectorLabels" . }} +app.kubernetes.io/component: bloom-compactor +{{- end }} + +{{/* +bloom compactor readinessProbe +*/}} +{{- define "loki.bloomCompactor.readinessProbe" -}} +{{- with .Values.bloomCompactor.readinessProbe }} +readinessProbe: + {{- toYaml . | nindent 2 }} +{{- else }} +{{- with .Values.loki.readinessProbe }} +readinessProbe: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +bloom compactor priority class name +*/}} +{{- define "loki.bloomCompactorPriorityClassName" }} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.bloomCompactor.priorityClassName -}} +{{- if $pcn }} +priorityClassName: {{ $pcn }} +{{- end }} +{{- end }} + +{{/* +Create the name of the bloom compactor service account +*/}} +{{- define "loki.bloomCompactorServiceAccountName" -}} +{{- if .Values.bloomCompactor.serviceAccount.create -}} + {{ default (print (include "loki.serviceAccountName" .) "-bloom-compactor") .Values.bloomCompactor.serviceAccount.name }} +{{- else -}} + {{ default (include "loki.serviceAccountName" .) .Values.bloomCompactor.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/production/helm/loki/templates/bloom-compactor/statefulset-bloom-compactor.yaml b/production/helm/loki/templates/bloom-compactor/statefulset-bloom-compactor.yaml new file mode 100644 index 000000000000..fbece8f2953e --- /dev/null +++ b/production/helm/loki/templates/bloom-compactor/statefulset-bloom-compactor.yaml @@ -0,0 +1,179 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed }} +{{- if (gt (int .Values.bloomCompactor.replicas) 0) -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "loki.bloomCompactorFullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.bloomCompactorLabels" . | nindent 4 }} + {{- with .Values.loki.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.bloomCompactor.replicas }} + podManagementPolicy: Parallel + updateStrategy: + rollingUpdate: + partition: 0 + serviceName: {{ include "loki.bloomCompactorFullname" . }}-headless + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.bloomCompactor.persistence.enableStatefulSetAutoDeletePVC) }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.bloomCompactor.persistence.whenDeleted }} + whenScaled: {{ .Values.bloomCompactor.persistence.whenScaled }} + {{- end }} + selector: + matchLabels: + {{- include "loki.bloomCompactorSelectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.bloomCompactor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.bloomCompactorSelectorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.bloomCompactor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.bloomCompactor.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.bloomCompactorPriorityClassName" . 
| nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.bloomCompactor.terminationGracePeriodSeconds }} + {{- with .Values.bloomCompactor.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: bloom-compactor + image: {{ include "loki.image" . }} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + {{- if or .Values.loki.command .Values.bloomCompactor.command }} + command: + - {{ coalesce .Values.bloomCompactor.command .Values.loki.command | quote }} + {{- end }} + args: + - -config.file=/etc/loki/config/config.yaml + - -target=bloom-compactor + {{- with .Values.bloomCompactor.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.bloomCompactor.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.bloomCompactor.extraEnvFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + {{- include "loki.bloomCompactor.readinessProbe" . | nindent 10 }} + volumeMounts: + - name: temp + mountPath: /tmp + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + - name: data + mountPath: /var/loki + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.bloomCompactor.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.bloomCompactor.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- if .Values.bloomCompactor.extraContainers }} + {{- toYaml .Values.bloomCompactor.extraContainers | nindent 8}} + {{- end }} + {{- with .Values.bloomCompactor.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.bloomCompactor.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: temp + emptyDir: {} + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . }}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- if not .Values.bloomCompactor.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- with .Values.bloomCompactor.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.bloomCompactor.persistence.enabled }} + volumeClaimTemplates: + {{- range .Values.bloomCompactor.persistence.claims }} + - metadata: + name: {{ .name }} + {{- with .annotations }} + annotations: + {{- . | toYaml | nindent 10 }} + {{- end }} + spec: + accessModes: + - ReadWriteOnce + {{- with .storageClass }} + storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . }}{{ end }} + {{- end }} + resources: + requests: + storage: {{ .size | quote }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/production/helm/loki/templates/bloom-gateway/_helpers-bloom-gateway.tpl b/production/helm/loki/templates/bloom-gateway/_helpers-bloom-gateway.tpl new file mode 100644 index 000000000000..f0cef4f179da --- /dev/null +++ b/production/helm/loki/templates/bloom-gateway/_helpers-bloom-gateway.tpl @@ -0,0 +1,58 @@ +{{/* +bloom gateway fullname +*/}} +{{- define "loki.bloomGatewayFullname" -}} +{{ include "loki.fullname" . 
}}-bloom-gateway +{{- end }} + +{{/* +bloom gateway common labels +*/}} +{{- define "loki.bloomGatewayLabels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: bloom-gateway +{{- end }} + +{{/* +bloom gateway selector labels +*/}} +{{- define "loki.bloomGatewaySelectorLabels" -}} +{{ include "loki.selectorLabels" . }} +app.kubernetes.io/component: bloom-gateway +{{- end }} + +{{/* +bloom gateway readinessProbe +*/}} +{{- define "loki.bloomGateway.readinessProbe" -}} +{{- with .Values.bloomGateway.readinessProbe }} +readinessProbe: + {{- toYaml . | nindent 2 }} +{{- else }} +{{- with .Values.loki.readinessProbe }} +readinessProbe: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +bloom gateway priority class name +*/}} +{{- define "loki.bloomGatewayPriorityClassName" }} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.bloomGateway.priorityClassName -}} +{{- if $pcn }} +priorityClassName: {{ $pcn }} +{{- end }} +{{- end }} + +{{/* +Create the name of the bloom gateway service account +*/}} +{{- define "loki.bloomGatewayServiceAccountName" -}} +{{- if .Values.bloomGateway.serviceAccount.create -}} + {{ default (print (include "loki.serviceAccountName" .) "-bloom-gateway") .Values.bloomGateway.serviceAccount.name }} +{{- else -}} + {{ default (include "loki.serviceAccountName" .) .Values.bloomGateway.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/production/helm/loki/templates/bloom-gateway/statefulset-bloom-gateway.yaml b/production/helm/loki/templates/bloom-gateway/statefulset-bloom-gateway.yaml new file mode 100644 index 000000000000..353b0203fe79 --- /dev/null +++ b/production/helm/loki/templates/bloom-gateway/statefulset-bloom-gateway.yaml @@ -0,0 +1,179 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) 
"true" -}} +{{- if $isDistributed }} +{{- if (gt (int .Values.bloomGateway.replicas) 0) -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "loki.bloomGatewayFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.bloomGatewayLabels" . | nindent 4 }} + {{- with .Values.loki.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.bloomGateway.replicas }} + podManagementPolicy: Parallel + updateStrategy: + rollingUpdate: + partition: 0 + serviceName: {{ include "loki.bloomGatewayFullname" . }}-headless + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.bloomGateway.persistence.enableStatefulSetAutoDeletePVC) }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.bloomGateway.persistence.whenDeleted }} + whenScaled: {{ .Values.bloomGateway.persistence.whenScaled }} + {{- end }} + selector: + matchLabels: + {{- include "loki.bloomGatewaySelectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.bloomGateway.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.bloomGatewaySelectorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.bloomGateway.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.bloomGateway.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.bloomGatewayPriorityClassName" . 
| nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.bloomGateway.terminationGracePeriodSeconds }} + {{- with .Values.bloomGateway.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: bloom-gateway + image: {{ include "loki.image" . }} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + {{- if or .Values.loki.command .Values.bloomGateway.command }} + command: + - {{ coalesce .Values.bloomGateway.command .Values.loki.command | quote }} + {{- end }} + args: + - -config.file=/etc/loki/config/config.yaml + - -target=bloom-gateway + {{- with .Values.bloomGateway.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.bloomGateway.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.bloomGateway.extraEnvFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + {{- include "loki.bloomGateway.readinessProbe" . | nindent 10 }} + volumeMounts: + - name: temp + mountPath: /tmp + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + - name: data + mountPath: /var/loki + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.bloomGateway.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.bloomGateway.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.bloomGateway.extraContainers }} + {{- toYaml .Values.bloomGateway.extraContainers | nindent 8}} + {{- end }} + {{- with .Values.bloomGateway.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.bloomGateway.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: temp + emptyDir: {} + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . }}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- if not .Values.bloomGateway.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- with .Values.bloomGateway.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.bloomGateway.persistence.enabled }} + volumeClaimTemplates: + {{- range .Values.bloomGateway.persistence.claims }} + - metadata: + name: {{ .name }} + {{- with .annotations }} + annotations: + {{- . | toYaml | nindent 10 }} + {{- end }} + spec: + accessModes: + - ReadWriteOnce + {{- with .storageClass }} + storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . }}{{ end }} + {{- end }} + resources: + requests: + storage: {{ .size | quote }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/production/helm/loki/templates/chunks-cache/poddisruptionbudget-chunks-cache.yaml b/production/helm/loki/templates/chunks-cache/poddisruptionbudget-chunks-cache.yaml new file mode 100644 index 000000000000..da95adf1379f --- /dev/null +++ b/production/helm/loki/templates/chunks-cache/poddisruptionbudget-chunks-cache.yaml @@ -0,0 +1,16 @@ +{{- if .Values.chunksCache.enabled }} +apiVersion: {{ include "loki.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "loki.fullname" . }}-memcached-chunks-cache + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: memcached-chunks-cache +spec: + selector: + matchLabels: + {{- include "loki.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: memcached-chunks-cache + maxUnavailable: 1 +{{- end -}} diff --git a/production/helm/loki/templates/chunks-cache/service-chunks-cache-headless.yaml b/production/helm/loki/templates/chunks-cache/service-chunks-cache-headless.yaml new file mode 100644 index 000000000000..dc2ccd4b0290 --- /dev/null +++ b/production/helm/loki/templates/chunks-cache/service-chunks-cache-headless.yaml @@ -0,0 +1 @@ +{{- include "loki.memcached.service" (dict "ctx" $ "valuesSection" "chunksCache" "component" "chunks-cache" ) }} diff --git a/production/helm/loki/templates/chunks-cache/statefulset-chunks-cache.yaml b/production/helm/loki/templates/chunks-cache/statefulset-chunks-cache.yaml new file mode 100644 index 000000000000..6a54c577ca9b --- /dev/null +++ b/production/helm/loki/templates/chunks-cache/statefulset-chunks-cache.yaml @@ -0,0 +1 @@ +{{- include "loki.memcached.statefulSet" (dict "ctx" $ "valuesSection" "chunksCache" "component" "chunks-cache" ) }} diff --git a/production/helm/loki/templates/compactor/_helpers-compactor.tpl b/production/helm/loki/templates/compactor/_helpers-compactor.tpl new file mode 100644 index 000000000000..75c21db16747 --- /dev/null +++ b/production/helm/loki/templates/compactor/_helpers-compactor.tpl @@ -0,0 +1,81 @@ +{{/* +compactor fullname +*/}} +{{- define "loki.compactorFullname" -}} +{{ include "loki.fullname" . }}-compactor +{{- end }} + +{{/* +compactor common labels +*/}} +{{- define "loki.compactorLabels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: compactor +{{- end }} + +{{/* +compactor selector labels +*/}} +{{- define "loki.compactorSelectorLabels" -}} +{{ include "loki.selectorLabels" . 
}} +app.kubernetes.io/component: compactor +{{- end }} + +{{/* +compactor image +*/}} +{{- define "loki.compactorImage" -}} +{{- $dict := dict "loki" .Values.loki.image "service" .Values.compactor.image "global" .Values.global.image "defaultVersion" .Chart.AppVersion -}} +{{- include "loki.lokiImage" $dict -}} +{{- end }} + +{{/* +compactor readinessProbe +*/}} +{{- define "loki.compactor.readinessProbe" -}} +{{- with .Values.compactor.readinessProbe }} +readinessProbe: + {{- toYaml . | nindent 2 }} +{{- else }} +{{- with .Values.loki.readinessProbe }} +readinessProbe: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +compactor livenessProbe +*/}} +{{- define "loki.compactor.livenessProbe" -}} +{{- with .Values.compactor.livenessProbe }} +livenessProbe: + {{- toYaml . | nindent 2 }} +{{- else }} +{{- with .Values.loki.livenessProbe }} +livenessProbe: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +compactor priority class name +*/}} +{{- define "loki.compactorPriorityClassName" }} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.compactor.priorityClassName -}} +{{- if $pcn }} +priorityClassName: {{ $pcn }} +{{- end }} +{{- end }} + +{{/* +Create the name of the compactor service account +*/}} +{{- define "loki.compactorServiceAccountName" -}} +{{- if .Values.compactor.serviceAccount.create -}} + {{ default (print (include "loki.serviceAccountName" .) "-compactor") .Values.compactor.serviceAccount.name }} +{{- else -}} + {{ default (include "loki.serviceAccountName" .) .Values.compactor.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/production/helm/loki/templates/compactor/service-compactor.yaml b/production/helm/loki/templates/compactor/service-compactor.yaml new file mode 100644 index 000000000000..c75e1cee5ae1 --- /dev/null +++ b/production/helm/loki/templates/compactor/service-compactor.yaml @@ -0,0 +1,35 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) 
"true" -}} +{{- if $isDistributed }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.labels" . | nindent 4 }} + {{- with .Values.compactor.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + app.kubernetes.io/component: compactor + {{- with .Values.loki.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- if .Values.compactor.appProtocol.grpc }} + appProtocol: {{ .Values.compactor.appProtocol.grpc }} + {{- end }} + selector: + {{- include "loki.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: compactor +{{- end }} diff --git a/production/helm/loki/templates/compactor/statefulset-compactor.yaml b/production/helm/loki/templates/compactor/statefulset-compactor.yaml new file mode 100644 index 000000000000..98fab0affc32 --- /dev/null +++ b/production/helm/loki/templates/compactor/statefulset-compactor.yaml @@ -0,0 +1,193 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "loki.compactorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.compactorLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.compactor.replicas }} + podManagementPolicy: Parallel + updateStrategy: + rollingUpdate: + partition: 0 + serviceName: {{ include "loki.compactorFullname" . 
}}-headless + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.compactor.persistence.enableStatefulSetAutoDeletePVC) }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.compactor.persistence.whenDeleted }} + whenScaled: {{ .Values.compactor.persistence.whenScaled }} + {{- end }} + selector: + matchLabels: + {{- include "loki.compactorSelectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.compactor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.compactorSelectorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.compactor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} + {{- with .Values.compactor.topologySpreadConstraints }} + topologySpreadConstraints: + {{- tpl . $ | nindent 8 }} + {{- end }} + {{- end }} + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.compactor.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.compactorPriorityClassName" . | nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.compactor.terminationGracePeriodSeconds }} + {{- with .Values.compactor.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: compactor + image: {{ include "loki.image" . 
}} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + {{- if or .Values.loki.command .Values.compactor.command }} + command: + - {{ coalesce .Values.compactor.command .Values.loki.command | quote }} + {{- end }} + args: + - -config.file=/etc/loki/config/config.yaml + - -target=compactor + {{- with .Values.compactor.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.compactor.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.compactor.extraEnvFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + {{- include "loki.compactor.readinessProbe" . | nindent 10 }} + {{- include "loki.compactor.livenessProbe" . | nindent 10 }} + volumeMounts: + - name: temp + mountPath: /tmp + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + - name: data + mountPath: /var/loki + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.compactor.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.compactor.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.compactor.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.compactor.extraContainers }} + {{- toYaml .Values.compactor.extraContainers | nindent 8}} + {{- end }} + {{- with .Values.compactor.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.compactor.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.compactor.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + volumes: + - name: temp + emptyDir: {} + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . }}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- if not .Values.compactor.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- with .Values.compactor.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.compactor.persistence.enabled }} + volumeClaimTemplates: + {{- range .Values.compactor.persistence.claims }} + - metadata: + name: {{ .name }} + {{- with .annotations }} + annotations: + {{- . | toYaml | nindent 10 }} + {{- end }} + spec: + accessModes: + - ReadWriteOnce + {{- with .storageClass }} + storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . }}{{ end }} + {{- end }} + resources: + requests: + storage: {{ .size | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/config.yaml b/production/helm/loki/templates/config.yaml index 101abc353e26..fe47590078e5 100644 --- a/production/helm/loki/templates/config.yaml +++ b/production/helm/loki/templates/config.yaml @@ -1,4 +1,4 @@ -{{- if not .Values.loki.existingSecretForConfig -}} +{{- if .Values.loki.generatedConfigObjectName -}} apiVersion: v1 {{- if eq .Values.loki.configStorageType "Secret" }} kind: Secret @@ -6,7 +6,7 @@ kind: Secret kind: ConfigMap {{- end }} metadata: - name: {{ tpl .Values.loki.externalConfigSecretName . }} + name: {{ tpl .Values.loki.generatedConfigObjectName . }} namespace: {{ $.Release.Namespace }} labels: {{- include "loki.labels" . 
| nindent 4 }} diff --git a/production/helm/loki/templates/distributor/_helpers-distributor.tpl b/production/helm/loki/templates/distributor/_helpers-distributor.tpl new file mode 100644 index 000000000000..c23179e90501 --- /dev/null +++ b/production/helm/loki/templates/distributor/_helpers-distributor.tpl @@ -0,0 +1,32 @@ +{{/* +distributor fullname +*/}} +{{- define "loki.distributorFullname" -}} +{{ include "loki.fullname" . }}-distributor +{{- end }} + +{{/* +distributor common labels +*/}} +{{- define "loki.distributorLabels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: distributor +{{- end }} + +{{/* +distributor selector labels +*/}} +{{- define "loki.distributorSelectorLabels" -}} +{{ include "loki.selectorLabels" . }} +app.kubernetes.io/component: distributor +{{- end }} + +{{/* +distributor priority class name +*/}} +{{- define "loki.distributorPriorityClassName" -}} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.distributor.priorityClassName -}} +{{- if $pcn }} +priorityClassName: {{ $pcn }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/distributor/deployment-distributor.yaml b/production/helm/loki/templates/distributor/deployment-distributor.yaml new file mode 100644 index 000000000000..be66bfc6b524 --- /dev/null +++ b/production/helm/loki/templates/distributor/deployment-distributor.yaml @@ -0,0 +1,152 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "loki.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.distributorLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: +{{- if not .Values.distributor.autoscaling.enabled }} + replicas: {{ .Values.distributor.replicas }} +{{- end }} + strategy: + rollingUpdate: + maxSurge: {{ .Values.distributor.maxSurge }} + maxUnavailable: 1 + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "loki.distributorSelectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.distributor.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.distributorSelectorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.distributor.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.distributor.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.distributorPriorityClassName" . | nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.distributor.terminationGracePeriodSeconds }} + containers: + - name: distributor + image: {{ include "loki.image" . 
}} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + {{- if or .Values.loki.command .Values.distributor.command }} + command: + - {{ coalesce .Values.distributor.command .Values.loki.command | quote }} + {{- end }} + args: + - -config.file=/etc/loki/config/config.yaml + - -target=distributor + {{- if .Values.ingester.zoneAwareReplication.enabled }} + {{- if and (.Values.ingester.zoneAwareReplication.migration.enabled) (not .Values.ingester.zoneAwareReplication.migration.writePath) }} + - -distributor.zone-awareness-enabled=false + {{- else }} + - -distributor.zone-awareness-enabled=true + {{- end }} + {{- end }} + {{- with .Values.distributor.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.distributor.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.distributor.extraEnvFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + readinessProbe: + {{- toYaml .Values.loki.readinessProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.loki.livenessProbe | nindent 12 }} + volumeMounts: + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.distributor.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.distributor.resources | nindent 12 }} + {{- if .Values.distributor.extraContainers }} + {{- toYaml .Values.distributor.extraContainers | nindent 8}} + {{- end }} + {{- with .Values.distributor.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.distributor.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.distributor.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . }}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- with .Values.distributor.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end -}} diff --git a/production/helm/loki/templates/distributor/hpa.yaml b/production/helm/loki/templates/distributor/hpa.yaml new file mode 100644 index 000000000000..838a31004822 --- /dev/null +++ b/production/helm/loki/templates/distributor/hpa.yaml @@ -0,0 +1,54 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed .Values.distributor.autoscaling.enabled }} +{{- $apiVersion := include "loki.hpa.apiVersion" . -}} +apiVersion: {{ $apiVersion }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "loki.distributorFullname" . }} + labels: + {{- include "loki.distributorLabels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "loki.distributorFullname" . }} + minReplicas: {{ .Values.distributor.autoscaling.minReplicas }} + maxReplicas: {{ .Values.distributor.autoscaling.maxReplicas }} + metrics: + {{- with .Values.distributor.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if (eq $apiVersion "autoscaling/v2") }} + target: + type: Utilization + averageUtilization: {{ . }} + {{- else }} + targetAverageUtilization: {{ . 
}} + {{- end }} + {{- end }} + {{- with .Values.distributor.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if (eq $apiVersion "autoscaling/v2") }} + target: + type: Utilization + averageUtilization: {{ . }} + {{- else }} + targetAverageUtilization: {{ . }} + {{- end }} + {{- end }} + {{- with .Values.distributor.autoscaling.customMetrics }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.distributor.autoscaling.behavior.enabled }} + behavior: + {{- with .Values.distributor.autoscaling.behavior.scaleDown }} + scaleDown: {{ toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.distributor.autoscaling.behavior.scaleUp }} + scaleUp: {{ toYaml . | nindent 6 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/distributor/poddisruptionbudget-distributor.yaml b/production/helm/loki/templates/distributor/poddisruptionbudget-distributor.yaml new file mode 100644 index 000000000000..806a447f9fc9 --- /dev/null +++ b/production/helm/loki/templates/distributor/poddisruptionbudget-distributor.yaml @@ -0,0 +1,21 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed (gt (int .Values.distributor.replicas) 1) }} +{{- if kindIs "invalid" .Values.distributor.maxUnavailable }} +{{- fail "`.Values.distributor.maxUnavailable` must be set when `.Values.distributor.replicas` is greater than 1." }} +{{- else }} +apiVersion: {{ include "loki.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "loki.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.distributorLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "loki.distributorSelectorLabels" . | nindent 6 }} + {{- with .Values.distributor.maxUnavailable }} + maxUnavailable: {{ . 
}} + {{- end }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/distributor/service-distributor-headless.yaml b/production/helm/loki/templates/distributor/service-distributor-headless.yaml new file mode 100644 index 000000000000..c69bb0add37e --- /dev/null +++ b/production/helm/loki/templates/distributor/service-distributor-headless.yaml @@ -0,0 +1,36 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.distributorFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.distributorSelectorLabels" . | nindent 4 }} + {{- with .Values.distributor.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + variant: headless + prometheus.io/service-monitor: "false" + {{- with .Values.loki.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- if .Values.distributor.appProtocol.grpc }} + appProtocol: {{ .Values.distributor.appProtocol.grpc }} + {{- end }} + selector: + {{- include "loki.distributorSelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/production/helm/loki/templates/distributor/service-distributor.yaml b/production/helm/loki/templates/distributor/service-distributor.yaml new file mode 100644 index 000000000000..8145834d3509 --- /dev/null +++ b/production/helm/loki/templates/distributor/service-distributor.yaml @@ -0,0 +1,33 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.distributorFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.distributorLabels" . 
| nindent 4 }} + {{- with .Values.distributor.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.loki.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- if .Values.distributor.appProtocol.grpc }} + appProtocol: {{ .Values.distributor.appProtocol.grpc }} + {{- end }} + selector: + {{- include "loki.distributorSelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/production/helm/loki/templates/gateway/_helpers-gateway.tpl b/production/helm/loki/templates/gateway/_helpers-gateway.tpl index 272814b6c0e1..39890b12e9b3 100644 --- a/production/helm/loki/templates/gateway/_helpers-gateway.tpl +++ b/production/helm/loki/templates/gateway/_helpers-gateway.tpl @@ -2,7 +2,7 @@ gateway fullname */}} {{- define "loki.gatewayFullname" -}} -{{ include "loki.name" . }}-gateway +{{ include "loki.fullname" . 
}}-gateway {{- end }} {{/* diff --git a/production/helm/loki/templates/gateway/configmap-gateway.yaml b/production/helm/loki/templates/gateway/configmap-gateway.yaml index fe98c73dc3a4..1c981a73a5b8 100644 --- a/production/helm/loki/templates/gateway/configmap-gateway.yaml +++ b/production/helm/loki/templates/gateway/configmap-gateway.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.gateway.enabled }} +{{- if and .Values.gateway.enabled (not (and .Values.enterprise.enabled .Values.enterprise.gelGateway)) }} apiVersion: v1 kind: ConfigMap metadata: diff --git a/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml b/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml new file mode 100644 index 000000000000..4f7dccac911e --- /dev/null +++ b/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml @@ -0,0 +1,132 @@ +{{- if and .Values.gateway.enabled .Values.enterprise.enabled .Values.enterprise.gelGateway }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "loki.gatewayFullname" . }} + labels: + {{- include "loki.gatewayLabels" . | nindent 4 }} + {{- with .Values.enterpriseGateway.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- with .Values.enterpriseGateway.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.enterpriseGateway.replicas }} + selector: + matchLabels: + {{- include "loki.gatewaySelectorLabels" . | nindent 6 }} + strategy: + {{- toYaml .Values.enterpriseGateway.strategy | nindent 4 }} + template: + metadata: + labels: + {{- include "loki.gatewaySelectorLabels" . | nindent 8 }} + {{- with .Values.enterpriseGateway.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.useExternalConfig }} + checksum/config: {{ .Values.externalConfigVersion }} + {{- else }} + checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . 
| sha256sum }} + {{- end}} + {{- with .Values.enterpriseGateway.annotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "loki.serviceAccountName" . }} + {{- if .Values.enterpriseGateway.priorityClassName }} + priorityClassName: {{ .Values.enterpriseGateway.priorityClassName }} + {{- end }} + securityContext: + {{- toYaml .Values.enterpriseGateway.podSecurityContext | nindent 8 }} + initContainers: + {{- toYaml .Values.enterpriseGateway.initContainers | nindent 8 }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + {{- with .Values.enterpriseGateway.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: gateway + image: "{{ template "loki.image" . }}" + imagePullPolicy: {{ .Values.enterprise.image.pullPolicy }} + args: + - -target=gateway + - -config.file=/etc/loki/config/config.yaml + {{- if .Values.minio.enabled }} + - -admin.client.backend-type=s3 + - -admin.client.s3.endpoint={{ template "loki.minio" . }} + - -admin.client.s3.bucket-name=enterprise-logs-admin + - -admin.client.s3.access-key-id={{ .Values.minio.accessKey }} + - -admin.client.s3.secret-access-key={{ .Values.minio.secretKey }} + - -admin.client.s3.insecure=true + {{- end }} + {{- if .Values.enterpriseGateway.useDefaultProxyURLs }} + - -gateway.proxy.default.url=http://{{ template "loki.fullname" . }}-admin-api.{{ .Release.Namespace }}.svc:3100 + - -gateway.proxy.admin-api.url=http://{{ template "loki.fullname" . }}-admin-api.{{ .Release.Namespace }}.svc:3100 + - -gateway.proxy.distributor.url=dns:///{{ template "loki.fullname" . }}-distributor-headless.{{ .Release.Namespace }}.svc:9095 + - -gateway.proxy.ingester.url=http://{{ template "loki.fullname" . }}-ingester.{{ .Release.Namespace }}.svc:3100 + - -gateway.proxy.query-frontend.url=http://{{ template "loki.fullname" . 
}}-query-frontend.{{ .Release.Namespace }}.svc:3100 + - -gateway.proxy.ruler.url=http://{{ template "loki.fullname" . }}-ruler.{{ .Release.Namespace }}.svc:3100 + {{- end }} + {{- range $key, $value := .Values.enterpriseGateway.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/loki/config + - name: license + mountPath: /etc/loki/license + - name: storage + mountPath: /data + {{- if .Values.enterpriseGateway.extraVolumeMounts }} + {{ toYaml .Values.enterpriseGateway.extraVolumeMounts | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + readinessProbe: + {{- toYaml .Values.enterpriseGateway.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.enterpriseGateway.resources | nindent 12 }} + securityContext: + {{- toYaml .Values.enterpriseGateway.containerSecurityContext | nindent 12 }} + env: + {{- if .Values.enterpriseGateway.env }} + {{ toYaml .Values.enterpriseGateway.env | nindent 12 }} + {{- end }} + {{- with .Values.enterpriseGateway.extraContainers }} + {{ toYaml . | nindent 8 }} + {{- end }} + nodeSelector: + {{- toYaml .Values.enterpriseGateway.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.enterpriseGateway.affinity | nindent 8 }} + tolerations: + {{- toYaml .Values.enterpriseGateway.tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.enterpriseGateway.terminationGracePeriodSeconds }} + volumes: + - name: config + {{- include "loki.configVolume" . 
| nindent 10 }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + - name: storage + emptyDir: {} + {{- if .Values.enterpriseGateway.extraVolumes }} + {{ toYaml .Values.enterpriseGateway.extraVolumes | nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/production/helm/loki/templates/gateway/deployment-gateway.yaml b/production/helm/loki/templates/gateway/deployment-gateway-nginx.yaml similarity index 96% rename from production/helm/loki/templates/gateway/deployment-gateway.yaml rename to production/helm/loki/templates/gateway/deployment-gateway-nginx.yaml index 4ffa0c935b0a..f20c49727589 100644 --- a/production/helm/loki/templates/gateway/deployment-gateway.yaml +++ b/production/helm/loki/templates/gateway/deployment-gateway-nginx.yaml @@ -1,4 +1,4 @@ -{{- if .Values.gateway.enabled }} +{{- if and .Values.gateway.enabled (not (and .Values.enterprise.enabled .Values.enterprise.gelGateway)) }} apiVersion: apps/v1 kind: Deployment metadata: @@ -61,7 +61,7 @@ spec: image: {{ include "loki.gatewayImage" . }} imagePullPolicy: {{ .Values.gateway.image.pullPolicy }} ports: - - name: http + - name: http-metrics containerPort: 8080 protocol: TCP {{- with .Values.gateway.extraEnv }} @@ -101,7 +101,7 @@ spec: {{- end }} {{- with .Values.gateway.affinity }} affinity: - {{- tpl . $ | nindent 8 }} + {{- toYaml . 
| nindent 8 }} {{- end }} {{- with .Values.gateway.dnsConfig }} dnsConfig: diff --git a/production/helm/loki/templates/gateway/service-gateway.yaml b/production/helm/loki/templates/gateway/service-gateway.yaml index 5cb7a55c3c80..8c710263d7d2 100644 --- a/production/helm/loki/templates/gateway/service-gateway.yaml +++ b/production/helm/loki/templates/gateway/service-gateway.yaml @@ -28,9 +28,9 @@ spec: loadBalancerIP: {{ .Values.gateway.service.loadBalancerIP }} {{- end }} ports: - - name: http + - name: http-metrics port: {{ .Values.gateway.service.port }} - targetPort: http + targetPort: http-metrics {{- if and (eq "NodePort" .Values.gateway.service.type) .Values.gateway.service.nodePort }} nodePort: {{ .Values.gateway.service.nodePort }} {{- end }} diff --git a/production/helm/loki/templates/index-gateway/_helpers-index-gateway.tpl b/production/helm/loki/templates/index-gateway/_helpers-index-gateway.tpl new file mode 100644 index 000000000000..f42dff3d0636 --- /dev/null +++ b/production/helm/loki/templates/index-gateway/_helpers-index-gateway.tpl @@ -0,0 +1,40 @@ +{{/* +index-gateway fullname +*/}} +{{- define "loki.indexGatewayFullname" -}} +{{ include "loki.fullname" . }}-index-gateway +{{- end }} + +{{/* +index-gateway common labels +*/}} +{{- define "loki.indexGatewayLabels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: index-gateway +{{- end }} + +{{/* +index-gateway selector labels +*/}} +{{- define "loki.indexGatewaySelectorLabels" -}} +{{ include "loki.selectorLabels" . 
}} +app.kubernetes.io/component: index-gateway +{{- end }} + +{{/* +index-gateway image +*/}} +{{- define "loki.indexGatewayImage" -}} +{{- $dict := dict "loki" .Values.loki.image "service" .Values.indexGateway.image "global" .Values.global.image "defaultVersion" .Chart.AppVersion -}} +{{- include "loki.lokiImage" $dict -}} +{{- end }} + +{{/* +index-gateway priority class name +*/}} +{{- define "loki.indexGatewayPriorityClassName" -}} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.indexGateway.priorityClassName -}} +{{- if $pcn }} +priorityClassName: {{ $pcn }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/index-gateway/poddisruptionbudget-index-gateway.yaml b/production/helm/loki/templates/index-gateway/poddisruptionbudget-index-gateway.yaml new file mode 100644 index 000000000000..22ba1a0b4c33 --- /dev/null +++ b/production/helm/loki/templates/index-gateway/poddisruptionbudget-index-gateway.yaml @@ -0,0 +1,20 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed (gt (int .Values.indexGateway.replicas) 1) }} +{{- if kindIs "invalid" .Values.indexGateway.maxUnavailable }} +{{- fail "`.Values.indexGateway.maxUnavailable` must be set when `.Values.indexGateway.replicas` is greater than 1." }} +{{- else }} +apiVersion: {{ include "loki.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "loki.indexGatewayFullname" . }} + labels: + {{- include "loki.indexGatewayLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "loki.indexGatewaySelectorLabels" . | nindent 6 }} + {{- with .Values.indexGateway.maxUnavailable }} + maxUnavailable: {{ . 
}} + {{- end }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/index-gateway/service-index-gateway-headless.yaml b/production/helm/loki/templates/index-gateway/service-index-gateway-headless.yaml new file mode 100644 index 000000000000..b0c90dc35fd9 --- /dev/null +++ b/production/helm/loki/templates/index-gateway/service-index-gateway-headless.yaml @@ -0,0 +1,27 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.indexGatewayFullname" . }}-headless + labels: + {{- include "loki.indexGatewaySelectorLabels" . | nindent 4 }} + prometheus.io/service-monitor: "false" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- with .Values.indexGateway.appProtocol.grpc }} + appProtocol: {{ . }} + {{- end }} + selector: + {{- include "loki.indexGatewaySelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/production/helm/loki/templates/index-gateway/service-index-gateway.yaml b/production/helm/loki/templates/index-gateway/service-index-gateway.yaml new file mode 100644 index 000000000000..2d43bb0ed5e9 --- /dev/null +++ b/production/helm/loki/templates/index-gateway/service-index-gateway.yaml @@ -0,0 +1,32 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.indexGatewayFullname" . }} + labels: + {{- include "loki.indexGatewayLabels" . | nindent 4 }} + {{- with .Values.indexGateway.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.loki.serviceAnnotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- with .Values.indexGateway.appProtocol.grpc }} + appProtocol: {{ . }} + {{- end }} + selector: + {{- include "loki.indexGatewaySelectorLabels" . | nindent 4 }} +{{- end }} diff --git a/production/helm/loki/templates/index-gateway/statefulset-index-gateway.yaml b/production/helm/loki/templates/index-gateway/statefulset-index-gateway.yaml new file mode 100644 index 000000000000..5797185ef052 --- /dev/null +++ b/production/helm/loki/templates/index-gateway/statefulset-index-gateway.yaml @@ -0,0 +1,186 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "loki.indexGatewayFullname" . }} + labels: + {{- include "loki.indexGatewayLabels" . | nindent 4 }} + {{- with .Values.loki.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.indexGateway.replicas }} + updateStrategy: + rollingUpdate: + partition: 0 + serviceName: {{ include "loki.indexGatewayFullname" . }}-headless + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.indexGateway.persistence.enableStatefulSetAutoDeletePVC) }} + {{/* + Data on the read nodes is easy to replace, so we want to always delete PVCs to make + operation easier, and will rely on re-fetching data when needed. + */}} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.indexGateway.persistence.whenDeleted }} + whenScaled: {{ .Values.indexGateway.persistence.whenScaled }} + {{- end }} + selector: + matchLabels: + {{- include "loki.indexGatewaySelectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- include "loki.config.checksum" . 
| nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.indexGateway.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.indexGatewaySelectorLabels" . | nindent 8 }} + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.indexGateway.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.indexGateway.joinMemberlist }} + app.kubernetes.io/part-of: memberlist + {{- end }} + spec: + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.indexGateway.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.indexGatewayPriorityClassName" . | nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.indexGateway.terminationGracePeriodSeconds }} + {{- with .Values.indexGateway.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: index-gateway + image: {{ include "loki.image" . }} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + args: + - -config.file=/etc/loki/config/config.yaml + - -target=index-gateway + {{- with .Values.indexGateway.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + {{- if .Values.indexGateway.joinMemberlist }} + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- end }} + {{- with .Values.indexGateway.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.indexGateway.extraEnvFrom }} + envFrom: + {{- toYaml . 
| nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + readinessProbe: + {{- toYaml .Values.loki.readinessProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.loki.livenessProbe | nindent 12 }} + volumeMounts: + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + - name: data + mountPath: /var/loki + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.indexGateway.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.indexGateway.resources | nindent 12 }} + {{- if .Values.indexGateway.extraContainers }} + {{- toYaml .Values.indexGateway.extraContainers | nindent 8}} + {{- end }} + {{- with .Values.indexGateway.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.indexGateway.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.indexGateway.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . }}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- with .Values.indexGateway.extraVolumes }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if not .Values.indexGateway.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.indexGateway.persistence.inMemory }} + - name: data + {{- if .Values.indexGateway.persistence.inMemory }} + emptyDir: + medium: Memory + {{- end }} + {{- if .Values.indexGateway.persistence.size }} + sizeLimit: {{ .Values.indexGateway.persistence.size }} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.indexGateway.persistence.annotations }} + annotations: + {{- . | toYaml | nindent 10 }} + {{- end }} + spec: + accessModes: + - ReadWriteOnce + {{- with .Values.indexGateway.persistence.storageClass }} + storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . }}{{ end }} + {{- end }} + resources: + requests: + storage: {{ .Values.indexGateway.persistence.size | quote }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/ingester/_helpers-ingester.tpl b/production/helm/loki/templates/ingester/_helpers-ingester.tpl new file mode 100644 index 000000000000..418d4094d5ff --- /dev/null +++ b/production/helm/loki/templates/ingester/_helpers-ingester.tpl @@ -0,0 +1,74 @@ +{{/* +ingester fullname +*/}} +{{- define "loki.ingesterFullname" -}} +{{ include "loki.fullname" . }}-ingester +{{- end }} + +{{/* +ingester common labels +*/}} +{{- define "loki.ingesterLabels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: ingester +{{- end }} + +{{/* +ingester selector labels +*/}} +{{- define "loki.ingesterSelectorLabels" -}} +{{ include "loki.selectorLabels" . 
}} +app.kubernetes.io/component: ingester +{{- end }} + +{{/* +ingester priority class name +*/}} +{{- define "loki.ingesterPriorityClassName" -}} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.ingester.priorityClassName -}} +{{- if $pcn }} +priorityClassName: {{ $pcn }} +{{- end }} +{{- end }} + +{{- define "loki.ingester.readinessProbe" -}} +{{- with .Values.ingester.readinessProbe }} +readinessProbe: + {{- toYaml . | nindent 2 }} +{{- else }} +{{- with .Values.loki.readinessProbe }} +readinessProbe: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} +{{- end -}} + +{{- define "loki.ingester.livenessProbe" -}} +{{- with .Values.ingester.livenessProbe }} +livenessProbe: + {{- toYaml . | nindent 2 }} +{{- else }} +{{- with .Values.loki.livenessProbe }} +livenessProbe: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +expects global context +*/}} +{{- define "loki.ingester.replicaCount" -}} +{{- ceil (divf .Values.ingester.replicas 3) -}} +{{- end -}} + +{{/* +expects a dict +{ + "replicas": replicas in a zone, + "ctx": global context +} +*/}} +{{- define "loki.ingester.maxUnavailable" -}} +{{- ceil (mulf .replicas (divf (int .ctx.Values.ingester.zoneAwareReplication.maxUnavailablePct) 100)) -}} +{{- end -}} \ No newline at end of file diff --git a/production/helm/loki/templates/ingester/hpa.yaml b/production/helm/loki/templates/ingester/hpa.yaml new file mode 100644 index 000000000000..2e6a2d193964 --- /dev/null +++ b/production/helm/loki/templates/ingester/hpa.yaml @@ -0,0 +1,55 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed .Values.ingester.autoscaling.enabled }} +{{- $apiVersion := include "loki.hpa.apiVersion" . -}} +apiVersion: {{ $apiVersion }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "loki.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.ingesterLabels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include "loki.ingesterFullname" . }} + minReplicas: {{ .Values.ingester.autoscaling.minReplicas }} + maxReplicas: {{ .Values.ingester.autoscaling.maxReplicas }} + metrics: + {{- with .Values.ingester.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if (eq $apiVersion "autoscaling/v2") }} + target: + type: Utilization + averageUtilization: {{ . }} + {{- else }} + targetAverageUtilization: {{ . }} + {{- end }} + {{- end }} + {{- with .Values.ingester.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if (eq $apiVersion "autoscaling/v2") }} + target: + type: Utilization + averageUtilization: {{ . }} + {{- else }} + targetAverageUtilization: {{ . }} + {{- end }} + {{- end }} + {{- with .Values.ingester.autoscaling.customMetrics }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.ingester.autoscaling.behavior.enabled }} + behavior: + {{- with .Values.ingester.autoscaling.behavior.scaleDown }} + scaleDown: {{ toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.ingester.autoscaling.behavior.scaleUp }} + scaleUp: {{ toYaml . | nindent 6 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/ingester/poddisruptionbudget-ingester-rollout.yaml b/production/helm/loki/templates/ingester/poddisruptionbudget-ingester-rollout.yaml new file mode 100644 index 000000000000..000ab8569ad0 --- /dev/null +++ b/production/helm/loki/templates/ingester/poddisruptionbudget-ingester-rollout.yaml @@ -0,0 +1,21 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) 
"true" -}} +{{- if and $isDistributed (gt (int .Values.ingester.replicas) 1) (.Values.ingester.zoneAwareReplication.enabled) }} +{{- if kindIs "invalid" .Values.ingester.maxUnavailable }} +{{- fail "`.Values.ingester.maxUnavailable` must be set when `.Values.ingester.replicas` is greater than 1." }} +{{- else }} +apiVersion: {{ include "loki.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "loki.ingesterFullname" . }}-rollout + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.ingesterLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + rollout-group: ingester + {{- with .Values.ingester.maxUnavailable }} + maxUnavailable: {{ . }} + {{- end }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/ingester/poddisruptionbudget-ingester.yaml b/production/helm/loki/templates/ingester/poddisruptionbudget-ingester.yaml new file mode 100644 index 000000000000..1142c010218a --- /dev/null +++ b/production/helm/loki/templates/ingester/poddisruptionbudget-ingester.yaml @@ -0,0 +1,27 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed (gt (int .Values.ingester.replicas) 1) (or (not .Values.ingester.zoneAwareReplication.enabled) .Values.ingester.zoneAwareReplication.migration.enabled) }} +{{- if kindIs "invalid" .Values.ingester.maxUnavailable }} +{{- fail "`.Values.ingester.maxUnavailable` must be set when `.Values.ingester.replicas` is greater than 1." }} +{{- else }} +apiVersion: {{ include "loki.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "loki.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.ingesterLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "loki.ingesterSelectorLabels" . 
| nindent 6 }} + {{/* zone aware ingesters get their own pod disruption budget, ignore them here */}} + matchExpressions: + - key: rollout-group + operator: NotIn + values: + - "ingester" + {{- with .Values.ingester.maxUnavailable }} + maxUnavailable: {{ . }} + {{- end }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/ingester/service-ingester-headless.yaml b/production/helm/loki/templates/ingester/service-ingester-headless.yaml new file mode 100644 index 000000000000..e83dcf7be4fe --- /dev/null +++ b/production/helm/loki/templates/ingester/service-ingester-headless.yaml @@ -0,0 +1,32 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed (or (not .Values.ingester.zoneAwareReplication.enabled) .Values.ingester.zoneAwareReplication.migration.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.ingesterFullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.ingesterSelectorLabels" . | nindent 4 }} + prometheus.io/service-monitor: "false" + {{- with .Values.loki.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- if .Values.ingester.appProtocol.grpc }} + appProtocol: {{ .Values.ingester.appProtocol.grpc }} + {{- end }} + selector: + {{- include "loki.ingesterSelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/production/helm/loki/templates/ingester/service-ingester-zone-a-headless.yaml b/production/helm/loki/templates/ingester/service-ingester-zone-a-headless.yaml new file mode 100644 index 000000000000..478ea8c89eff --- /dev/null +++ b/production/helm/loki/templates/ingester/service-ingester-zone-a-headless.yaml @@ -0,0 +1,35 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) 
"true" -}} +{{- if and $isDistributed .Values.ingester.zoneAwareReplication.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.ingesterFullname" . }}-zone-a-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.loki.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + clusterIP: None + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- if .Values.ingester.appProtocol.grpc }} + appProtocol: {{ .Values.ingester.appProtocol.grpc }} + {{- end }} + selector: + {{- include "loki.ingesterSelectorLabels" . | nindent 4 }} + name: ingester-zone-a + rollout-group: ingester +{{- end -}} diff --git a/production/helm/loki/templates/ingester/service-ingester-zone-b-headless.yaml b/production/helm/loki/templates/ingester/service-ingester-zone-b-headless.yaml new file mode 100644 index 000000000000..c19ed4cb1f65 --- /dev/null +++ b/production/helm/loki/templates/ingester/service-ingester-zone-b-headless.yaml @@ -0,0 +1,35 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed .Values.ingester.zoneAwareReplication.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.ingesterFullname" . }}-zone-b-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.loki.serviceAnnotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + clusterIP: None + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- if .Values.ingester.appProtocol.grpc }} + appProtocol: {{ .Values.ingester.appProtocol.grpc }} + {{- end }} + selector: + {{- include "loki.ingesterSelectorLabels" . | nindent 4 }} + name: ingester-zone-b + rollout-group: ingester +{{- end -}} diff --git a/production/helm/loki/templates/ingester/service-ingester-zone-c-headless.yaml b/production/helm/loki/templates/ingester/service-ingester-zone-c-headless.yaml new file mode 100644 index 000000000000..2757fcef9400 --- /dev/null +++ b/production/helm/loki/templates/ingester/service-ingester-zone-c-headless.yaml @@ -0,0 +1,35 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed .Values.ingester.zoneAwareReplication.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.ingesterFullname" . }}-zone-c-headless + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.loki.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + clusterIP: None + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- if .Values.ingester.appProtocol.grpc }} + appProtocol: {{ .Values.ingester.appProtocol.grpc }} + {{- end }} + selector: + {{- include "loki.ingesterSelectorLabels" . 
| nindent 4 }} + name: ingester-zone-c + rollout-group: ingester +{{- end -}} diff --git a/production/helm/loki/templates/ingester/service-ingester.yaml b/production/helm/loki/templates/ingester/service-ingester.yaml new file mode 100644 index 000000000000..d762cbf65d95 --- /dev/null +++ b/production/helm/loki/templates/ingester/service-ingester.yaml @@ -0,0 +1,33 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed (or (not .Values.ingester.zoneAwareReplication.enabled) .Values.ingester.zoneAwareReplication.migration.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.ingesterLabels" . | nindent 4 }} + {{- with .Values.ingester.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.loki.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- if .Values.ingester.appProtocol.grpc }} + appProtocol: {{ .Values.ingester.appProtocol.grpc }} + {{- end }} + selector: + {{- include "loki.ingesterSelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/production/helm/loki/templates/ingester/statefulset-ingester-zone-a.yaml b/production/helm/loki/templates/ingester/statefulset-ingester-zone-a.yaml new file mode 100644 index 000000000000..13c7018e53e2 --- /dev/null +++ b/production/helm/loki/templates/ingester/statefulset-ingester-zone-a.yaml @@ -0,0 +1,232 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed .Values.ingester.zoneAwareReplication.enabled }} +{{- $replicas := (include "loki.ingester.replicaCount" .) -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "loki.ingesterFullname" . 
}}-zone-a + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.ingesterLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + rollout-group: ingester + name: ingester-zone-a + annotations: + rollout-max-unavailable: "{{ include "loki.ingester.maxUnavailable" (dict "ctx" . "replicas" $replicas)}}" + {{- with .Values.loki.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingester.zoneAwareReplication.zoneA.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ $replicas }} +{{- end }} + podManagementPolicy: Parallel + serviceName: {{ include "loki.ingesterFullname" . }}-zone-a + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.ingester.persistence.enableStatefulSetAutoDeletePVC) }} + {{/* + Data on the read nodes is easy to replace, so we want to always delete PVCs to make + operation easier, and will rely on re-fetching data when needed. + */}} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.ingester.persistence.whenDeleted }} + whenScaled: {{ .Values.ingester.persistence.whenScaled }} + {{- end }} + selector: + matchLabels: + {{- include "loki.ingesterSelectorLabels" . | nindent 6 }} + name: ingester-zone-a + rollout-group: ingester + updateStrategy: + type: OnDelete + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.zoneAwareReplication.zoneA.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.ingesterSelectorLabels" . 
| nindent 8 }} + app.kubernetes.io/part-of: memberlist + name: ingester-zone-a + rollout-group: ingester + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} + {{- with .Values.ingester.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.ingesterPriorityClassName" . | nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + {{- with .Values.ingester.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: ingester + image: {{ include "loki.image" . }} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + {{- if or .Values.loki.command .Values.ingester.command }} + command: + - {{ coalesce .Values.ingester.command .Values.loki.command | quote }} + {{- end }} + args: + - -config.file=/etc/loki/config/config.yaml + - -ingester.availability-zone=zone-a + - -ingester.unregister-on-shutdown=false + - -ingester.tokens-file-path=/var/loki/ring-tokens + - -target=ingester + {{- with .Values.ingester.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.ingester.extraEnv }} + env: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- with .Values.ingester.extraEnvFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + {{- include "loki.ingester.readinessProbe" . | nindent 10 }} + {{- include "loki.ingester.livenessProbe" . | nindent 10 }} + volumeMounts: + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + - name: data + mountPath: /var/loki + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.ingester.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.ingester.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.ingester.extraContainers }} + {{- toYaml .Values.ingester.extraContainers | nindent 8}} + {{- end }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: rollout-group + operator: In + values: + - ingester + - key: name + operator: NotIn + values: + - ingester-zone-a + topologyKey: kubernetes.io/hostname + {{- with .Values.ingester.zoneAwareReplication.zoneA.extraAffinity }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.zoneAwareReplication.zoneA.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . 
}}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- with .Values.ingester.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if not .Values.ingester.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.ingester.persistence.inMemory }} + - name: data + {{- if .Values.ingester.persistence.inMemory }} + emptyDir: + medium: Memory + {{- end }} + {{- if .Values.ingester.persistence.size }} + sizeLimit: {{ .Values.ingester.persistence.size }} + {{- end }} + {{- else }} + volumeClaimTemplates: + {{- range .Values.ingester.persistence.claims }} + - metadata: + name: {{ .name }} + {{- with .annotations }} + annotations: + {{- . | toYaml | nindent 10 }} + {{- end }} + spec: + accessModes: + - ReadWriteOnce + {{- with .storageClass }} + storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . }}{{ end }} + {{- end }} + resources: + requests: + storage: {{ .size | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/ingester/statefulset-ingester-zone-b.yaml b/production/helm/loki/templates/ingester/statefulset-ingester-zone-b.yaml new file mode 100644 index 000000000000..a0c7b85f8a14 --- /dev/null +++ b/production/helm/loki/templates/ingester/statefulset-ingester-zone-b.yaml @@ -0,0 +1,232 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed .Values.ingester.zoneAwareReplication.enabled }} +{{- $replicas := (include "loki.ingester.replicaCount" .) -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "loki.ingesterFullname" . }}-zone-b + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.ingesterLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + rollout-group: ingester + name: ingester-zone-b + annotations: + rollout-max-unavailable: "{{ include "loki.ingester.maxUnavailable" (dict "ctx" . "replicas" $replicas)}}" + {{- with .Values.loki.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingester.zoneAwareReplication.zoneB.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ $replicas }} +{{- end }} + podManagementPolicy: Parallel + serviceName: {{ include "loki.ingesterFullname" . }}-zone-b + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.ingester.persistence.enableStatefulSetAutoDeletePVC) }} + {{/* + Data on the read nodes is easy to replace, so we want to always delete PVCs to make + operation easier, and will rely on re-fetching data when needed. + */}} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.ingester.persistence.whenDeleted }} + whenScaled: {{ .Values.ingester.persistence.whenScaled }} + {{- end }} + selector: + matchLabels: + {{- include "loki.ingesterSelectorLabels" . | nindent 6 }} + name: ingester-zone-b + rollout-group: ingester + updateStrategy: + type: OnDelete + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.zoneAwareReplication.zoneB.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.ingesterSelectorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + name: ingester-zone-b + rollout-group: ingester + {{- with .Values.loki.podLabels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} + {{- with .Values.ingester.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.ingesterPriorityClassName" . | nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + {{- with .Values.ingester.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: ingester + image: {{ include "loki.image" . }} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + {{- if or .Values.loki.command .Values.ingester.command }} + command: + - {{ coalesce .Values.ingester.command .Values.loki.command | quote }} + {{- end }} + args: + - -config.file=/etc/loki/config/config.yaml + - -ingester.availability-zone=zone-b + - -ingester.unregister-on-shutdown=false + - -ingester.tokens-file-path=/var/loki/ring-tokens + - -target=ingester + {{- with .Values.ingester.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.ingester.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.ingester.extraEnvFrom }} + envFrom: + {{- toYaml . 
| nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + {{- include "loki.ingester.readinessProbe" . | nindent 10 }} + {{- include "loki.ingester.livenessProbe" . | nindent 10 }} + volumeMounts: + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + - name: data + mountPath: /var/loki + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.ingester.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.ingester.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.ingester.extraContainers }} + {{- toYaml .Values.ingester.extraContainers | nindent 8}} + {{- end }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: rollout-group + operator: In + values: + - ingester + - key: name + operator: NotIn + values: + - ingester-zone-b + topologyKey: kubernetes.io/hostname + {{- with .Values.ingester.zoneAwareReplication.zoneB.extraAffinity }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.zoneAwareReplication.zoneB.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . 
}}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- with .Values.ingester.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if not .Values.ingester.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.ingester.persistence.inMemory }} + - name: data + {{- if .Values.ingester.persistence.inMemory }} + emptyDir: + medium: Memory + {{- end }} + {{- if .Values.ingester.persistence.size }} + sizeLimit: {{ .Values.ingester.persistence.size }} + {{- end }} + {{- else }} + volumeClaimTemplates: + {{- range .Values.ingester.persistence.claims }} + - metadata: + name: {{ .name }} + {{- with .annotations }} + annotations: + {{- . | toYaml | nindent 10 }} + {{- end }} + spec: + accessModes: + - ReadWriteOnce + {{- with .storageClass }} + storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . }}{{ end }} + {{- end }} + resources: + requests: + storage: {{ .size | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/ingester/statefulset-ingester-zone-c.yaml b/production/helm/loki/templates/ingester/statefulset-ingester-zone-c.yaml new file mode 100644 index 000000000000..cc65f49b244c --- /dev/null +++ b/production/helm/loki/templates/ingester/statefulset-ingester-zone-c.yaml @@ -0,0 +1,232 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed .Values.ingester.zoneAwareReplication.enabled }} +{{- $replicas := (include "loki.ingester.replicaCount" .) -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "loki.ingesterFullname" . }}-zone-c + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.ingesterLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + rollout-group: ingester + name: ingester-zone-c + annotations: + rollout-max-unavailable: "{{ include "loki.ingester.maxUnavailable" (dict "ctx" . "replicas" $replicas)}}" + {{- with .Values.loki.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingester.zoneAwareReplication.zoneC.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ $replicas }} +{{- end }} + podManagementPolicy: Parallel + serviceName: {{ include "loki.ingesterFullname" . }}-zone-c + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.ingester.persistence.enableStatefulSetAutoDeletePVC) }} + {{/* + Data on the read nodes is easy to replace, so we want to always delete PVCs to make + operation easier, and will rely on re-fetching data when needed. + */}} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.ingester.persistence.whenDeleted }} + whenScaled: {{ .Values.ingester.persistence.whenScaled }} + {{- end }} + selector: + matchLabels: + {{- include "loki.ingesterSelectorLabels" . | nindent 6 }} + name: ingester-zone-c + rollout-group: ingester + updateStrategy: + type: OnDelete + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.zoneAwareReplication.zoneC.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.ingesterSelectorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + name: ingester-zone-c + rollout-group: ingester + {{- with .Values.loki.podLabels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} + {{- with .Values.ingester.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.ingesterPriorityClassName" . | nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + {{- with .Values.ingester.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: ingester + image: {{ include "loki.image" . }} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + {{- if or .Values.loki.command .Values.ingester.command }} + command: + - {{ coalesce .Values.ingester.command .Values.loki.command | quote }} + {{- end }} + args: + - -config.file=/etc/loki/config/config.yaml + - -ingester.availability-zone=zone-c + - -ingester.unregister-on-shutdown=false + - -ingester.tokens-file-path=/var/loki/ring-tokens + - -target=ingester + {{- with .Values.ingester.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.ingester.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.ingester.extraEnvFrom }} + envFrom: + {{- toYaml . 
| nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + {{- include "loki.ingester.readinessProbe" . | nindent 10 }} + {{- include "loki.ingester.livenessProbe" . | nindent 10 }} + volumeMounts: + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + - name: data + mountPath: /var/loki + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.ingester.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.ingester.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.ingester.extraContainers }} + {{- toYaml .Values.ingester.extraContainers | nindent 8}} + {{- end }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: rollout-group + operator: In + values: + - ingester + - key: name + operator: NotIn + values: + - ingester-zone-c + topologyKey: kubernetes.io/hostname + {{- with .Values.ingester.zoneAwareReplication.zoneC.extraAffinity }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.zoneAwareReplication.zoneC.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . 
}}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- with .Values.ingester.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if not .Values.ingester.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.ingester.persistence.inMemory }} + - name: data + {{- if .Values.ingester.persistence.inMemory }} + emptyDir: + medium: Memory + {{- end }} + {{- if .Values.ingester.persistence.size }} + sizeLimit: {{ .Values.ingester.persistence.size }} + {{- end }} + {{- else }} + volumeClaimTemplates: + {{- range .Values.ingester.persistence.claims }} + - metadata: + name: {{ .name }} + {{- with .annotations }} + annotations: + {{- . | toYaml | nindent 10 }} + {{- end }} + spec: + accessModes: + - ReadWriteOnce + {{- with .storageClass }} + storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . }}{{ end }} + {{- end }} + resources: + requests: + storage: {{ .size | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/ingester/statefulset-ingester.yaml b/production/helm/loki/templates/ingester/statefulset-ingester.yaml new file mode 100644 index 000000000000..d20a02e68f7c --- /dev/null +++ b/production/helm/loki/templates/ingester/statefulset-ingester.yaml @@ -0,0 +1,204 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed (or (not .Values.ingester.zoneAwareReplication.enabled) .Values.ingester.zoneAwareReplication.migration.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "loki.ingesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.ingesterLabels" . 
| nindent 4 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicas }} +{{- end }} + podManagementPolicy: Parallel + updateStrategy: + rollingUpdate: + partition: 0 + serviceName: {{ include "loki.ingesterFullname" . }}-headless + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.ingester.persistence.enableStatefulSetAutoDeletePVC) }} + {{/* + Data on the read nodes is easy to replace, so we want to always delete PVCs to make + operation easier, and will rely on re-fetching data when needed. + */}} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.ingester.persistence.whenDeleted }} + whenScaled: {{ .Values.ingester.persistence.whenScaled }} + {{- end }} + selector: + matchLabels: + {{- include "loki.ingesterSelectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.ingesterSelectorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} + {{- with .Values.ingester.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.ingester.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.ingesterPriorityClassName" . | nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ingester.terminationGracePeriodSeconds }} + {{- with .Values.ingester.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: ingester + image: {{ include "loki.image" . }} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + {{- if or .Values.loki.command .Values.ingester.command }} + command: + - {{ coalesce .Values.ingester.command .Values.loki.command | quote }} + {{- end }} + args: + - -config.file=/etc/loki/config/config.yaml + - -ingester.ring.instance-availability-zone=zone-default + - -target=ingester + {{- with .Values.ingester.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.ingester.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.ingester.extraEnvFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + {{- include "loki.ingester.readinessProbe" . | nindent 10 }} + {{- include "loki.ingester.livenessProbe" . | nindent 10 }} + volumeMounts: + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + - name: data + mountPath: /var/loki + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.ingester.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.ingester.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- with .Values.ingester.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.ingester.extraContainers }} + {{- toYaml .Values.ingester.extraContainers | nindent 8}} + {{- end }} + {{- with .Values.ingester.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . }}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- with .Values.ingester.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if not .Values.ingester.persistence.enabled }} + - name: data + emptyDir: { } + {{- else if .Values.ingester.persistence.inMemory }} + - name: data + {{- if .Values.ingester.persistence.inMemory }} + emptyDir: + medium: Memory + {{- end }} + {{- if .Values.ingester.persistence.size }} + sizeLimit: {{ .Values.ingester.persistence.size }} + {{- end }} + {{- else }} + volumeClaimTemplates: + {{- range .Values.ingester.persistence.claims }} + - metadata: + name: {{ .name }} + {{- with .annotations }} + annotations: + {{- . | toYaml | nindent 10 }} + {{- end }} + spec: + accessModes: + - ReadWriteOnce + {{- with .storageClass }} + storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . 
}}{{ end }} + {{- end }} + resources: + requests: + storage: {{ .size | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/loki-canary/_helpers.tpl b/production/helm/loki/templates/loki-canary/_helpers.tpl index 2ea8dd75450f..01e588c8d10a 100644 --- a/production/helm/loki/templates/loki-canary/_helpers.tpl +++ b/production/helm/loki/templates/loki-canary/_helpers.tpl @@ -25,7 +25,7 @@ app.kubernetes.io/component: canary Docker image name for loki-canary */}} {{- define "loki-canary.image" -}} -{{- $dict := dict "service" .Values.monitoring.lokiCanary.image "global" .Values.global.image "defaultVersion" .Chart.AppVersion -}} +{{- $dict := dict "service" .Values.lokiCanary.image "global" .Values.global.image "defaultVersion" .Chart.AppVersion -}} {{- include "loki.baseImage" $dict -}} {{- end -}} @@ -33,7 +33,7 @@ Docker image name for loki-canary canary priority class name */}} {{- define "loki-canary.priorityClassName" -}} -{{- $pcn := coalesce .Values.global.priorityClassName .Values.monitoring.lokiCanary.priorityClassName .Values.read.priorityClassName -}} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.lokiCanary.priorityClassName .Values.read.priorityClassName -}} {{- if $pcn }} priorityClassName: {{ $pcn }} {{- end }} diff --git a/production/helm/loki/templates/loki-canary/daemonset.yaml b/production/helm/loki/templates/loki-canary/daemonset.yaml index 250d1a8ade31..e9998dcef67f 100644 --- a/production/helm/loki/templates/loki-canary/daemonset.yaml +++ b/production/helm/loki/templates/loki-canary/daemonset.yaml @@ -1,4 +1,4 @@ -{{- with .Values.monitoring.lokiCanary -}} +{{- with .Values.lokiCanary -}} {{- if .enabled -}} --- apiVersion: apps/v1 @@ -52,11 +52,18 @@ spec: - -user={{ $.Values.monitoring.selfMonitoring.tenant.name }} - -tenant-id={{ $.Values.monitoring.selfMonitoring.tenant.name }} {{- end }} + {{- if .push }} + - -push=true + {{- end }} {{- with .extraArgs }} {{- toYaml . 
| nindent 12 }} {{- end }} securityContext: {{- toYaml $.Values.loki.containerSecurityContext | nindent 12 }} + volumeMounts: + {{- with $.Values.lokiCanary.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} ports: - name: http-metrics containerPort: 3500 @@ -107,5 +114,9 @@ spec: tolerations: {{- toYaml . | nindent 8 }} {{- end }} + volumes: + {{- with $.Values.lokiCanary.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} {{- end }} {{- end }} diff --git a/production/helm/loki/templates/loki-canary/service.yaml b/production/helm/loki/templates/loki-canary/service.yaml index d0fb34e38bf8..38022a3e3193 100644 --- a/production/helm/loki/templates/loki-canary/service.yaml +++ b/production/helm/loki/templates/loki-canary/service.yaml @@ -1,4 +1,4 @@ -{{- with .Values.monitoring.lokiCanary -}} +{{- with .Values.lokiCanary -}} {{- if .enabled -}} --- apiVersion: v1 diff --git a/production/helm/loki/templates/loki-canary/serviceaccount.yaml b/production/helm/loki/templates/loki-canary/serviceaccount.yaml index dbcd2b345faa..2c1f79a68274 100644 --- a/production/helm/loki/templates/loki-canary/serviceaccount.yaml +++ b/production/helm/loki/templates/loki-canary/serviceaccount.yaml @@ -1,4 +1,4 @@ -{{- with .Values.monitoring.lokiCanary -}} +{{- with .Values.lokiCanary -}} {{- if .enabled -}} --- apiVersion: v1 diff --git a/production/helm/loki/templates/memcached/_memcached-statefulset.tpl b/production/helm/loki/templates/memcached/_memcached-statefulset.tpl new file mode 100644 index 000000000000..32fd62450267 --- /dev/null +++ b/production/helm/loki/templates/memcached/_memcached-statefulset.tpl @@ -0,0 +1,159 @@ +{{/* +memcached StatefulSet +Params: + ctx = . context + valuesSection = name of the section in values.yaml + component = name of the component +valuesSection and component are specified separately because helm prefers camelcase for naming convention and k8s components are named with snake case. 
+*/}} +{{- define "loki.memcached.statefulSet" -}} +{{ with (index $.ctx.Values $.valuesSection) }} +{{- if .enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "loki.resourceName" (dict "ctx" $.ctx "component" $.component) }} + labels: + {{- include "loki.labels" $.ctx | nindent 4 }} + app.kubernetes.io/component: "memcached-{{ $.component }}" + name: "memcached-{{ $.component }}" + annotations: + {{- toYaml .annotations | nindent 4 }} + namespace: {{ $.ctx.Release.Namespace | quote }} +spec: + podManagementPolicy: {{ .podManagementPolicy }} + replicas: {{ .replicas }} + selector: + matchLabels: + {{- include "loki.selectorLabels" $.ctx | nindent 6 }} + app.kubernetes.io/component: "memcached-{{ $.component }}" + name: "memcached-{{ $.component }}" + updateStrategy: + {{- toYaml .statefulStrategy | nindent 4 }} + serviceName: {{ template "loki.fullname" $.ctx }}-{{ $.component }} + + template: + metadata: + labels: + {{- include "loki.selectorLabels" $.ctx | nindent 8 }} + app.kubernetes.io/component: "memcached-{{ $.component }}" + name: "memcached-{{ $.component }}" + {{- with $.ctx.Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + {{- with $.ctx.Values.global.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .podAnnotations }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + + spec: + serviceAccountName: {{ template "loki.serviceAccountName" $.ctx }} + {{- if .priorityClassName }} + priorityClassName: {{ .priorityClassName }} + {{- end }} + securityContext: + {{- toYaml $.ctx.Values.memcached.podSecurityContext | nindent 8 }} + initContainers: + {{- toYaml .initContainers | nindent 8 }} + nodeSelector: + {{- toYaml .nodeSelector | nindent 8 }} + affinity: + {{- toYaml .affinity | nindent 8 }} + topologySpreadConstraints: + {{- toYaml .topologySpreadConstraints | nindent 8 }} + tolerations: + {{- toYaml .tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .terminationGracePeriodSeconds }} + {{- if $.ctx.Values.imagePullSecrets }} + imagePullSecrets: + {{- range $.ctx.Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + {{- if .extraVolumes }} + volumes: + {{- toYaml .extraVolumes | nindent 8 }} + {{- end }} + containers: + {{- if .extraContainers }} + {{ toYaml .extraContainers | nindent 8 }} + {{- end }} + - name: memcached + {{- with $.ctx.Values.memcached.image }} + image: {{ .repository }}:{{ .tag }} + imagePullPolicy: {{ .pullPolicy }} + {{- end }} + resources: + {{- if .resources }} + {{- toYaml .resources | nindent 12 }} + {{- else }} + {{- /* Calculate requested memory as round(allocatedMemory * 1.2). But with integer built-in operators. */}} + {{- $requestMemory := div (add (mul .allocatedMemory 12) 5) 10 }} + limits: + memory: {{ $requestMemory }}Mi + requests: + cpu: 500m + memory: {{ $requestMemory }}Mi + {{- end }} + ports: + - containerPort: {{ .port }} + name: client + args: + - -m {{ .allocatedMemory }} + - --extended=modern,track_sizes{{ with .extraExtendedOptions }},{{ . }}{{ end }} + - -I {{ .maxItemMemory }}m + - -c {{ .connectionLimit }} + - -v + - -u {{ .port }} + {{- range $key, $value := .extraArgs }} + - "-{{ $key }}{{ if $value }} {{ $value }}{{ end }}" + {{- end }} + env: + {{- with $.ctx.Values.global.extraEnv }} + {{ toYaml . 
| nindent 12 }} + {{- end }} + envFrom: + {{- with $.ctx.Values.global.extraEnvFrom }} + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml $.ctx.Values.memcached.containerSecurityContext | nindent 12 }} + {{- if .extraVolumeMounts }} + volumeMounts: + {{- toYaml .extraVolumeMounts | nindent 12 }} + {{- end }} + + {{- if $.ctx.Values.memcachedExporter.enabled }} + - name: exporter + {{- with $.ctx.Values.memcachedExporter.image }} + image: {{ .repository}}:{{ .tag }} + imagePullPolicy: {{ .pullPolicy }} + {{- end }} + ports: + - containerPort: 9150 + name: http-metrics + args: + - "--memcached.address=localhost:{{ .port }}" + - "--web.listen-address=0.0.0.0:9150" + {{- range $key, $value := $.ctx.Values.memcachedExporter.extraArgs }} + - "--{{ $key }}{{ if $value }}={{ $value }}{{ end }}" + {{- end }} + resources: + {{- toYaml $.ctx.Values.memcachedExporter.resources | nindent 12 }} + securityContext: + {{- toYaml $.ctx.Values.memcachedExporter.containerSecurityContext | nindent 12 }} + {{- if .extraVolumeMounts }} + volumeMounts: + {{- toYaml .extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} +{{- end -}} + diff --git a/production/helm/loki/templates/memcached/_memcached-svc.tpl b/production/helm/loki/templates/memcached/_memcached-svc.tpl new file mode 100644 index 000000000000..8574151978a2 --- /dev/null +++ b/production/helm/loki/templates/memcached/_memcached-svc.tpl @@ -0,0 +1,42 @@ +{{/* +memcached Service +Params: + ctx = . context + valuesSection = name of the section in values.yaml + component = name of the component +valuesSection and component are specified separately because helm prefers camelcase for naming convention and k8s components are named with snake case. 
+*/}} +{{- define "loki.memcached.service" -}} +{{ with (index $.ctx.Values $.valuesSection) }} +{{- if .enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.resourceName" (dict "ctx" $.ctx "component" $.component) }} + labels: + {{- include "loki.labels" $.ctx | nindent 4 }} + app.kubernetes.io/component: "memcached-{{ $.component }}" + {{- with .service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .service.annotations | nindent 4 }} + namespace: {{ $.ctx.Release.Namespace | quote }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: memcached-client + port: {{ .port }} + targetPort: {{ .port }} + {{ if $.ctx.Values.memcachedExporter.enabled -}} + - name: http-metrics + port: 9150 + targetPort: 9150 + {{ end }} + selector: + {{- include "loki.selectorLabels" $.ctx | nindent 4 }} + app.kubernetes.io/component: "memcached-{{ $.component }}" +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/production/helm/loki/templates/monitoring/logs-instance.yaml b/production/helm/loki/templates/monitoring/logs-instance.yaml index 58d5fb045c0c..5ae19179a1ef 100644 --- a/production/helm/loki/templates/monitoring/logs-instance.yaml +++ b/production/helm/loki/templates/monitoring/logs-instance.yaml @@ -27,4 +27,4 @@ spec: matchLabels: {{- include "loki.selectorLabels" $ | nindent 6 }} {{- end -}} -{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/production/helm/loki/templates/pattern-ingester/_helpers-pattern-ingester.tpl b/production/helm/loki/templates/pattern-ingester/_helpers-pattern-ingester.tpl new file mode 100644 index 000000000000..5477214a0b5a --- /dev/null +++ b/production/helm/loki/templates/pattern-ingester/_helpers-pattern-ingester.tpl @@ -0,0 +1,58 @@ +{{/* +pattern ingester fullname +*/}} +{{- define "loki.patternIngesterFullname" -}} +{{ include "loki.fullname" . 
}}-pattern-ingester +{{- end }} + +{{/* +pattern ingester common labels +*/}} +{{- define "loki.patternIngesterLabels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: pattern-ingester +{{- end }} + +{{/* +pattern ingester selector labels +*/}} +{{- define "loki.patternIngesterSelectorLabels" -}} +{{ include "loki.selectorLabels" . }} +app.kubernetes.io/component: pattern-ingester +{{- end }} + +{{/* +pattern ingester readinessProbe +*/}} +{{- define "loki.patternIngester.readinessProbe" -}} +{{- with .Values.patternIngester.readinessProbe }} +readinessProbe: + {{- toYaml . | nindent 2 }} +{{- else }} +{{- with .Values.loki.readinessProbe }} +readinessProbe: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +pattern ingester priority class name +*/}} +{{- define "loki.patternIngesterPriorityClassName" }} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.patternIngester.priorityClassName -}} +{{- if $pcn }} +priorityClassName: {{ $pcn }} +{{- end }} +{{- end }} + +{{/* +Create the name of the pattern ingester service account +*/}} +{{- define "loki.patternIngesterServiceAccountName" -}} +{{- if .Values.patternIngester.serviceAccount.create -}} + {{ default (print (include "loki.serviceAccountName" .) "-pattern-ingester") .Values.patternIngester.serviceAccount.name }} +{{- else -}} + {{ default (include "loki.serviceAccountName" .) .Values.patternIngester.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/production/helm/loki/templates/pattern-ingester/statefulset-pattern-ingester.yaml b/production/helm/loki/templates/pattern-ingester/statefulset-pattern-ingester.yaml new file mode 100644 index 000000000000..4666dd6197b8 --- /dev/null +++ b/production/helm/loki/templates/pattern-ingester/statefulset-pattern-ingester.yaml @@ -0,0 +1,179 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) 
"true" -}} +{{- if $isDistributed }} +{{- if (gt (int .Values.patternIngester.replicas) 0) -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "loki.patternIngesterFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.patternIngesterLabels" . | nindent 4 }} + {{- with .Values.loki.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.patternIngester.replicas }} + podManagementPolicy: Parallel + updateStrategy: + rollingUpdate: + partition: 0 + serviceName: {{ include "loki.patternIngesterFullname" . }}-headless + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.patternIngester.persistence.enableStatefulSetAutoDeletePVC) }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.patternIngester.persistence.whenDeleted }} + whenScaled: {{ .Values.patternIngester.persistence.whenScaled }} + {{- end }} + selector: + matchLabels: + {{- include "loki.patternIngesterSelectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.patternIngester.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.patternIngesterSelectorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.patternIngester.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.patternIngester.hostAliases }} + hostAliases: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- include "loki.patternIngesterPriorityClassName" . | nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.patternIngester.terminationGracePeriodSeconds }} + {{- with .Values.patternIngester.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: pattern-ingester + image: {{ include "loki.image" . }} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + {{- if or .Values.loki.command .Values.patternIngester.command }} + command: + - {{ coalesce .Values.patternIngester.command .Values.loki.command | quote }} + {{- end }} + args: + - -config.file=/etc/loki/config/config.yaml + - -target=pattern-ingester + {{- with .Values.patternIngester.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.patternIngester.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.patternIngester.extraEnvFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + {{- include "loki.patternIngester.readinessProbe" . | nindent 10 }} + volumeMounts: + - name: temp + mountPath: /tmp + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + - name: data + mountPath: /var/loki + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.patternIngester.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.patternIngester.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- if .Values.patternIngester.extraContainers }} + {{- toYaml .Values.patternIngester.extraContainers | nindent 8}} + {{- end }} + {{- with .Values.patternIngester.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.patternIngester.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: temp + emptyDir: {} + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . }}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- if not .Values.patternIngester.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- with .Values.patternIngester.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.patternIngester.persistence.enabled }} + volumeClaimTemplates: + {{- range .Values.patternIngester.persistence.claims }} + - metadata: + name: {{ .name }} + {{- with .annotations }} + annotations: + {{- . | toYaml | nindent 10 }} + {{- end }} + spec: + accessModes: + - ReadWriteOnce + {{- with .storageClass }} + storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . }}{{ end }} + {{- end }} + resources: + requests: + storage: {{ .size | quote }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/production/helm/loki/templates/querier/_helpers-querier.tpl b/production/helm/loki/templates/querier/_helpers-querier.tpl new file mode 100644 index 000000000000..aa557c5b8da4 --- /dev/null +++ b/production/helm/loki/templates/querier/_helpers-querier.tpl @@ -0,0 +1,32 @@ +{{/* +querier fullname +*/}} +{{- define "loki.querierFullname" -}} +{{ include "loki.fullname" . 
}}-querier +{{- end }} + +{{/* +querier common labels +*/}} +{{- define "loki.querierLabels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: querier +{{- end }} + +{{/* +querier selector labels +*/}} +{{- define "loki.querierSelectorLabels" -}} +{{ include "loki.selectorLabels" . }} +app.kubernetes.io/component: querier +{{- end }} + +{{/* +querier priority class name +*/}} +{{- define "loki.querierPriorityClassName" -}} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.querier.priorityClassName -}} +{{- if $pcn }} +priorityClassName: {{ $pcn }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/querier/deployment-querier.yaml b/production/helm/loki/templates/querier/deployment-querier.yaml new file mode 100644 index 000000000000..80699f21fd16 --- /dev/null +++ b/production/helm/loki/templates/querier/deployment-querier.yaml @@ -0,0 +1,166 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "loki.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.querierLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if not .Values.querier.autoscaling.enabled }} + replicas: {{ .Values.querier.replicas }} +{{- end }} + strategy: + rollingUpdate: + maxSurge: {{ .Values.querier.maxSurge }} + maxUnavailable: 1 + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "loki.querierSelectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.querier.podAnnotations }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + labels: + {{- include "loki.querierSelectorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.querier.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} + {{- with .Values.querier.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.querier.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.querierPriorityClassName" . | nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.querier.terminationGracePeriodSeconds }} + {{- with .Values.querier.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: querier + image: {{ include "loki.image" . }} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + args: + - -config.file=/etc/loki/config/config.yaml + - -target=querier + {{- if .Values.ingester.zoneAwareReplication.enabled }} + {{- if and (.Values.ingester.zoneAwareReplication.migration.enabled) (not .Values.ingester.zoneAwareReplication.migration.readPath) }} + - -distributor.zone-awareness-enabled=false + {{- else }} + - -distributor.zone-awareness-enabled=true + {{- end }} + {{- end }} + {{- with .Values.querier.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.querier.extraEnv }} + env: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- with .Values.querier.extraEnvFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + readinessProbe: + {{- toYaml .Values.loki.readinessProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.loki.livenessProbe | nindent 12 }} + volumeMounts: + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + - name: data + mountPath: /var/loki + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.querier.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.querier.resources | nindent 12 }} + {{- if .Values.querier.extraContainers }} + {{- toYaml .Values.querier.extraContainers | nindent 8}} + {{- end }} + {{- with .Values.querier.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.querier.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.querier.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.querier.dnsConfig }} + dnsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . }}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + - name: data + emptyDir: {} + {{- with .Values.querier.extraVolumes }} + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/querier/hpa.yaml b/production/helm/loki/templates/querier/hpa.yaml new file mode 100644 index 000000000000..08d81cb5903b --- /dev/null +++ b/production/helm/loki/templates/querier/hpa.yaml @@ -0,0 +1,55 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed .Values.querier.autoscaling.enabled }} +{{- $apiVersion := include "loki.hpa.apiVersion" . -}} +apiVersion: {{ $apiVersion }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "loki.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.querierLabels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "loki.querierFullname" . }} + minReplicas: {{ .Values.querier.autoscaling.minReplicas }} + maxReplicas: {{ .Values.querier.autoscaling.maxReplicas }} + metrics: + {{- with .Values.querier.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if (eq $apiVersion "autoscaling/v2") }} + target: + type: Utilization + averageUtilization: {{ . }} + {{- else }} + targetAverageUtilization: {{ . }} + {{- end }} + {{- end }} + {{- with .Values.querier.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if (eq $apiVersion "autoscaling/v2") }} + target: + type: Utilization + averageUtilization: {{ . }} + {{- else }} + targetAverageUtilization: {{ . }} + {{- end }} + {{- end }} + {{- with .Values.querier.autoscaling.customMetrics }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.querier.autoscaling.behavior.enabled }} + behavior: + {{- with .Values.querier.autoscaling.behavior.scaleDown }} + scaleDown: {{ toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.querier.autoscaling.behavior.scaleUp }} + scaleUp: {{ toYaml . 
| nindent 6 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/querier/poddisruptionbudget-querier.yaml b/production/helm/loki/templates/querier/poddisruptionbudget-querier.yaml new file mode 100644 index 000000000000..9dff3cdf8851 --- /dev/null +++ b/production/helm/loki/templates/querier/poddisruptionbudget-querier.yaml @@ -0,0 +1,21 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed (gt (int .Values.querier.replicas) 1) }} +{{- if kindIs "invalid" .Values.querier.maxUnavailable }} +{{- fail "`.Values.querier.maxUnavailable` must be set when `.Values.querier.replicas` is greater than 1." }} +{{- else }} +apiVersion: {{ include "loki.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "loki.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.querierLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "loki.querierSelectorLabels" . | nindent 6 }} + {{- with .Values.querier.maxUnavailable }} + maxUnavailable: {{ . }} + {{- end }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/querier/service-querier.yaml b/production/helm/loki/templates/querier/service-querier.yaml new file mode 100644 index 000000000000..ca5a23bbffb2 --- /dev/null +++ b/production/helm/loki/templates/querier/service-querier.yaml @@ -0,0 +1,33 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.querierFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.querierLabels" . | nindent 4 }} + {{- with .Values.querier.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.loki.serviceAnnotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- if .Values.querier.appProtocol.grpc }} + appProtocol: {{ .Values.querier.appProtocol.grpc }} + {{- end }} + selector: + {{- include "loki.querierSelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/production/helm/loki/templates/query-frontend/_helpers-query-frontend.tpl b/production/helm/loki/templates/query-frontend/_helpers-query-frontend.tpl new file mode 100644 index 000000000000..5aebde755efe --- /dev/null +++ b/production/helm/loki/templates/query-frontend/_helpers-query-frontend.tpl @@ -0,0 +1,32 @@ +{{/* +query-frontend fullname +*/}} +{{- define "loki.queryFrontendFullname" -}} +{{ include "loki.fullname" . }}-query-frontend +{{- end }} + +{{/* +query-frontend common labels +*/}} +{{- define "loki.queryFrontendLabels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} + +{{/* +query-frontend selector labels +*/}} +{{- define "loki.queryFrontendSelectorLabels" -}} +{{ include "loki.selectorLabels" . }} +app.kubernetes.io/component: query-frontend +{{- end }} + +{{/* +query-frontend priority class name +*/}} +{{- define "loki.queryFrontendPriorityClassName" -}} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.queryFrontend.priorityClassName -}} +{{- if $pcn }} +priorityClassName: {{ $pcn }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/query-frontend/deployment-query-frontend.yaml b/production/helm/loki/templates/query-frontend/deployment-query-frontend.yaml new file mode 100644 index 000000000000..6eda5c51dfc0 --- /dev/null +++ b/production/helm/loki/templates/query-frontend/deployment-query-frontend.yaml @@ -0,0 +1,142 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) 
"true" -}} +{{- if $isDistributed -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "loki.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.loki.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if not .Values.queryFrontend.autoscaling.enabled }} + replicas: {{ .Values.queryFrontend.replicas }} +{{- end }} + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "loki.queryFrontendSelectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryFrontend.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.queryFrontendSelectorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryFrontend.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryFrontend.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.queryFrontendPriorityClassName" . | nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.queryFrontend.terminationGracePeriodSeconds }} + containers: + - name: query-frontend + image: {{ include "loki.image" . 
}} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + {{- if or .Values.loki.command .Values.queryFrontend.command }} + command: + - {{ coalesce .Values.queryFrontend.command .Values.loki.command | quote }} + {{- end }} + args: + - -config.file=/etc/loki/config/config.yaml + - -target=query-frontend + {{- with .Values.queryFrontend.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.queryFrontend.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.queryFrontend.extraEnvFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + readinessProbe: + {{- toYaml .Values.loki.readinessProbe | nindent 12 }} + volumeMounts: + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.queryFrontend.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.queryFrontend.resources | nindent 12 }} + {{- if .Values.queryFrontend.extraContainers }} + {{- toYaml .Values.queryFrontend.extraContainers | nindent 8}} + {{- end }} + {{- with .Values.queryFrontend.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryFrontend.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryFrontend.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . 
}}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- with .Values.queryFrontend.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end -}} diff --git a/production/helm/loki/templates/query-frontend/hpa.yaml b/production/helm/loki/templates/query-frontend/hpa.yaml new file mode 100644 index 000000000000..c326287bd8fe --- /dev/null +++ b/production/helm/loki/templates/query-frontend/hpa.yaml @@ -0,0 +1,55 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed .Values.queryFrontend.autoscaling.enabled }} +{{- $apiVersion := include "loki.hpa.apiVersion" . -}} +apiVersion: {{ $apiVersion }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "loki.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.queryFrontendLabels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "loki.queryFrontendFullname" . }} + minReplicas: {{ .Values.queryFrontend.autoscaling.minReplicas }} + maxReplicas: {{ .Values.queryFrontend.autoscaling.maxReplicas }} + metrics: + {{- with .Values.queryFrontend.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if (eq $apiVersion "autoscaling/v2") }} + target: + type: Utilization + averageUtilization: {{ . }} + {{- else }} + targetAverageUtilization: {{ . }} + {{- end }} + {{- end }} + {{- with .Values.queryFrontend.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if (eq $apiVersion "autoscaling/v2") }} + target: + type: Utilization + averageUtilization: {{ . }} + {{- else }} + targetAverageUtilization: {{ . 
}}
+      {{- end }}
+  {{- end }}
+  {{- with .Values.queryFrontend.autoscaling.customMetrics }}
+  {{- toYaml . | nindent 4 }}
+  {{- end }}
+  {{- if .Values.queryFrontend.autoscaling.behavior.enabled }}
+  behavior:
+    {{- with .Values.queryFrontend.autoscaling.behavior.scaleDown }}
+    scaleDown: {{ toYaml . | nindent 6 }}
+    {{- end }}
+    {{- with .Values.queryFrontend.autoscaling.behavior.scaleUp }}
+    scaleUp: {{ toYaml . | nindent 6 }}
+    {{- end }}
+  {{- end }}
+{{- end }}
diff --git a/production/helm/loki/templates/query-frontend/poddisruptionbudget-query-frontend.yaml b/production/helm/loki/templates/query-frontend/poddisruptionbudget-query-frontend.yaml
new file mode 100644
index 000000000000..f10040594250
--- /dev/null
+++ b/production/helm/loki/templates/query-frontend/poddisruptionbudget-query-frontend.yaml
@@ -0,0 +1,21 @@
+{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}}
+{{- if and $isDistributed (gt (int .Values.queryFrontend.replicas) 1) }}
+{{- if kindIs "invalid" .Values.queryFrontend.maxUnavailable }}
+{{- fail "`.Values.queryFrontend.maxUnavailable` must be set when `.Values.queryFrontend.replicas` is greater than 1." }}
+{{- else }}
+apiVersion: {{ include "loki.pdb.apiVersion" . }}
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "loki.queryFrontendFullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "loki.queryFrontendLabels" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "loki.queryFrontendSelectorLabels" . | nindent 6 }}
+  {{- with .Values.queryFrontend.maxUnavailable }}
+  maxUnavailable: {{ . 
}}
+  {{- end }}
+{{- end }}
+{{- end }}
diff --git a/production/helm/loki/templates/query-frontend/service-query-frontend-headless.yaml b/production/helm/loki/templates/query-frontend/service-query-frontend-headless.yaml
new file mode 100644
index 000000000000..258413aa1d57
--- /dev/null
+++ b/production/helm/loki/templates/query-frontend/service-query-frontend-headless.yaml
@@ -0,0 +1,43 @@
+{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}}
+{{- if $isDistributed -}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "loki.queryFrontendFullname" . }}-headless
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "loki.queryFrontendLabels" . | nindent 4 }}
+    {{- with .Values.queryFrontend.serviceLabels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    prometheus.io/service-monitor: "false"
+  {{- with .Values.loki.serviceAnnotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  clusterIP: None
+  type: ClusterIP
+  publishNotReadyAddresses: true
+  ports:
+    - name: http-metrics
+      port: 3100
+      targetPort: http-metrics
+      protocol: TCP
+    - name: grpc
+      port: 9095
+      targetPort: grpc
+      protocol: TCP
+      {{- if .Values.queryFrontend.appProtocol.grpc }}
+      appProtocol: {{ .Values.queryFrontend.appProtocol.grpc }}
+      {{- end }}
+    - name: grpclb
+      port: 9096
+      targetPort: grpc
+      protocol: TCP
+      {{- if .Values.queryFrontend.appProtocol.grpc }}
+      appProtocol: {{ .Values.queryFrontend.appProtocol.grpc }}
+      {{- end }}
+  selector:
+    {{- include "loki.queryFrontendSelectorLabels" . | nindent 4 }}
+{{- end -}}
diff --git a/production/helm/loki/templates/query-frontend/service-query-frontend.yaml b/production/helm/loki/templates/query-frontend/service-query-frontend.yaml
new file mode 100644
index 000000000000..b017c5d54aaf
--- /dev/null
+++ b/production/helm/loki/templates/query-frontend/service-query-frontend.yaml
@@ -0,0 +1,41 @@
+{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) 
"true" -}} +{{- if $isDistributed -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.queryFrontendFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.queryFrontendLabels" . | nindent 4 }} + {{- with .Values.queryFrontend.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.loki.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + publishNotReadyAddresses: true + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- if .Values.queryFrontend.appProtocol.grpc }} + appProtocol: {{ .Values.queryFrontend.appProtocol.grpc }} + {{- end }} + - name: grpclb + port: 9096 + targetPort: grpc + protocol: TCP + {{- if .Values.queryFrontend.appProtocol.grpc }} + appProtocol: {{ .Values.queryFrontend.appProtocol.grpc }} + {{- end }} + selector: + {{- include "loki.queryFrontendSelectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/production/helm/loki/templates/query-scheduler/_helpers-query-scheduler.tpl b/production/helm/loki/templates/query-scheduler/_helpers-query-scheduler.tpl new file mode 100644 index 000000000000..1f64802428af --- /dev/null +++ b/production/helm/loki/templates/query-scheduler/_helpers-query-scheduler.tpl @@ -0,0 +1,40 @@ +{{/* +query-scheduler fullname +*/}} +{{- define "loki.querySchedulerFullname" -}} +{{ include "loki.fullname" . }}-query-scheduler +{{- end }} + +{{/* +query-scheduler common labels +*/}} +{{- define "loki.querySchedulerLabels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: query-scheduler +{{- end }} + +{{/* +query-scheduler selector labels +*/}} +{{- define "loki.querySchedulerSelectorLabels" -}} +{{ include "loki.selectorLabels" . 
}} +app.kubernetes.io/component: query-scheduler +{{- end }} + +{{/* +query-scheduler image +*/}} +{{- define "loki.querySchedulerImage" -}} +{{- $dict := dict "loki" .Values.loki.image "service" .Values.queryScheduler.image "global" .Values.global.image "defaultVersion" .Chart.AppVersion -}} +{{- include "loki.lokiImage" $dict -}} +{{- end }} + +{{/* +query-scheduler priority class name +*/}} +{{- define "loki.querySchedulerPriorityClassName" -}} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.queryScheduler.priorityClassName -}} +{{- if $pcn }} +priorityClassName: {{ $pcn }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/query-scheduler/deployment-query-scheduler.yaml b/production/helm/loki/templates/query-scheduler/deployment-query-scheduler.yaml new file mode 100644 index 000000000000..11b2829ebeec --- /dev/null +++ b/production/helm/loki/templates/query-scheduler/deployment-query-scheduler.yaml @@ -0,0 +1,140 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "loki.querySchedulerFullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.querySchedulerLabels" . | nindent 4 }} + {{- with .Values.loki.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.queryScheduler.replicas }} + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "loki.querySchedulerSelectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryScheduler.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.querySchedulerSelectorLabels" . 
| nindent 8 }} + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryScheduler.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + app.kubernetes.io/part-of: memberlist + spec: + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryScheduler.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.querySchedulerPriorityClassName" . | nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.queryScheduler.terminationGracePeriodSeconds }} + containers: + - name: query-scheduler + image: {{ include "loki.image" . }} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + args: + - -config.file=/etc/loki/config/config.yaml + - -target=query-scheduler + {{- with .Values.queryScheduler.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.queryScheduler.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.queryScheduler.extraEnvFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + readinessProbe: + {{- toYaml .Values.loki.readinessProbe | nindent 12 }} + livenessProbe: + {{- toYaml .Values.loki.livenessProbe | nindent 12 }} + volumeMounts: + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- with .Values.queryScheduler.extraVolumeMounts }} + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- with .Values.queryScheduler.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.queryScheduler.extraContainers }} + {{- toYaml .Values.queryScheduler.extraContainers | nindent 8}} + {{- end }} + {{- with .Values.queryScheduler.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryScheduler.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryScheduler.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . }}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- with .Values.queryScheduler.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/query-scheduler/poddisruptionbudget-query-scheduler.yaml b/production/helm/loki/templates/query-scheduler/poddisruptionbudget-query-scheduler.yaml new file mode 100644 index 000000000000..ed8051fa92ed --- /dev/null +++ b/production/helm/loki/templates/query-scheduler/poddisruptionbudget-query-scheduler.yaml @@ -0,0 +1,21 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed (gt (int .Values.queryScheduler.replicas) 1) }} +{{- if kindIs "invalid" .Values.queryScheduler.maxUnavailable }} +{{- fail "`.Values.queryScheduler.maxUnavailable` must be set when `.Values.queryScheduler.replicas` is greater than 1." }} +{{- else }} +apiVersion: {{ include "loki.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "loki.querySchedulerFullname" . 
}}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "loki.querySchedulerLabels" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "loki.querySchedulerSelectorLabels" . | nindent 6 }}
+  {{- with .Values.queryScheduler.maxUnavailable }}
+  maxUnavailable: {{ . }}
+  {{- end }}
+{{- end }}
+{{- end }}
diff --git a/production/helm/loki/templates/query-scheduler/service-query-scheduler.yaml b/production/helm/loki/templates/query-scheduler/service-query-scheduler.yaml
new file mode 100644
index 000000000000..89883155a27e
--- /dev/null
+++ b/production/helm/loki/templates/query-scheduler/service-query-scheduler.yaml
@@ -0,0 +1,35 @@
+{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}}
+{{- if $isDistributed }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "loki.querySchedulerFullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "loki.querySchedulerLabels" . | nindent 4 }}
+    {{- with .Values.queryScheduler.serviceLabels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+  {{- with .Values.loki.serviceAnnotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  type: ClusterIP
+  clusterIP: None
+  publishNotReadyAddresses: true
+  ports:
+    - name: http-metrics
+      port: 3100
+      targetPort: http-metrics
+      protocol: TCP
+    - name: grpclb
+      port: 9095
+      targetPort: grpc
+      protocol: TCP
+      {{- with .Values.queryScheduler.appProtocol.grpc }}
+      appProtocol: {{ . }}
+      {{- end }}
+  selector:
+    {{- include "loki.querySchedulerSelectorLabels" . | nindent 4 }}
+{{- end }}
diff --git a/production/helm/loki/templates/read/deployment-read.yaml b/production/helm/loki/templates/read/deployment-read.yaml
index ee9a15108a80..245119cb44dc 100644
--- a/production/helm/loki/templates/read/deployment-read.yaml
+++ b/production/helm/loki/templates/read/deployment-read.yaml
@@ -120,7 +120,7 @@ spec:
       {{- end }}
       {{- with .Values.read.affinity }}
       affinity:
-        {{- tpl . $ | nindent 8 }}
+        {{- toYaml . 
| nindent 8 }} {{- end }} {{- with .Values.read.dnsConfig }} dnsConfig: @@ -144,12 +144,7 @@ spec: - name: data emptyDir: {} - name: config - {{- if .Values.loki.existingSecretForConfig }} - secret: - secretName: {{ .Values.loki.existingSecretForConfig }} - {{- else }} {{- include "loki.configVolume" . | nindent 10 }} - {{- end }} - name: runtime-config configMap: name: {{ template "loki.name" . }}-runtime diff --git a/production/helm/loki/templates/read/statefulset-read.yaml b/production/helm/loki/templates/read/statefulset-read.yaml index 6efa0ad5594c..46d163442cc9 100644 --- a/production/helm/loki/templates/read/statefulset-read.yaml +++ b/production/helm/loki/templates/read/statefulset-read.yaml @@ -133,7 +133,7 @@ spec: {{- end }} {{- with .Values.read.affinity }} affinity: - {{- tpl . $ | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.read.dnsConfig }} dnsConfig: @@ -155,13 +155,7 @@ spec: - name: tmp emptyDir: {} - name: config - {{- if .Values.loki.existingSecretForConfig }} - secret: - secretName: {{ .Values.loki.existingSecretForConfig }} - {{- else }} - configMap: - name: {{ include "loki.name" . }} - {{- end }} + {{- include "loki.configVolume" . | nindent 10 }} - name: runtime-config configMap: name: {{ template "loki.name" . }}-runtime diff --git a/production/helm/loki/templates/results-cache/poddisruptionbudget-results-cache.yaml b/production/helm/loki/templates/results-cache/poddisruptionbudget-results-cache.yaml new file mode 100644 index 000000000000..6bc393a87de3 --- /dev/null +++ b/production/helm/loki/templates/results-cache/poddisruptionbudget-results-cache.yaml @@ -0,0 +1,16 @@ +{{- if .Values.resultsCache.enabled }} +apiVersion: {{ include "loki.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "loki.fullname" . }}-memcached-results-cache + namespace: {{ .Release.Namespace }} + labels: + {{- include "loki.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: memcached-results-cache +spec: + selector: + matchLabels: + {{- include "loki.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: memcached-results-cache + maxUnavailable: 1 +{{- end -}} diff --git a/production/helm/loki/templates/results-cache/service-results-cache-headless.yaml b/production/helm/loki/templates/results-cache/service-results-cache-headless.yaml new file mode 100644 index 000000000000..ce9200856e13 --- /dev/null +++ b/production/helm/loki/templates/results-cache/service-results-cache-headless.yaml @@ -0,0 +1 @@ +{{- include "loki.memcached.service" (dict "ctx" $ "valuesSection" "resultsCache" "component" "results-cache" ) }} diff --git a/production/helm/loki/templates/results-cache/statefulset-results-cache.yaml b/production/helm/loki/templates/results-cache/statefulset-results-cache.yaml new file mode 100644 index 000000000000..042e74e1b203 --- /dev/null +++ b/production/helm/loki/templates/results-cache/statefulset-results-cache.yaml @@ -0,0 +1 @@ +{{- include "loki.memcached.statefulSet" (dict "ctx" $ "valuesSection" "resultsCache" "component" "results-cache" ) }} diff --git a/production/helm/loki/templates/ruler/_helpers-ruler.tpl b/production/helm/loki/templates/ruler/_helpers-ruler.tpl new file mode 100644 index 000000000000..2079e03b0367 --- /dev/null +++ b/production/helm/loki/templates/ruler/_helpers-ruler.tpl @@ -0,0 +1,47 @@ +{{/* +ruler fullname +*/}} +{{- define "loki.rulerFullname" -}} +{{ include "loki.fullname" . }}-ruler +{{- end }} + +{{/* +ruler common labels +*/}} +{{- define "loki.rulerLabels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +ruler selector labels +*/}} +{{- define "loki.rulerSelectorLabels" -}} +{{ include "loki.selectorLabels" . 
}} +app.kubernetes.io/component: ruler +{{- end }} + +{{/* +ruler image +*/}} +{{- define "loki.rulerImage" -}} +{{- $dict := dict "loki" .Values.loki.image "service" .Values.ruler.image "global" .Values.global.image "defaultVersion" .Chart.AppVersion -}} +{{- include "loki.lokiImage" $dict -}} +{{- end }} + +{{/* +format rules dir +*/}} +{{- define "loki.rulerRulesDirName" -}} +rules-{{ . | replace "_" "-" | trimSuffix "-" | lower }} +{{- end }} + +{{/* +ruler priority class name +*/}} +{{- define "loki.rulerPriorityClassName" -}} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.ruler.priorityClassName -}} +{{- if $pcn }} +priorityClassName: {{ $pcn }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/ruler/configmap-ruler.yaml b/production/helm/loki/templates/ruler/configmap-ruler.yaml new file mode 100644 index 000000000000..b74f024b415f --- /dev/null +++ b/production/helm/loki/templates/ruler/configmap-ruler.yaml @@ -0,0 +1,14 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if $isDistributed }} +{{- range $dir, $files := .Values.ruler.directories }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "loki.rulerFullname" $ }}-{{ include "loki.rulerRulesDirName" $dir }} + labels: + {{- include "loki.rulerLabels" $ | nindent 4 }} +data: + {{- toYaml $files | nindent 2}} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/ruler/poddisruptionbudget-ruler.yaml b/production/helm/loki/templates/ruler/poddisruptionbudget-ruler.yaml new file mode 100644 index 000000000000..82417651862d --- /dev/null +++ b/production/helm/loki/templates/ruler/poddisruptionbudget-ruler.yaml @@ -0,0 +1,20 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) 
"true" -}} +{{- if and $isDistributed (gt (int .Values.ruler.replicas) 1) }} +{{- if kindIs "invalid" .Values.ruler.maxUnavailable }} +{{- fail "`.Values.ruler.maxUnavailable` must be set when `.Values.ruler.replicas` is greater than 1." }} +{{- else }} +apiVersion: {{ include "loki.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "loki.rulerFullname" . }} + labels: + {{- include "loki.rulerLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "loki.rulerSelectorLabels" . | nindent 6 }} + {{- with .Values.ruler.maxUnavailable }} + maxUnavailable: {{ . }} + {{- end }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/ruler/service-ruler.yaml b/production/helm/loki/templates/ruler/service-ruler.yaml new file mode 100644 index 000000000000..8200af2b69a9 --- /dev/null +++ b/production/helm/loki/templates/ruler/service-ruler.yaml @@ -0,0 +1,33 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed .Values.ruler.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki.rulerFullname" . }} + labels: + {{- include "loki.rulerSelectorLabels" . | nindent 4 }} + {{- with .Values.ruler.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.loki.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http-metrics + port: 3100 + targetPort: http + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + {{- with .Values.ruler.appProtocol.grpc }} + appProtocol: {{ . }} + {{- end }} + selector: + {{- include "loki.rulerSelectorLabels" . 
| nindent 4 }} +{{- end }} diff --git a/production/helm/loki/templates/ruler/statefulset-ruler.yaml b/production/helm/loki/templates/ruler/statefulset-ruler.yaml new file mode 100644 index 000000000000..8153a8bb3827 --- /dev/null +++ b/production/helm/loki/templates/ruler/statefulset-ruler.yaml @@ -0,0 +1,177 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- if and $isDistributed .Values.ruler.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "loki.rulerFullname" . }} + labels: + {{- include "loki.rulerLabels" . | nindent 4 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.ruler.replicas }} + revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }} + serviceName: {{ include "loki.rulerFullname" . }} + selector: + matchLabels: + {{- include "loki.rulerSelectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + {{- include "loki.config.checksum" . | nindent 8 }} + {{- with .Values.loki.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ruler.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki.rulerSelectorLabels" . | nindent 8 }} + app.kubernetes.io/part-of: memberlist + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ruler.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "loki.serviceAccountName" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ruler.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki.rulerPriorityClassName" . 
| nindent 6 }} + securityContext: + {{- toYaml .Values.loki.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.ruler.terminationGracePeriodSeconds }} + {{- with .Values.ruler.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: ruler + image: {{ include "loki.image" . }} + imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + args: + - -config.file=/etc/loki/config/config.yaml + - -target=ruler + {{- with .Values.ruler.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + {{- with .Values.ruler.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.ruler.extraEnvFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.loki.containerSecurityContext | nindent 12 }} + readinessProbe: + {{- toYaml .Values.loki.readinessProbe | nindent 12 }} + volumeMounts: + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + - name: data + mountPath: /var/loki + - name: tmp + mountPath: /tmp/loki + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "loki.rulerRulesDirName" $dir }} + mountPath: /etc/loki/rules/{{ $dir }} + {{- end }} + {{- with .Values.ruler.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.ruler.resources | nindent 12 }} + {{- with .Values.ruler.extraContainers }} + {{- toYaml . | nindent 8}} + {{- end }} + {{- with .Values.ruler.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ruler.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.ruler.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ruler.dnsConfig }} + dnsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + {{- include "loki.configVolume" . | nindent 10 }} + - name: runtime-config + configMap: + name: {{ template "loki.name" . }}-runtime + {{- if .Values.enterprise.enabled }} + - name: license + secret: + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} + {{- else }} + secretName: enterprise-logs-license + {{- end }} + {{- end }} + {{- range $dir, $_ := .Values.ruler.directories }} + - name: {{ include "loki.rulerRulesDirName" $dir }} + configMap: + name: {{ include "loki.rulerFullname" $ }}-{{ include "loki.rulerRulesDirName" $dir }} + {{- end }} + - name: tmp + emptyDir: {} + {{- with .Values.ruler.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if not .Values.ruler.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.ruler.persistence.annotations }} + annotations: + {{- . | toYaml | nindent 10 }} + {{- end }} + spec: + accessModes: + - ReadWriteOnce + {{- with .Values.ruler.persistence.storageClass }} + storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . }}{{ end }} + {{- end }} + resources: + requests: + storage: {{ .Values.ruler.persistence.size | quote }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/single-binary/statefulset.yaml b/production/helm/loki/templates/single-binary/statefulset.yaml index 8922c89ab33a..51c0062fc94f 100644 --- a/production/helm/loki/templates/single-binary/statefulset.yaml +++ b/production/helm/loki/templates/single-binary/statefulset.yaml @@ -135,7 +135,7 @@ spec: {{- end }} {{- with .Values.singleBinary.affinity }} affinity: - {{- tpl . $ | nindent 8 }} + {{- toYaml . 
| nindent 8 }} {{- end }} {{- with .Values.singleBinary.dnsConfig }} dnsConfig: @@ -153,12 +153,7 @@ spec: - name: tmp emptyDir: {} - name: config - {{- if .Values.loki.existingSecretForConfig }} - secret: - secretName: {{ .Values.loki.existingSecretForConfig }} - {{- else }} {{- include "loki.configVolume" . | nindent 10 }} - {{- end }} - name: runtime-config configMap: name: {{ template "loki.name" . }}-runtime diff --git a/production/helm/loki/templates/table-manager/deployment-table-manager.yaml b/production/helm/loki/templates/table-manager/deployment-table-manager.yaml index aeb5b1affea5..e3f6d0d94a69 100644 --- a/production/helm/loki/templates/table-manager/deployment-table-manager.yaml +++ b/production/helm/loki/templates/table-manager/deployment-table-manager.yaml @@ -80,6 +80,10 @@ spec: volumeMounts: - name: config mountPath: /etc/loki/config + {{- if .Values.enterprise.enabled }} + - name: license + mountPath: /etc/loki/license + {{- end }} {{- with .Values.tableManager.extraVolumeMounts }} {{- toYaml . | nindent 12 }} {{- end }} @@ -90,7 +94,7 @@ spec: {{- end }} {{- with .Values.tableManager.affinity }} affinity: - {{- tpl . $ | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.tableManager.dnsConfig }} dnsConfig: @@ -106,12 +110,16 @@ spec: {{- end }} volumes: - name: config - {{- if .Values.loki.existingSecretForConfig }} + {{- include "loki.configVolume" . | nindent 10 }} + {{- if .Values.enterprise.enabled }} + - name: license secret: - secretName: {{ .Values.loki.existingSecretForConfig }} + {{- if .Values.enterprise.useExternalLicense }} + secretName: {{ .Values.enterprise.externalLicenseName }} {{- else }} - {{- include "loki.configVolume" . | nindent 10 }} + secretName: enterprise-logs-license {{- end }} + {{- end }} {{- with .Values.tableManager.extraVolumes }} {{- toYaml . 
| nindent 8 }} {{- end }} diff --git a/production/helm/loki/templates/tests/test-canary.yaml b/production/helm/loki/templates/tests/test-canary.yaml index a4f11e214a1c..9384865b7b07 100644 --- a/production/helm/loki/templates/tests/test-canary.yaml +++ b/production/helm/loki/templates/tests/test-canary.yaml @@ -1,5 +1,5 @@ {{- with .Values.test }} -{{- if and .enabled $.Values.monitoring.selfMonitoring.enabled $.Values.monitoring.lokiCanary.enabled }} +{{- if $.Values.lokiCanary.enabled }} --- apiVersion: v1 kind: Pod @@ -21,6 +21,8 @@ spec: - name: loki-helm-test image: {{ include "loki.helmTestImage" $ }} env: + - name: CANARY_SERVICE_ADDRESS + value: "{{ .canaryServiceAddress }}" - name: CANARY_PROMETHEUS_ADDRESS value: "{{ .prometheusAddress }}" {{- with .timeout }} diff --git a/production/helm/loki/templates/tokengen/job-tokengen.yaml b/production/helm/loki/templates/tokengen/job-tokengen.yaml index b917395c3c60..f9ae7374c2d1 100644 --- a/production/helm/loki/templates/tokengen/job-tokengen.yaml +++ b/production/helm/loki/templates/tokengen/job-tokengen.yaml @@ -116,13 +116,7 @@ spec: {{- end }} volumes: - name: config - {{- if .Values.enterprise.useExternalConfig }} - secret: - secretName: {{ .Values.enterprise.externalConfigName }} - {{- else }} - configMap: - name: {{ include "loki.name" . }} - {{- end }} + {{- include "loki.configVolume" . | nindent 10 }} - name: runtime-config configMap: name: {{ template "loki.name" . }}-runtime diff --git a/production/helm/loki/templates/validate.yaml b/production/helm/loki/templates/validate.yaml index 3a2e8ca79fad..93e2490636b4 100644 --- a/production/helm/loki/templates/validate.yaml +++ b/production/helm/loki/templates/validate.yaml @@ -2,26 +2,40 @@ {{- fail "Top level 'config' is not allowed. Most common configuration sections are exposed under the `loki` section. If you need to override the whole config, provide the configuration as a string that can contain template expressions under `loki.config`. 
Alternatively, you can provide the configuration as an external secret." }} {{- end }} -{{- if and (not .Values.monitoring.selfMonitoring.enabled) .Values.test.enabled }} -{{- fail "Helm test requires self monitoring to be enabled"}} -{{- end }} - -{{- if and (not .Values.monitoring.lokiCanary.enabled) .Values.test.enabled }} +{{- if and (not .Values.lokiCanary.enabled) .Values.test.enabled }} {{- fail "Helm test requires the Loki Canary to be enabled"}} {{- end }} -{{- if and .Values.test.enabled (not .Values.test.prometheusAddress) }} -{{- fail "Helm test requires a prometheusAddress for an instance scraping the Loki canary's metrics"}} -{{- end }} - {{- $singleBinaryReplicas := int .Values.singleBinary.replicas }} {{- $isUsingFilesystem := eq (include "loki.isUsingObjectStorage" .) "false" }} {{- $atLeastOneScalableReplica := or (gt (int .Values.backend.replicas) 0) (gt (int .Values.read.replicas) 0) (gt (int .Values.write.replicas) 0) }} +{{- $atLeastOneDistributedReplica := or (gt (int .Values.ingester.replicas) 0) (gt (int .Values.distributor.replicas) 0) (gt (int .Values.querier.replicas) 0) (gt (int .Values.queryFrontend.replicas) 0) (gt (int .Values.queryScheduler.replicas) 0) (gt (int .Values.indexGateway.replicas) 0) (gt (int .Values.compactor.replicas) 0) (gt (int .Values.ruler.replicas) 0) }} {{- if and $isUsingFilesystem (gt $singleBinaryReplicas 1) }} {{- fail "Cannot run more than 1 Single Binary replica without an object storage backend."}} {{- end }} -{{- if and $isUsingFilesystem (and (eq $singleBinaryReplicas 0) $atLeastOneScalableReplica) }} -{{- fail "Cannot run Scalable targets (backend, read, write) without an object storage backend."}} +{{- if and $isUsingFilesystem (and (eq $singleBinaryReplicas 0) (or $atLeastOneScalableReplica $atLeastOneDistributedReplica)) }} +{{- fail "Cannot run scalable targets (backend, read, write) or distributed targets without an object storage backend."}} +{{- end }} + +{{- if and $atLeastOneScalableReplica 
$atLeastOneDistributedReplica (ne .Values.deploymentMode "SimpleScalable<->Distributed") }} +{{- fail "You have more than zero replicas configured for scalable targets (backend, read, write) and distributed targets. If this was intentional change the deploymentMode to the transitional 'SimpleScalable<->Distributed' mode" }} +{{- end }} + +{{- if and (gt $singleBinaryReplicas 0) $atLeastOneDistributedReplica }} +{{- fail "You have more than zero replicas configured for both the single binary and distributed targets, there is no transition mode between these targets please change one or the other to zero or transition to the SimpleScalable mode first."}} +{{- end }} + +{{- if and (gt $singleBinaryReplicas 0) $atLeastOneScalableReplica (ne .Values.deploymentMode "SingleBinary<->SimpleScalable") }} +{{- fail "You have more than zero replicas configured for both the single binary and simple scalable targets. If this was intentional change the deploymentMode to the transitional 'SingleBinary<->SimpleScalable' mode"}} {{- end }} + +{{- if and (or (not (empty .Values.loki.schemaConfig)) (not (empty .Values.loki.structuredConfig.schema_config))) .Values.loki.useTestSchema }} +{{- fail "loki.useTestSchema must be false if loki.schemaConfig or loki.structuredConfig.schema_config are defined."}} +{{- end }} + + +{{- if and (empty .Values.loki.schemaConfig) (empty .Values.loki.structuredConfig.schema_config) (not .Values.loki.useTestSchema) }} +{{- fail "You must provide a schema_config for Loki, one is not provided as this will be individual for every Loki cluster. See https://grafana.com/docs/loki/latest/operations/storage/schema/ for schema information. 
For quick testing (with no persistence) add `--set loki.useTestSchema=true`"}} +{{- end }} \ No newline at end of file diff --git a/production/helm/loki/templates/write/statefulset-write.yaml b/production/helm/loki/templates/write/statefulset-write.yaml index 5aa1e78eaf58..0c89cde090ab 100644 --- a/production/helm/loki/templates/write/statefulset-write.yaml +++ b/production/helm/loki/templates/write/statefulset-write.yaml @@ -132,7 +132,7 @@ spec: {{- if .Values.enterprise.enabled }} - name: license mountPath: /etc/loki/license - {{- end}} + {{- end }} {{- with .Values.write.extraVolumeMounts }} {{- toYaml . | nindent 12 }} {{- end }} @@ -143,7 +143,7 @@ spec: {{- end }} {{- with .Values.write.affinity }} affinity: - {{- tpl . $ | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.write.dnsConfig }} dnsConfig: @@ -167,12 +167,7 @@ spec: {{- toYaml .Values.write.persistence.dataVolumeParameters | nindent 10 }} {{- end}} - name: config - {{- if .Values.loki.existingSecretForConfig }} - secret: - secretName: {{ .Values.loki.existingSecretForConfig }} - {{- else }} {{- include "loki.configVolume" . | nindent 10 }} - {{- end }} - name: runtime-config configMap: name: {{ template "loki.name" . 
}}-runtime diff --git a/production/helm/loki/test/config_test.go b/production/helm/loki/test/config_test.go new file mode 100644 index 000000000000..6926c7b2a85c --- /dev/null +++ b/production/helm/loki/test/config_test.go @@ -0,0 +1,220 @@ +package test + +import ( + "os" + "os/exec" + "testing" + + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +type replicas struct { + Replicas int `yaml:"replicas"` +} +type loki struct { + Storage struct { + Type string `yaml:"type"` + } `yaml:"storage"` +} + +type values struct { + DeploymentMode string `yaml:"deploymentMode"` + Backend replicas `yaml:"backend"` + Compactor replicas `yaml:"compactor"` + Distributor replicas `yaml:"distributor"` + IndexGateway replicas `yaml:"indexGateway"` + Ingester replicas `yaml:"ingester"` + Querier replicas `yaml:"querier"` + QueryFrontend replicas `yaml:"queryFrontend"` + QueryScheduler replicas `yaml:"queryScheduler"` + Read replicas `yaml:"read"` + Ruler replicas `yaml:"ruler"` + SingleBinary replicas `yaml:"singleBinary"` + Write replicas `yaml:"write"` + + Loki loki `yaml:"loki"` +} + +func templateConfig(t *testing.T, vals values) error { + y, err := yaml.Marshal(&vals) + require.NoError(t, err) + require.Greater(t, len(y), 0) + + f, err := os.CreateTemp("", "values.yaml") + require.NoError(t, err) + + _, err = f.Write(y) + require.NoError(t, err) + + cmd := exec.Command("helm", "dependency", "build") + // Dependency build needs to be run from the parent directory where the chart is located. 
+ cmd.Dir = "../" + var cmdOutput []byte + if cmdOutput, err = cmd.CombinedOutput(); err != nil { + t.Log("dependency build failed", "err", string(cmdOutput)) + return err + } + + cmd = exec.Command("helm", "template", "../", "--values", f.Name()) + if cmdOutput, err := cmd.CombinedOutput(); err != nil { + t.Log("template failed", "err", string(cmdOutput)) + return err + } + + return nil +} + +// E.Welch these tests fail because the templateConfig function above can't resolve the chart dependencies and I'm not sure how to fix this.... + +//func Test_InvalidConfigs(t *testing.T) { +// t.Run("running both single binary and scalable targets", func(t *testing.T) { +// vals := values{ +// SingleBinary: replicas{Replicas: 1}, +// Write: replicas{Replicas: 1}, +// Loki: loki{ +// Storage: struct { +// Type string `yaml:"type"` +// }{Type: "gcs"}, +// }, +// } +// require.Error(t, templateConfig(t, vals)) +// }) +// +// t.Run("running both single binary and distributed targets", func(t *testing.T) { +// vals := values{ +// SingleBinary: replicas{Replicas: 1}, +// Distributor: replicas{Replicas: 1}, +// Loki: loki{ +// Storage: struct { +// Type string `yaml:"type"` +// }{Type: "gcs"}, +// }, +// } +// require.Error(t, templateConfig(t, vals)) +// }) +// +// t.Run("running both scalable and distributed targets", func(t *testing.T) { +// vals := values{ +// Read: replicas{Replicas: 1}, +// Distributor: replicas{Replicas: 1}, +// Loki: loki{ +// Storage: struct { +// Type string `yaml:"type"` +// }{Type: "gcs"}, +// }, +// } +// require.Error(t, templateConfig(t, vals)) +// }) +// +// t.Run("running scalable with filesystem storage", func(t *testing.T) { +// vals := values{ +// Read: replicas{Replicas: 1}, +// Loki: loki{ +// Storage: struct { +// Type string `yaml:"type"` +// }{Type: "filesystem"}, +// }, +// } +// +// require.Error(t, templateConfig(t, vals)) +// }) +// +// t.Run("running distributed with filesystem storage", func(t *testing.T) { +// vals := values{ +// 
Distributor: replicas{Replicas: 1}, +// Loki: loki{ +// Storage: struct { +// Type string `yaml:"type"` +// }{Type: "filesystem"}, +// }, +// } +// +// require.Error(t, templateConfig(t, vals)) +// }) +//} +// +//func Test_ValidConfigs(t *testing.T) { +// t.Run("single binary", func(t *testing.T) { +// vals := values{ +// +// DeploymentMode: "SingleBinary", +// +// SingleBinary: replicas{Replicas: 1}, +// +// Backend: replicas{Replicas: 0}, +// Compactor: replicas{Replicas: 0}, +// Distributor: replicas{Replicas: 0}, +// IndexGateway: replicas{Replicas: 0}, +// Ingester: replicas{Replicas: 0}, +// Querier: replicas{Replicas: 0}, +// QueryFrontend: replicas{Replicas: 0}, +// QueryScheduler: replicas{Replicas: 0}, +// Read: replicas{Replicas: 0}, +// Ruler: replicas{Replicas: 0}, +// Write: replicas{Replicas: 0}, +// +// Loki: loki{ +// Storage: struct { +// Type string `yaml:"type"` +// }{Type: "filesystem"}, +// }, +// } +// require.NoError(t, templateConfig(t, vals)) +// }) +// +// t.Run("scalable", func(t *testing.T) { +// vals := values{ +// +// DeploymentMode: "SimpleScalable", +// +// Backend: replicas{Replicas: 1}, +// Read: replicas{Replicas: 1}, +// Write: replicas{Replicas: 1}, +// +// Compactor: replicas{Replicas: 0}, +// Distributor: replicas{Replicas: 0}, +// IndexGateway: replicas{Replicas: 0}, +// Ingester: replicas{Replicas: 0}, +// Querier: replicas{Replicas: 0}, +// QueryFrontend: replicas{Replicas: 0}, +// QueryScheduler: replicas{Replicas: 0}, +// Ruler: replicas{Replicas: 0}, +// SingleBinary: replicas{Replicas: 0}, +// +// Loki: loki{ +// Storage: struct { +// Type string `yaml:"type"` +// }{Type: "gcs"}, +// }, +// } +// require.NoError(t, templateConfig(t, vals)) +// }) +// +// t.Run("distributed", func(t *testing.T) { +// vals := values{ +// DeploymentMode: "Distributed", +// +// Compactor: replicas{Replicas: 1}, +// Distributor: replicas{Replicas: 1}, +// IndexGateway: replicas{Replicas: 1}, +// Ingester: replicas{Replicas: 1}, +// Querier: 
replicas{Replicas: 1}, +// QueryFrontend: replicas{Replicas: 1}, +// QueryScheduler: replicas{Replicas: 1}, +// Ruler: replicas{Replicas: 1}, +// +// Backend: replicas{Replicas: 0}, +// Read: replicas{Replicas: 0}, +// SingleBinary: replicas{Replicas: 0}, +// Write: replicas{Replicas: 0}, +// +// Loki: loki{ +// Storage: struct { +// Type string `yaml:"type"` +// }{Type: "gcs"}, +// }, +// } +// require.NoError(t, templateConfig(t, vals)) +// }) +//} diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index 70d853bca2ce..9cf1c3fd1eef 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -18,17 +18,25 @@ fullnameOverride: null clusterLabelOverride: null # -- Image pull secrets for Docker images imagePullSecrets: [] -kubectlImage: - # -- The Docker registry - registry: docker.io - # -- Docker image repository - repository: bitnami/kubectl - # -- Overrides the image tag whose default is the chart's appVersion - tag: null - # -- Overrides the image tag with an image digest - digest: null - # -- Docker image pull policy - pullPolicy: IfNotPresent + +# -- Deployment mode lets you specify how to deploy Loki. +# There are 3 options: +# - SingleBinary: Loki is deployed as a single binary, useful for small installs typically without HA, up to a few tens of GB/day. +# - SimpleScalable: Loki is deployed as 3 targets: read, write, and backend. Useful for medium installs easier to manage than distributed, up to a about 1TB/day. +# - Distributed: Loki is deployed as individual microservices. The most complicated but most capable, useful for large installs, typically over 1TB/day. 
+# There are also 2 additional modes used for migrating between deployment modes: +# - SingleBinary<->SimpleScalable: Migrate from SingleBinary to SimpleScalable (or vice versa) +# - SimpleScalable<->Distributed: Migrate from SimpleScalable to Distributed (or vice versa) +# Note: SimpleScalable and Distributed REQUIRE the use of object storage. +deploymentMode: SimpleScalable + +###################################################################################################################### +# +# Base Loki Configs including kubernetes configurations and configurations for Loki itself, +# see below for more specifics on Loki's configuration. +# +###################################################################################################################### +# -- Configuration for running Loki loki: # Configures the readiness probe for all of the Loki pods readinessProbe: @@ -77,14 +85,44 @@ loki: allowPrivilegeEscalation: false # -- Should enableServiceLinks be enabled. Default to enable enableServiceLinks: true - # -- Specify an existing secret containing loki configuration. If non-empty, overrides `loki.config` - existingSecretForConfig: "" + ###################################################################################################################### + # + # Loki Configuration + # + # There are several ways to pass configuration to Loki, listing them here in order of our preference for how + # you should use this chart. + # 1. Use the templated value of loki.config below and the corresponding override sections which follow. + # This allows us to set a lot of important Loki configurations and defaults and also allows us to maintain them + # over time as Loki changes and evolves. + # 2. Use the loki.structuredConfig section. 
+ # This will completely override the templated value of loki.config, so you MUST provide the entire Loki config + # including any configuration that we set in loki.config unless you explicitly are trying to change one of those + # values and are not able to do so with the templated sections. + # If you choose this approach the burden is on you to maintain any changes we make to the templated config. + # 3. Use an existing secret or configmap to provide the configuration. + # This option is mostly provided for folks who have external processes which provide or modify the configuration. + # When using this option you can specify a different name for loki.generatedConfigObjectName and configObjectName + # if you have a process which takes the generated config and modifies it, or you can stop the chart from generating + # a config entirely by setting loki.generatedConfigObjectName to + # + ###################################################################################################################### + # -- Defines what kind of object stores the configuration, a ConfigMap or a Secret. # In order to move sensitive information (such as credentials) from the ConfigMap/Secret to a more secure location (e.g. vault), it is possible to use [environment variables in the configuration](https://grafana.com/docs/loki/latest/configuration/#use-environment-variables-in-the-configuration). # Such environment variables can be then stored in a separate Secret and injected via the global.extraEnvFrom value. For details about environment injection from a Secret please see [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-as-container-environment-variables). configStorageType: ConfigMap - # -- Name of the Secret or ConfigMap that contains the configuration (used for naming even if config is internal). - externalConfigSecretName: '{{ include "loki.name" . }}' + + # -- The name of the object which Loki will mount as a volume containing the config. 
+ # If the configStorageType is Secret, this will be the name of the Secret, if it is ConfigMap, this will be the name of the ConfigMap. + # The value will be passed through tpl. + configObjectName: '{{ include "loki.name" . }}' + + # -- The name of the Secret or ConfigMap that will be created by this chart. + # If empty, no configmap or secret will be created. + # The value will be passed through tpl. + generatedConfigObjectName: '{{ include "loki.name" . }}' + + # -- Config file contents for Loki # @default -- See values.yaml config: | @@ -135,37 +173,34 @@ loki: runtime_config: file: /etc/loki/runtime-config/runtime-config.yaml - {{- with .Values.loki.memcached.chunk_cache }} - {{- if and .enabled (or .host .addresses) }} + {{- with .Values.chunksCache }} + {{- if .enabled }} chunk_store_config: chunk_cache_config: + default_validity: {{ .defaultValidity }} + background: + writeback_goroutines: {{ .writebackParallelism }} + writeback_buffer: {{ .writebackBuffer }} + writeback_size_limit: {{ .writebackSizeLimit }} memcached: - batch_size: {{ .batch_size }} + batch_size: {{ .batchSize }} parallelism: {{ .parallelism }} memcached_client: - {{- if .host }} - host: {{ .host }} - {{- end }} - {{- if .addresses }} - addresses: {{ .addresses }} - {{- end }} - service: {{ .service }} + addresses: dnssrvnoa+_memcached-client._tcp.{{ template "loki.fullname" $ }}-chunks-cache.{{ $.Release.Namespace }}.svc + consistent_hash: true + timeout: {{ .timeout }} + max_idle_conns: 72 {{- end }} {{- end }} {{- if .Values.loki.schemaConfig }} schema_config: {{- toYaml .Values.loki.schemaConfig | nindent 2}} - {{- else }} + {{- end }} + + {{- if .Values.loki.useTestSchema }} schema_config: - configs: - - from: 2022-01-11 - store: boltdb-shipper - object_store: {{ .Values.loki.storage.type }} - schema: v12 - index: - prefix: loki_index_ - period: 24h + {{- toYaml .Values.loki.testSchemaConfig | nindent 2}} {{- end }} {{ include "loki.rulerConfig" . 
}} @@ -176,25 +211,28 @@ loki: retention_period: {{ .Values.tableManager.retention_period }} {{- end }} - {{- with .Values.loki.memcached.results_cache }} query_range: align_queries_with_step: true - {{- if and .enabled (or .host .addresses) }} - cache_results: {{ .enabled }} + {{- with .Values.loki.query_range }} + {{- tpl (. | toYaml) $ | nindent 4 }} + {{- end }} + {{- if .Values.resultsCache.enabled }} + {{- with .Values.resultsCache }} + cache_results: true results_cache: cache: - default_validity: {{ .default_validity }} + default_validity: {{ .defaultValidity }} + background: + writeback_goroutines: {{ .writebackParallelism }} + writeback_buffer: {{ .writebackBuffer }} + writeback_size_limit: {{ .writebackSizeLimit }} memcached_client: - {{- if .host }} - host: {{ .host }} - {{- end }} - {{- if .addresses }} - addresses: {{ .addresses }} - {{- end }} - service: {{ .service }} + consistent_hash: true + addresses: dnssrvnoa+_memcached-client._tcp.{{ template "loki.fullname" $ }}-results-cache.{{ $.Release.Namespace }}.svc timeout: {{ .timeout }} + update_interval: 1m + {{- end }} {{- end }} - {{- end }} {{- with .Values.loki.storage_config }} storage_config: @@ -255,12 +293,15 @@ loki: server: http_listen_port: 3100 grpc_listen_port: 9095 + http_server_read_timeout: 600s + http_server_write_timeout: 600s # -- Limits config limits_config: reject_old_samples: true reject_old_samples_max_age: 168h max_cache_freshness_per_query: 10m split_queries_by_interval: 15m + query_timeout: 300s # -- Provides a reloadable runtime configuration file for some specific configuration runtimeConfig: {} # -- Check https://grafana.com/docs/loki/latest/configuration/#common_config for more info on how to provide a common configuration @@ -340,6 +381,18 @@ loki: default_validity: "12h" # -- Check https://grafana.com/docs/loki/latest/configuration/#schema_config for more info on how to configure schemas schemaConfig: {} + # -- a real Loki install requires a proper schemaConfig defined 
above this, however for testing or playing around + # you can enable useTestSchema + useTestSchema: false + testSchemaConfig: + configs: + - from: 2024-04-01 + store: tsdb + object_store: filesystem + schema: v13 + index: + prefix: index_ + period: 24h # -- Check https://grafana.com/docs/loki/latest/configuration/#ruler for more info on configuring ruler rulerConfig: {} # -- Structured loki configuration, takes precedence over `loki.config`, `loki.schemaConfig`, `loki.storageConfig` @@ -348,6 +401,12 @@ loki: query_scheduler: {} # -- Additional storage config storage_config: + boltdb_shipper: + index_gateway_client: + server_address: '{{ include "loki.indexGatewayAddress" . }}' + tsdb_shipper: + index_gateway_client: + server_address: '{{ include "loki.indexGatewayAddress" . }}' hedging: at: "250ms" max_per_second: 20 @@ -357,14 +416,17 @@ loki: # -- Optional analytics configuration analytics: {} # -- Optional querier configuration + query_range: {} + # -- Optional querier configuration querier: {} # -- Optional ingester configuration ingester: {} # -- Optional index gateway configuration index_gateway: - mode: ring + mode: simple frontend: scheduler_address: '{{ include "loki.querySchedulerAddress" . }}' + tail_proxy_url: '{{ include "loki.querierAddress" . }}' frontend_worker: scheduler_address: '{{ include "loki.querySchedulerAddress" . 
}}' # -- Optional distributor configuration @@ -372,6 +434,14 @@ loki: # -- Enable tracing tracing: enabled: false + +###################################################################################################################### +# +# Enterprise Loki Configs +# +###################################################################################################################### + +# -- Configuration for running Enterprise Loki enterprise: # Enable enterprise features, license must be provided enabled: false @@ -394,6 +464,8 @@ enterprise: externalLicenseName: null # -- Name of the external config secret to use externalConfigName: "" + # -- Use GEL gateway, if false will use the default nginx gateway + gelGateway: true # -- If enabled, the correct admin_client storage will be configured. If disabled while running enterprise, # make sure auth is set to `type: trust`, or that `auth_enabled` is set to `false`. adminApi: @@ -504,15 +576,114 @@ enterprise: pullPolicy: IfNotPresent # -- Volume mounts to add to the provisioner pods extraVolumeMounts: [] -# -- Options that may be necessary when performing a migration from another helm chart -migrate: - # -- When migrating from a distributed chart like loki-distributed or enterprise-logs - fromDistributed: - # -- Set to true if migrating from a distributed helm chart - enabled: false - # -- If migrating from a distributed service, provide the distributed deployment's - # memberlist service DNS so the new deployment can join its ring. 
- memberlistService: "" +# -- kubetclImage is used in the enterprise provisioner and tokengen jobs +kubectlImage: + # -- The Docker registry + registry: docker.io + # -- Docker image repository + repository: bitnami/kubectl + # -- Overrides the image tag whose default is the chart's appVersion + tag: null + # -- Overrides the image tag with an image digest + digest: null + # -- Docker image pull policy + pullPolicy: IfNotPresent + +###################################################################################################################### +# +# Chart Testing +# +###################################################################################################################### + +# -- Section for configuring optional Helm test +test: + enabled: true + # -- Used to directly query the metrics endpoint of the canary for testing, this approach avoids needing prometheus for testing. + # This in a newer approach to using prometheusAddress such that tests do not have a dependency on prometheus + canaryServiceAddress: "http://loki-canary:3500/metrics" + # -- Address of the prometheus server to query for the test. This overrides any value set for canaryServiceAddress. + # This is kept for backward compatibility and may be removed in future releases. 
Previous value was 'http://prometheus:9090' + prometheusAddress: "" + # -- Number of times to retry the test before failing + timeout: 1m + # -- Additional labels for the test pods + labels: {} + # -- Additional annotations for test pods + annotations: {} + # -- Image to use for loki canary + image: + # -- The Docker registry + registry: docker.io + # -- Docker image repository + repository: grafana/loki-helm-test + # -- Overrides the image tag whose default is the chart's appVersion + tag: "ewelch-distributed-helm-chart-17db5ee" + # -- Overrides the image tag with an image digest + digest: null + # -- Docker image pull policy + pullPolicy: IfNotPresent +# The Loki canary pushes logs to and queries from this loki installation to test +# that it's working correctly +lokiCanary: + enabled: true + # -- If true, the canary will send directly to Loki via the address configured for verification -- + # -- If false, it will write to stdout and an Agent will be needed to scrape and send the logs -- + push: true + # -- The name of the label to look for at loki when doing the checks. 
+ labelname: pod + # -- Additional annotations for the `loki-canary` Daemonset + annotations: {} + # -- Additional labels for each `loki-canary` pod + podLabels: {} + service: + # -- Annotations for loki-canary Service + annotations: {} + # -- Additional labels for loki-canary Service + labels: {} + # -- Additional CLI arguments for the `loki-canary' command + extraArgs: [] + # -- Environment variables to add to the canary pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the canary pods + extraEnvFrom: [] + # -- Volume mounts to add to the canary pods + extraVolumeMounts: [] + # -- Volumes to add to the canary pods + extraVolumes: [] + # -- Resource requests and limits for the canary + resources: {} + # -- DNS config for canary pods + dnsConfig: {} + # -- Node selector for canary pods + nodeSelector: {} + # -- Tolerations for canary pods + tolerations: [] + # -- The name of the PriorityClass for loki-canary pods + priorityClassName: null + # -- Image to use for loki canary + image: + # -- The Docker registry + registry: docker.io + # -- Docker image repository + repository: grafana/loki-canary + # -- Overrides the image tag whose default is the chart's appVersion + tag: null + # -- Overrides the image tag with an image digest + digest: null + # -- Docker image pull policy + pullPolicy: IfNotPresent + # -- Update strategy for the `loki-canary` Daemonset pods + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + +###################################################################################################################### +# +# Service Accounts and Kubernetes RBAC +# +###################################################################################################################### + serviceAccount: # -- Specifies whether a ServiceAccount should be created create: true @@ -541,324 +712,1304 @@ rbac: # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' # -- Whether to 
install RBAC in the namespace only or cluster-wide. Useful if you want to watch ConfigMap globally. namespaced: false -# -- Section for configuring optional Helm test -test: - enabled: true - # -- Address of the prometheus server to query for the test - prometheusAddress: "http://prometheus:9090" - # -- Number of times to retry the test before failing - timeout: 1m - # -- Additional labels for the test pods + +###################################################################################################################### +# +# Network Policy configuration +# +###################################################################################################################### + +networkPolicy: + # -- Specifies whether Network Policies should be created + enabled: false + # -- Specifies whether the policies created will be standard Network Policies (flavor: kubernetes) + # or Cilium Network Policies (flavor: cilium) + flavor: kubernetes + metrics: + # -- Specifies the Pods which are allowed to access the metrics port. + # As this is cross-namespace communication, you also need the namespaceSelector. + podSelector: {} + # -- Specifies the namespaces which are allowed to access the metrics port + namespaceSelector: {} + # -- Specifies specific network CIDRs which are allowed to access the metrics port. + # In case you use namespaceSelector, you also have to specify your kubelet networks here. + # The metrics ports are also used for probes. + cidrs: [] + ingress: + # -- Specifies the Pods which are allowed to access the http port. + # As this is cross-namespace communication, you also need the namespaceSelector. + podSelector: {} + # -- Specifies the namespaces which are allowed to access the http port + namespaceSelector: {} + alertmanager: + # -- Specify the alertmanager port used for alerting + port: 9093 + # -- Specifies the alertmanager Pods. + # As this is cross-namespace communication, you also need the namespaceSelector. 
+ podSelector: {} + # -- Specifies the namespace the alertmanager is running in + namespaceSelector: {} + externalStorage: + # -- Specify the port used for external storage, e.g. AWS S3 + ports: [] + # -- Specifies specific network CIDRs you want to limit access to + cidrs: [] + discovery: + # -- (int) Specify the port used for discovery + port: null + # -- Specifies the Pods labels used for discovery. + # As this is cross-namespace communication, you also need the namespaceSelector. + podSelector: {} + # -- Specifies the namespace the discovery Pods are running in + namespaceSelector: {} + egressWorld: + # -- Enable additional cilium egress rules to external world for write, read and backend. + enabled: false + egressKubeApiserver: + # -- Enable additional cilium egress rules to kube-apiserver for backend. + enabled: false + +###################################################################################################################### +# +# Global memberlist configuration +# +###################################################################################################################### + +# Configuration for the memberlist service +memberlist: + service: + publishNotReadyAddresses: false + +###################################################################################################################### +# +# adminAPI configuration, enterprise only. 
+# +###################################################################################################################### + +# -- Configuration for the `admin-api` target +adminApi: + # -- Define the amount of instances + replicas: 1 + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld + # -- Additional CLI arguments for the `admin-api` target + extraArgs: {} + # -- Additional labels for the `admin-api` Deployment labels: {} - # -- Additional annotations for test pods + # -- Additional annotations for the `admin-api` Deployment annotations: {} - # -- Image to use for loki canary + # -- Additional labels and annotations for the `admin-api` Service + service: + labels: {} + annotations: {} + # -- Run container as user `enterprise-logs(uid=10001)` + # `fsGroup` must not be specified, because these security options are applied + # on container level not on Pod level. + podSecurityContext: + runAsNonRoot: true + runAsGroup: 10001 + runAsUser: 10001 + containerSecurityContext: + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + allowPrivilegeEscalation: false + # -- Update strategy + strategy: + type: RollingUpdate + # -- Readiness probe + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + # -- Request and limit Kubernetes resources + # -- Values are defined in small.yaml and large.yaml + resources: {} + # -- Configure optional environment variables + env: [] + # -- Configure optional initContainers + initContainers: [] + # -- Conifgure optional extraContainers + extraContainers: [] + # -- Additional volumes for Pods + extraVolumes: [] + # -- Additional volume mounts for Pods + extraVolumeMounts: [] + # -- Affinity for admin-api Pods + affinity: {} + # -- Node selector for admin-api Pods + nodeSelector: {} + # -- Tolerations for admin-api Pods + tolerations: [] + # -- Grace period to allow the admin-api to shutdown before it is killed + terminationGracePeriodSeconds: 60 + + 
+###################################################################################################################### +# +# Gateway and Ingress +# +# By default this chart will deploy a Nginx container to act as a gateway which handles routing of traffic +# and can also do auth. +# +# If you would prefer you can optionally disable this and enable using k8s ingress to do the incoming routing. +# +###################################################################################################################### + +# Configuration for the gateway +gateway: + # -- Specifies whether the gateway should be enabled + enabled: true + # -- Number of replicas for the gateway + replicas: 1 + # -- Enable logging of 2xx and 3xx HTTP requests + verboseLogging: true + autoscaling: + # -- Enable autoscaling for the gateway + enabled: false + # -- Minimum autoscaling replicas for the gateway + minReplicas: 1 + # -- Maximum autoscaling replicas for the gateway + maxReplicas: 3 + # -- Target CPU utilisation percentage for the gateway + targetCPUUtilizationPercentage: 60 + # -- Target memory utilisation percentage for the gateway + targetMemoryUtilizationPercentage: + # -- See `kubectl explain deployment.spec.strategy` for more + # -- ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + # -- Behavior policies while scaling. 
+ behavior: {} + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 60 + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + deploymentStrategy: + type: RollingUpdate image: - # -- The Docker registry + # -- The Docker registry for the gateway image registry: docker.io - # -- Docker image repository - repository: grafana/loki-helm-test - # -- Overrides the image tag whose default is the chart's appVersion - tag: null - # -- Overrides the image tag with an image digest + # -- The gateway image repository + repository: nginxinc/nginx-unprivileged + # -- The gateway image tag + tag: 1.24-alpine + # -- Overrides the gateway image tag with an image digest digest: null - # -- Docker image pull policy + # -- The gateway image pull policy pullPolicy: IfNotPresent -# Monitoring section determines which monitoring features to enable -monitoring: - # Dashboards for monitoring Loki - dashboards: - # -- If enabled, create configmap with dashboards for monitoring Loki - enabled: true - # -- Alternative namespace to create dashboards ConfigMap in - namespace: null - # -- Additional annotations for the dashboards ConfigMap - annotations: {} - # -- Labels for the dashboards ConfigMap - labels: - grafana_dashboard: "1" - # Recording rules for monitoring Loki, required for some dashboards - rules: - # -- If enabled, create PrometheusRule resource with Loki recording rules - enabled: true - # -- Include alerting rules - alerting: true - # -- Specify which individual alerts should be disabled - # -- Instead of turning off each alert one by one, set the .monitoring.rules.alerting value to false instead. - # -- If you disable all the alerts and keep .monitoring.rules.alerting set to true, the chart will fail to render. 
- disabled: {} - # LokiRequestErrors: true - # LokiRequestPanics: true - # -- Alternative namespace to create PrometheusRule resources in - namespace: null - # -- Additional annotations for the rules PrometheusRule resource + # -- The name of the PriorityClass for gateway pods + priorityClassName: null + # -- Annotations for gateway deployment + annotations: {} + # -- Annotations for gateway pods + podAnnotations: {} + # -- Additional labels for gateway pods + podLabels: {} + # -- Additional CLI args for the gateway + extraArgs: [] + # -- Environment variables to add to the gateway pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the gateway pods + extraEnvFrom: [] + # -- Lifecycle for the gateway container + lifecycle: {} + # -- Volumes to add to the gateway pods + extraVolumes: [] + # -- Volume mounts to add to the gateway pods + extraVolumeMounts: [] + # -- The SecurityContext for gateway containers + podSecurityContext: + fsGroup: 101 + runAsGroup: 101 + runAsNonRoot: true + runAsUser: 101 + # -- The SecurityContext for gateway containers + containerSecurityContext: + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + allowPrivilegeEscalation: false + # -- Resource requests and limits for the gateway + resources: {} + # -- Containers to add to the gateway pods + extraContainers: [] + # -- Grace period to allow the gateway to shutdown before it is killed + terminationGracePeriodSeconds: 30 + # -- Affinity for gateway pods. 
+ # @default -- Hard node anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: gateway + topologyKey: kubernetes.io/hostname + # -- DNS config for gateway pods + dnsConfig: {} + # -- Node selector for gateway pods + nodeSelector: {} + # -- Topology Spread Constraints for gateway pods + topologySpreadConstraints: [] + # -- Tolerations for gateway pods + tolerations: [] + # Gateway service configuration + service: + # -- Port of the gateway service + port: 80 + # -- Type of the gateway service + type: ClusterIP + # -- ClusterIP of the gateway service + clusterIP: null + # -- (int) Node port if service type is NodePort + nodePort: null + # -- Load balancer IPO address if service type is LoadBalancer + loadBalancerIP: null + # -- Annotations for the gateway service annotations: {} - # -- Additional labels for the rules PrometheusRule resource + # -- Labels for gateway service labels: {} - # -- Additional labels for PrometheusRule alerts - additionalRuleLabels: {} - # -- Additional groups to add to the rules file - additionalGroups: [] - # - name: additional-loki-rules - # rules: - # - record: job:loki_request_duration_seconds_bucket:sum_rate - # expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job) - # - record: job_route:loki_request_duration_seconds_bucket:sum_rate - # expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job, route) - # - record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate - # expr: sum(rate(container_cpu_usage_seconds_total[1m])) by (node, namespace, pod, container) - # ServiceMonitor configuration - serviceMonitor: - # -- If enabled, ServiceMonitor resources for Prometheus Operator are created - enabled: true - # -- Namespace selector for ServiceMonitor resources - namespaceSelector: {} - # -- ServiceMonitor annotations + # Gateway ingress configuration + ingress: + # -- Specifies 
whether an ingress for the gateway should be created + enabled: false + # -- Ingress Class Name. MAY be required for Kubernetes versions >= 1.18 + ingressClassName: "" + # -- Annotations for the gateway ingress annotations: {} - # -- Additional ServiceMonitor labels + # -- Labels for the gateway ingress labels: {} - # -- ServiceMonitor scrape interval - # Default is 15s because included recording rules use a 1m rate, and scrape interval needs to be at - # least 1/4 rate interval. - interval: 15s - # -- ServiceMonitor scrape timeout in Go duration format (e.g. 15s) - scrapeTimeout: null - # -- ServiceMonitor relabel configs to apply to samples before scraping - # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - relabelings: [] - # -- ServiceMonitor metric relabel configs to apply to samples before ingestion - # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint - metricRelabelings: [] - # -- ServiceMonitor will use http by default, but you can pick https as well - scheme: http - # -- ServiceMonitor will use these tlsConfig settings to make the health check requests - tlsConfig: null - # -- If defined, will create a MetricsInstance for the Grafana Agent Operator. - metricsInstance: - # -- If enabled, MetricsInstance resources for Grafana Agent Operator are created - enabled: true - # -- MetricsInstance annotations - annotations: {} - # -- Additional MetricsInstance labels - labels: {} - # -- If defined a MetricsInstance will be created to remote write metrics. - remoteWrite: null - # Self monitoring determines whether Loki should scrape its own logs. - # This feature currently relies on the Grafana Agent Operator being installed, - # which is installed by default using the grafana-agent-operator sub-chart. 
- # It will create custom resources for GrafanaAgent, LogsInstance, and PodLogs to configure - # scrape configs to scrape its own logs with the labels expected by the included dashboards. - selfMonitoring: - enabled: true - # -- Tenant to use for self monitoring - tenant: - # -- Name of the tenant - name: "self-monitoring" - # -- Namespace to create additional tenant token secret in. Useful if your Grafana instance - # is in a separate namespace. Token will still be created in the canary namespace. - secretNamespace: "{{ .Release.Namespace }}" - # Grafana Agent configuration - grafanaAgent: - # -- Controls whether to install the Grafana Agent Operator and its CRDs. - # Note that helm will not install CRDs if this flag is enabled during an upgrade. - # In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds - installOperator: true - # -- Grafana Agent annotations - annotations: {} - # -- Additional Grafana Agent labels - labels: {} - # -- Enable the config read api on port 8080 of the agent - enableConfigReadAPI: false - # -- The name of the PriorityClass for GrafanaAgent pods - priorityClassName: null - # -- Resource requests and limits for the grafanaAgent pods - resources: {} - # limits: - # memory: 200Mi - # requests: - # cpu: 50m - # memory: 100Mi - # -- Tolerations for GrafanaAgent pods - tolerations: [] - # PodLogs configuration - podLogs: - # -- PodLogs version - apiVersion: monitoring.grafana.com/v1alpha1 - # -- PodLogs annotations - annotations: {} - # -- Additional PodLogs labels - labels: {} - # -- PodLogs relabel configs to apply to samples before scraping - # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - relabelings: [] - # -- Additional pipeline stages to process logs after scraping - # https://grafana.com/docs/agent/latest/operator/api/#pipelinestagespec-a-namemonitoringgrafanacomv1alpha1pipelinestagespeca - additionalPipelineStages: 
[] - # LogsInstance configuration - logsInstance: - # -- LogsInstance annotations - annotations: {} - # -- Additional LogsInstance labels - labels: {} - # -- Additional clients for remote write - clients: null - # The Loki canary pushes logs to and queries from this loki installation to test - # that it's working correctly - lokiCanary: - enabled: true - # -- The name of the label to look for at loki when doing the checks. - labelname: pod - # -- Additional annotations for the `loki-canary` Daemonset + # -- Hosts configuration for the gateway ingress, passed through the `tpl` function to allow templating + hosts: + - host: gateway.loki.example.com + paths: + - path: / + # -- pathType (e.g. ImplementationSpecific, Prefix, .. etc.) might also be required by some Ingress Controllers + # pathType: Prefix + # -- TLS configuration for the gateway ingress. Hosts passed through the `tpl` function to allow templating + tls: + - secretName: loki-gateway-tls + hosts: + - gateway.loki.example.com + # Basic auth configuration + basicAuth: + # -- Enables basic authentication for the gateway + enabled: false + # -- The basic auth username for the gateway + username: null + # -- The basic auth password for the gateway + password: null + # -- Uses the specified users from the `loki.tenants` list to create the htpasswd file + # if `loki.tenants` is not set, the `gateway.basicAuth.username` and `gateway.basicAuth.password` are used + # The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes + # high CPU load. 
+ htpasswd: >- + {{ if .Values.loki.tenants }} + + {{- range $t := .Values.loki.tenants }} + {{ htpasswd (required "All tenants must have a 'name' set" $t.name) (required "All tenants must have a 'password' set" $t.password) }} + + {{- end }} + {{ else }} {{ htpasswd (required "'gateway.basicAuth.username' is required" .Values.gateway.basicAuth.username) (required "'gateway.basicAuth.password' is required" .Values.gateway.basicAuth.password) }} {{ end }} + # -- Existing basic auth secret to use. Must contain '.htpasswd' + existingSecret: null + # Configures the readiness probe for the gateway + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: 15 + timeoutSeconds: 1 + nginxConfig: + # -- Which schema to be used when building URLs. Can be 'http' or 'https'. + schema: http + # -- Enable listener for IPv6, disable on IPv4-only systems + enableIPv6: true + # -- NGINX log format + logFormat: |- + main '$remote_addr - $remote_user [$time_local] $status ' + '"$request" $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + # -- Allows appending custom configuration to the server block + serverSnippet: "" + # -- Allows appending custom configuration to the http block, passed through the `tpl` function to allow templating + httpSnippet: >- + {{ if .Values.loki.tenants }}proxy_set_header X-Scope-OrgID $remote_user;{{ end }} + # -- Whether ssl should be appended to the listen directive of the server block or not. + ssl: false + # -- Override Read URL + customReadUrl: null + # -- Override Write URL + customWriteUrl: null + # -- Override Backend URL + customBackendUrl: null + # -- Allows overriding the DNS resolver address nginx will use. + resolver: "" + # -- Config file contents for Nginx. Passed through the `tpl` function to allow templating + # @default -- See values.yaml + file: | + {{- include "loki.nginxFile" . 
| indent 2 -}} + +# -- If running enterprise and using the default enterprise gateway, configs go here. +enterpriseGateway: + # -- Define the amount of instances + replicas: 1 + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld + # -- Additional CLI arguments for the `gateway` target + extraArgs: {} + # -- Additional labels for the `gateway` Pod + labels: {} + # -- Additional annotations for the `gateway` Pod + annotations: {} + # -- Additional labels and annotations for the `gateway` Service + # -- Service overriding service type + service: + type: ClusterIP + labels: {} + annotations: {} + # -- Run container as user `enterprise-logs(uid=10001)` + podSecurityContext: + runAsNonRoot: true + runAsGroup: 10001 + runAsUser: 10001 + fsGroup: 10001 + containerSecurityContext: + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + allowPrivilegeEscalation: false + # -- If you want to use your own proxy URLs, set this to false. + useDefaultProxyURLs: true + # -- update strategy + strategy: + type: RollingUpdate + # -- Readiness probe + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + # -- Request and limit Kubernetes resources + # -- Values are defined in small.yaml and large.yaml + resources: {} + # -- Configure optional environment variables + env: [] + # -- Configure optional initContainers + initContainers: [] + # -- Conifgure optional extraContainers + extraContainers: [] + # -- Additional volumes for Pods + extraVolumes: [] + # -- Additional volume mounts for Pods + extraVolumeMounts: [] + # -- Affinity for gateway Pods + affinity: {} + # -- Node selector for gateway Pods + nodeSelector: {} + # -- Tolerations for gateway Pods + tolerations: [] + # -- Grace period to allow the gateway to shutdown before it is killed + terminationGracePeriodSeconds: 60 + +# -- Ingress configuration Use either this ingress or the gateway, but not both at once. 
+# If you enable this, make sure to disable the gateway. +# You'll need to supply authn configuration for your ingress controller. +ingress: + enabled: false + ingressClassName: "" + annotations: {} + # nginx.ingress.kubernetes.io/auth-type: basic + # nginx.ingress.kubernetes.io/auth-secret: loki-distributed-basic-auth + # nginx.ingress.kubernetes.io/auth-secret-type: auth-map + # nginx.ingress.kubernetes.io/configuration-snippet: | + # proxy_set_header X-Scope-OrgID $remote_user; + labels: {} + # blackbox.monitoring.exclude: "true" + paths: + write: + - /api/prom/push + - /loki/api/v1/push + read: + - /api/prom/tail + - /loki/api/v1/tail + - /loki/api + - /api/prom/rules + - /loki/api/v1/rules + - /prometheus/api/v1/rules + - /prometheus/api/v1/alerts + singleBinary: + - /api/prom/push + - /loki/api/v1/push + - /api/prom/tail + - /loki/api/v1/tail + - /loki/api + - /api/prom/rules + - /loki/api/v1/rules + - /prometheus/api/v1/rules + - /prometheus/api/v1/alerts + # -- Hosts configuration for the ingress, passed through the `tpl` function to allow templating + hosts: + - loki.example.com + # -- TLS configuration for the ingress. Hosts passed through the `tpl` function to allow templating + tls: [] +# - hosts: +# - loki.example.com +# secretName: loki-distributed-tls + + +###################################################################################################################### +# +# Migration +# +###################################################################################################################### + +# -- Options that may be necessary when performing a migration from another helm chart +migrate: + # -- When migrating from a distributed chart like loki-distributed or enterprise-logs + fromDistributed: + # -- Set to true if migrating from a distributed helm chart + enabled: false + # -- If migrating from a distributed service, provide the distributed deployment's + # memberlist service DNS so the new deployment can join its ring. 
+ memberlistService: "" + + +###################################################################################################################### +# +# Single Binary Deployment +# +# For small Loki installations up to a few 10's of GB per day, or for testing and development. +# +###################################################################################################################### + +# Configuration for the single binary node(s) +singleBinary: + # -- Number of replicas for the single binary + replicas: 0 + autoscaling: + # -- Enable autoscaling + enabled: false + # -- Minimum autoscaling replicas for the single binary + minReplicas: 1 + # -- Maximum autoscaling replicas for the single binary + maxReplicas: 3 + # -- Target CPU utilisation percentage for the single binary + targetCPUUtilizationPercentage: 60 + # -- Target memory utilisation percentage for the single binary + targetMemoryUtilizationPercentage: + image: + # -- The Docker registry for the single binary image. Overrides `loki.image.registry` + registry: null + # -- Docker image repository for the single binary image. Overrides `loki.image.repository` + repository: null + # -- Docker image tag for the single binary image. 
Overrides `loki.image.tag` + tag: null + # -- The name of the PriorityClass for single binary pods + priorityClassName: null + # -- Annotations for single binary StatefulSet + annotations: {} + # -- Annotations for single binary pods + podAnnotations: {} + # -- Additional labels for each `single binary` pod + podLabels: {} + # -- Additional selector labels for each `single binary` pod + selectorLabels: {} + service: + # -- Annotations for single binary Service annotations: {} - # -- Additional labels for each `loki-canary` pod - podLabels: {} - service: - # -- Annotations for loki-canary Service - annotations: {} - # -- Additional labels for loki-canary Service - labels: {} - # -- Additional CLI arguments for the `loki-canary' command - extraArgs: [] - # -- Environment variables to add to the canary pods - extraEnv: [] - # -- Environment variables from secrets or configmaps to add to the canary pods - extraEnvFrom: [] - # -- Resource requests and limits for the canary - resources: {} - # -- DNS config for canary pods - dnsConfig: {} - # -- Node selector for canary pods - nodeSelector: {} - # -- Tolerations for canary pods - tolerations: [] - # -- The name of the PriorityClass for loki-canary pods - priorityClassName: null - # -- Image to use for loki canary - image: - # -- The Docker registry - registry: docker.io - # -- Docker image repository - repository: grafana/loki-canary - # -- Overrides the image tag whose default is the chart's appVersion - tag: null - # -- Overrides the image tag with an image digest - digest: null - # -- Docker image pull policy - pullPolicy: IfNotPresent - # -- Update strategy for the `loki-canary` Daemonset pods - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 + # -- Additional labels for single binary Service + labels: {} + # -- Comma-separated list of Loki modules to load for the single binary + targetModule: "all" + # -- Labels for single binary service + extraArgs: [] + # -- Environment variables to add 
to the single binary pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the single binary pods + extraEnvFrom: [] + # -- Extra containers to add to the single binary loki pod + extraContainers: [] + # -- Init containers to add to the single binary pods + initContainers: [] + # -- Volume mounts to add to the single binary pods + extraVolumeMounts: [] + # -- Volumes to add to the single binary pods + extraVolumes: [] + # -- Resource requests and limits for the single binary + resources: {} + # -- Grace period to allow the single binary to shutdown before it is killed + terminationGracePeriodSeconds: 30 + # -- Affinity for single binary pods. + # @default -- Hard node anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: single-binary + topologyKey: kubernetes.io/hostname + # -- DNS config for single binary pods + dnsConfig: {} + # -- Node selector for single binary pods + nodeSelector: {} + # -- Tolerations for single binary pods + tolerations: [] + persistence: + # -- Enable StatefulSetAutoDeletePVC feature + enableStatefulSetAutoDeletePVC: true + # -- Enable persistent disk + enabled: true + # -- Size of persistent disk + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . + # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). 
+ storageClass: null + # -- Selector for persistent disk + selector: null + +###################################################################################################################### +# +# Simple Scalable Deployment (SSD) Mode +# +# For small to medium size Loki deployments up to around 1 TB/day, this is the default mode for this helm chart +# +###################################################################################################################### + # Configuration for the write pod(s) write: # -- Number of replicas for the write replicas: 3 autoscaling: - # -- Enable autoscaling for the write. + # -- Enable autoscaling for the write. + enabled: false + # -- Minimum autoscaling replicas for the write. + minReplicas: 2 + # -- Maximum autoscaling replicas for the write. + maxReplicas: 6 + # -- Target CPU utilisation percentage for the write. + targetCPUUtilizationPercentage: 60 + # -- Target memory utilization percentage for the write. + targetMemoryUtilizationPercentage: + # -- Behavior policies while scaling. + behavior: + # -- see https://github.com/grafana/loki/blob/main/docs/sources/operations/storage/wal.md#how-to-scale-updown for scaledown details + scaleUp: + policies: + - type: Pods + value: 1 + periodSeconds: 900 + scaleDown: + policies: + - type: Pods + value: 1 + periodSeconds: 1800 + stabilizationWindowSeconds: 3600 + image: + # -- The Docker registry for the write image. Overrides `loki.image.registry` + registry: null + # -- Docker image repository for the write image. Overrides `loki.image.repository` + repository: null + # -- Docker image tag for the write image. 
Overrides `loki.image.tag` + tag: null + # -- The name of the PriorityClass for write pods + priorityClassName: null + # -- Annotations for write StatefulSet + annotations: {} + # -- Annotations for write pods + podAnnotations: {} + # -- Additional labels for each `write` pod + podLabels: {} + # -- Additional selector labels for each `write` pod + selectorLabels: {} + service: + # -- Annotations for write Service + annotations: {} + # -- Additional labels for write Service + labels: {} + # -- Comma-separated list of Loki modules to load for the write + targetModule: "write" + # -- Additional CLI args for the write + extraArgs: [] + # -- Environment variables to add to the write pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the write pods + extraEnvFrom: [] + # -- Lifecycle for the write container + lifecycle: {} + # -- The default /flush_shutdown preStop hook is recommended as part of the ingester + # scaledown process so it's added to the template by default when autoscaling is enabled, + # but it's disabled to optimize rolling restarts in instances that will never be scaled + # down or when using chunks storage with WAL disabled. + # https://github.com/grafana/loki/blob/main/docs/sources/operations/storage/wal.md#how-to-scale-updown + # -- Init containers to add to the write pods + initContainers: [] + # -- Containers to add to the write pods + extraContainers: [] + # -- Volume mounts to add to the write pods + extraVolumeMounts: [] + # -- Volumes to add to the write pods + extraVolumes: [] + # -- volumeClaimTemplates to add to StatefulSet + extraVolumeClaimTemplates: [] + # -- Resource requests and limits for the write + resources: {} + # -- Grace period to allow the write to shutdown before it is killed. Especially for the ingester, + # this must be increased. It must be long enough so writes can be gracefully shutdown flushing/transferring + # all data and to successfully leave the member ring on shutdown. 
+ terminationGracePeriodSeconds: 300 + # -- Affinity for write pods. + # @default -- Hard node anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: write + topologyKey: kubernetes.io/hostname + # -- DNS config for write pods + dnsConfig: {} + # -- Node selector for write pods + nodeSelector: {} + # -- Topology Spread Constraints for write pods + topologySpreadConstraints: [] + # -- Tolerations for write pods + tolerations: [] + # -- The default is to deploy all pods in parallel. + podManagementPolicy: "Parallel" + persistence: + # -- Enable volume claims in pod spec + volumeClaimsEnabled: true + # -- Parameters used for the `data` volume when volumeClaimEnabled if false + dataVolumeParameters: + emptyDir: {} + # -- Enable StatefulSetAutoDeletePVC feature + enableStatefulSetAutoDeletePVC: false + # -- Size of persistent disk + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . + # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # -- Selector for persistent disk + selector: null + +# -- Configuration for the read pod(s) +read: + # -- Number of replicas for the read + replicas: 3 + autoscaling: + # -- Enable autoscaling for the read, this is only used if `queryIndex.enabled: true` + enabled: false + # -- Minimum autoscaling replicas for the read + minReplicas: 2 + # -- Maximum autoscaling replicas for the read + maxReplicas: 6 + # -- Target CPU utilisation percentage for the read + targetCPUUtilizationPercentage: 60 + # -- Target memory utilisation percentage for the read + targetMemoryUtilizationPercentage: + # -- Behavior policies while scaling. 
+ behavior: {} + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 60 + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + image: + # -- The Docker registry for the read image. Overrides `loki.image.registry` + registry: null + # -- Docker image repository for the read image. Overrides `loki.image.repository` + repository: null + # -- Docker image tag for the read image. Overrides `loki.image.tag` + tag: null + # -- The name of the PriorityClass for read pods + priorityClassName: null + # -- Annotations for read deployment + annotations: {} + # -- Annotations for read pods + podAnnotations: {} + # -- Additional labels for each `read` pod + podLabels: {} + # -- Additional selector labels for each `read` pod + selectorLabels: {} + service: + # -- Annotations for read Service + annotations: {} + # -- Additional labels for read Service + labels: {} + # -- Comma-separated list of Loki modules to load for the read + targetModule: "read" + # -- Whether or not to use the 2 target type simple scalable mode (read, write) or the + # 3 target type (read, write, backend). Legacy refers to the 2 target type, so true will + # run two targets, false will run 3 targets. + legacyReadTarget: false + # -- Additional CLI args for the read + extraArgs: [] + # -- Containers to add to the read pods + extraContainers: [] + # -- Environment variables to add to the read pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the read pods + extraEnvFrom: [] + # -- Lifecycle for the read container + lifecycle: {} + # -- Volume mounts to add to the read pods + extraVolumeMounts: [] + # -- Volumes to add to the read pods + extraVolumes: [] + # -- Resource requests and limits for the read + resources: {} + # -- Grace period to allow the read to shutdown before it is killed + terminationGracePeriodSeconds: 30 + # -- Affinity for read pods. 
+ # @default -- Hard node anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: read + topologyKey: kubernetes.io/hostname + # -- DNS config for read pods + dnsConfig: {} + # -- Node selector for read pods + nodeSelector: {} + # -- Topology Spread Constraints for read pods + topologySpreadConstraints: [] + # -- Tolerations for read pods + tolerations: [] + # -- The default is to deploy all pods in parallel. + podManagementPolicy: "Parallel" + persistence: + # -- Enable StatefulSetAutoDeletePVC feature + enableStatefulSetAutoDeletePVC: true + # -- Size of persistent disk + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . + # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # -- Selector for persistent disk + selector: null + +# -- Configuration for the backend pod(s) +backend: + # -- Number of replicas for the backend + replicas: 3 + autoscaling: + # -- Enable autoscaling for the backend. enabled: false - # -- Minimum autoscaling replicas for the write. - minReplicas: 2 - # -- Maximum autoscaling replicas for the write. + # -- Minimum autoscaling replicas for the backend. + minReplicas: 3 + # -- Maximum autoscaling replicas for the backend. maxReplicas: 6 - # -- Target CPU utilisation percentage for the write. + # -- Target CPU utilization percentage for the backend. targetCPUUtilizationPercentage: 60 - # -- Target memory utilization percentage for the write. + # -- Target memory utilization percentage for the backend. targetMemoryUtilizationPercentage: # -- Behavior policies while scaling. 
- behavior: - # -- see https://github.com/grafana/loki/blob/main/docs/sources/operations/storage/wal.md#how-to-scale-updown for scaledown details - scaleUp: - policies: - - type: Pods - value: 1 - periodSeconds: 900 - scaleDown: - policies: - - type: Pods - value: 1 - periodSeconds: 1800 - stabilizationWindowSeconds: 3600 + behavior: {} + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 60 + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 image: - # -- The Docker registry for the write image. Overrides `loki.image.registry` + # -- The Docker registry for the backend image. Overrides `loki.image.registry` registry: null - # -- Docker image repository for the write image. Overrides `loki.image.repository` + # -- Docker image repository for the backend image. Overrides `loki.image.repository` repository: null - # -- Docker image tag for the write image. Overrides `loki.image.tag` + # -- Docker image tag for the backend image. 
Overrides `loki.image.tag` tag: null - # -- The name of the PriorityClass for write pods + # -- The name of the PriorityClass for backend pods priorityClassName: null - # -- Annotations for write StatefulSet + # -- Annotations for backend StatefulSet annotations: {} - # -- Annotations for write pods + # -- Annotations for backend pods podAnnotations: {} - # -- Additional labels for each `write` pod + # -- Additional labels for each `backend` pod podLabels: {} - # -- Additional selector labels for each `write` pod + # -- Additional selector labels for each `backend` pod selectorLabels: {} service: - # -- Annotations for write Service + # -- Annotations for backend Service annotations: {} - # -- Additional labels for write Service + # -- Additional labels for backend Service labels: {} - # -- Comma-separated list of Loki modules to load for the write - targetModule: "write" - # -- Additional CLI args for the write + # -- Comma-separated list of Loki modules to load for the read + targetModule: "backend" + # -- Additional CLI args for the backend + extraArgs: [] + # -- Environment variables to add to the backend pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the backend pods + extraEnvFrom: [] + # -- Init containers to add to the backend pods + initContainers: [] + # -- Volume mounts to add to the backend pods + extraVolumeMounts: [] + # -- Volumes to add to the backend pods + extraVolumes: [] + # -- Resource requests and limits for the backend + resources: {} + # -- Grace period to allow the backend to shutdown before it is killed. Especially for the ingester, + # this must be increased. It must be long enough so backends can be gracefully shutdown flushing/transferring + # all data and to successfully leave the member ring on shutdown. + terminationGracePeriodSeconds: 300 + # -- Affinity for backend pods. 
+ # @default -- Hard node anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: backend + topologyKey: kubernetes.io/hostname + # -- DNS config for backend pods + dnsConfig: {} + # -- Node selector for backend pods + nodeSelector: {} + # -- Topology Spread Constraints for backend pods + topologySpreadConstraints: [] + # -- Tolerations for backend pods + tolerations: [] + # -- The default is to deploy all pods in parallel. + podManagementPolicy: "Parallel" + persistence: + # -- Enable volume claims in pod spec + volumeClaimsEnabled: true + # -- Parameters used for the `data` volume when volumeClaimEnabled if false + dataVolumeParameters: + emptyDir: {} + # -- Enable StatefulSetAutoDeletePVC feature + enableStatefulSetAutoDeletePVC: true + # -- Size of persistent disk + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . + # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # -- Selector for persistent disk + selector: null + +###################################################################################################################### +# +# Microservices Mode +# +# For large Loki deployments ingesting more than 1 TB/day +# +###################################################################################################################### + +# -- Configuration for the ingester +ingester: + # -- Number of replicas for the ingester, when zoneAwareReplication.enabled is true, the total + # number of replicas will match this value with each zone having 1/3rd of the total replicas. 
+ replicas: 0 + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld + autoscaling: + # -- Enable autoscaling for the ingester + enabled: false + # -- Minimum autoscaling replicas for the ingester + minReplicas: 1 + # -- Maximum autoscaling replicas for the ingester + maxReplicas: 3 + # -- Target CPU utilisation percentage for the ingester + targetCPUUtilizationPercentage: 60 + # -- Target memory utilisation percentage for the ingester + targetMemoryUtilizationPercentage: null + # -- Allows one to define custom metrics using the HPA/v2 schema (for example, Pods, Object or External metrics) + customMetrics: [] + # - type: Pods + # pods: + # metric: + # name: loki_lines_total + # target: + # type: AverageValue + # averageValue: 10k + behavior: + # -- Enable autoscaling behaviours + enabled: false + # -- define scale down policies, must conform to HPAScalingRules + scaleDown: {} + # -- define scale up policies, must conform to HPAScalingRules + scaleUp: {} + image: + # -- The Docker registry for the ingester image. Overrides `loki.image.registry` + registry: null + # -- Docker image repository for the ingester image. Overrides `loki.image.repository` + repository: null + # -- Docker image tag for the ingester image. 
Overrides `loki.image.tag` + tag: null + # -- Command to execute instead of defined in Docker image + command: null + priorityClassName: null + # -- Labels for ingester pods + podLabels: {} + # -- Annotations for ingester pods + podAnnotations: {} + # -- The name of the PriorityClass for ingester pods + # -- Labels for ingestor service + serviceLabels: {} + # -- Additional CLI args for the ingester + extraArgs: [] + # -- Environment variables to add to the ingester pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the ingester pods + extraEnvFrom: [] + # -- Volume mounts to add to the ingester pods + extraVolumeMounts: [] + # -- Volumes to add to the ingester pods + extraVolumes: [] + # -- Resource requests and limits for the ingester + resources: {} + # -- Containers to add to the ingester pods + extraContainers: [] + # -- Init containers to add to the ingester pods + initContainers: [] + # -- Grace period to allow the ingester to shutdown before it is killed. Especially for the ingestor, + # this must be increased. It must be long enough so ingesters can be gracefully shutdown flushing/transferring + # all data and to successfully leave the member ring on shutdown. + terminationGracePeriodSeconds: 300 + # -- Lifecycle for the ingester container + lifecycle: {} + # -- topologySpread for ingester pods. + # @default -- Defaults to allow skew no more than 1 node + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/component: ingester + # -- Affinity for ingester pods. Ignored if zoneAwareReplication is enabled. 
+ # @default -- Hard node anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: ingester + topologyKey: kubernetes.io/hostname + # -- Pod Disruption Budget maxUnavailable + maxUnavailable: 1 + # -- Node selector for ingester pods + nodeSelector: {} + # -- Tolerations for ingester pods + tolerations: [] + # -- readiness probe settings for ingester pods. If empty, use `loki.readinessProbe` + readinessProbe: {} + # -- liveness probe settings for ingester pods. If empty use `loki.livenessProbe` + livenessProbe: {} + persistence: + # -- Enable creating PVCs which is required when using boltdb-shipper + enabled: false + # -- Use emptyDir with ramdisk for storage. **Please note that all data in ingester will be lost on pod restart** + inMemory: false + # -- List of the ingester PVCs + # @notationType -- list + claims: + - name: data + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . + # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # - name: wal + # size: 150Gi + # -- Enable StatefulSetAutoDeletePVC feature + enableStatefulSetAutoDeletePVC: false + whenDeleted: Retain + whenScaled: Retain + # -- Adds the appProtocol field to the ingester service. This allows ingester to work with istio protocol selection. + appProtocol: + # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" + grpc: "" + # -- Enabling zone awareness on ingesters will create 3 statefulests where all writes will send a replica to each zone. 
+ # This is primarily intended to accelerate rollout operations by allowing for multiple ingesters within a single + # zone to be shutdown and restart simultaneously (the remaining 2 zones will be guaranteed to have at least one copy + # of the data). + # Note: This can be used to run Loki over multiple cloud provider availability zones however this is not currently + # recommended as Loki is not optimized for this and cross zone network traffic costs can become extremely high + # extremely quickly. Even with zone awareness enabled, it is recommended to run Loki in a single availability zone. + zoneAwareReplication: + # -- Enable zone awareness. + enabled: true + # -- The percent of replicas in each zone that will be restarted at once. In a value of 0-100 + maxUnavailablePct: 33 + # -- zoneA configuration + zoneA: + # -- optionally define a node selector for this zone + nodeSelector: null + # -- optionally define extra affinity rules, by default different zones are not allowed to schedule on the same host + extraAffinity: {} + # -- Specific annotations to add to zone A statefulset + annotations: {} + # -- Specific annotations to add to zone A pods + podAnnotations: {} + zoneB: + # -- optionally define a node selector for this zone + nodeSelector: null + # -- optionally define extra affinity rules, by default different zones are not allowed to schedule on the same host + extraAffinity: {} + # -- Specific annotations to add to zone B statefulset + annotations: {} + # -- Specific annotations to add to zone B pods + podAnnotations: {} + zoneC: + # -- optionally define a node selector for this zone + nodeSelector: null + # -- optionally define extra affinity rules, by default different zones are not allowed to schedule on the same host + extraAffinity: {} + # -- Specific annotations to add to zone C statefulset + annotations: {} + # -- Specific annotations to add to zone C pods + podAnnotations: {} + # -- The migration block allows migrating non zone aware ingesters to 
zone aware ingesters. + migration: + enabled: false + excludeDefaultZone: false + readPath: false + writePath: false + +# -- Configuration for the distributor +distributor: + # -- Number of replicas for the distributor + replicas: 0 + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld + autoscaling: + # -- Enable autoscaling for the distributor + enabled: false + # -- Minimum autoscaling replicas for the distributor + minReplicas: 1 + # -- Maximum autoscaling replicas for the distributor + maxReplicas: 3 + # -- Target CPU utilisation percentage for the distributor + targetCPUUtilizationPercentage: 60 + # -- Target memory utilisation percentage for the distributor + targetMemoryUtilizationPercentage: null + # -- Allows one to define custom metrics using the HPA/v2 schema (for example, Pods, Object or External metrics) + customMetrics: [] + # - type: Pods + # pods: + # metric: + # name: loki_lines_total + # target: + # type: AverageValue + # averageValue: 10k + behavior: + # -- Enable autoscaling behaviours + enabled: false + # -- define scale down policies, must conform to HPAScalingRules + scaleDown: {} + # -- define scale up policies, must conform to HPAScalingRules + scaleUp: {} + image: + # -- The Docker registry for the distributor image. Overrides `loki.image.registry` + registry: null + # -- Docker image repository for the distributor image. Overrides `loki.image.repository` + repository: null + # -- Docker image tag for the distributor image. 
Overrides `loki.image.tag` + tag: null + # -- Command to execute instead of defined in Docker image + command: null + # -- The name of the PriorityClass for distributor pods + priorityClassName: null + # -- Labels for distributor pods + podLabels: {} + # -- Annotations for distributor pods + podAnnotations: {} + # -- Labels for distributor service + serviceLabels: {} + # -- Additional CLI args for the distributor + extraArgs: [] + # -- Environment variables to add to the distributor pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the distributor pods + extraEnvFrom: [] + # -- Volume mounts to add to the distributor pods + extraVolumeMounts: [] + # -- Volumes to add to the distributor pods + extraVolumes: [] + # -- Resource requests and limits for the distributor + resources: {} + # -- Containers to add to the distributor pods + extraContainers: [] + # -- Grace period to allow the distributor to shutdown before it is killed + terminationGracePeriodSeconds: 30 + # -- Affinity for distributor pods. + # @default -- Hard node anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: distributor + topologyKey: kubernetes.io/hostname + # -- Pod Disruption Budget maxUnavailable + maxUnavailable: null + # -- Max Surge for distributor pods + maxSurge: 0 + # -- Node selector for distributor pods + nodeSelector: {} + # -- Tolerations for distributor pods + tolerations: [] + # -- Adds the appProtocol field to the distributor service. This allows distributor to work with istio protocol selection. + appProtocol: + # -- Set the optional grpc service protocol. 
Ex: "grpc", "http2" or "https" + grpc: "" + +# -- Configuration for the querier +querier: + # -- Number of replicas for the querier + replicas: 0 + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld + autoscaling: + # -- Enable autoscaling for the querier, this is only used if `indexGateway.enabled: true` + enabled: false + # -- Minimum autoscaling replicas for the querier + minReplicas: 1 + # -- Maximum autoscaling replicas for the querier + maxReplicas: 3 + # -- Target CPU utilisation percentage for the querier + targetCPUUtilizationPercentage: 60 + # -- Target memory utilisation percentage for the querier + targetMemoryUtilizationPercentage: null + # -- Allows one to define custom metrics using the HPA/v2 schema (for example, Pods, Object or External metrics) + customMetrics: [] + # - type: External + # external: + # metric: + # name: loki_inflight_queries + # target: + # type: AverageValue + # averageValue: 12 + behavior: + # -- Enable autoscaling behaviours + enabled: false + # -- define scale down policies, must conform to HPAScalingRules + scaleDown: {} + # -- define scale up policies, must conform to HPAScalingRules + scaleUp: {} + image: + # -- The Docker registry for the querier image. Overrides `loki.image.registry` + registry: null + # -- Docker image repository for the querier image. Overrides `loki.image.repository` + repository: null + # -- Docker image tag for the querier image. 
Overrides `loki.image.tag` + tag: null + # -- Command to execute instead of defined in Docker image + command: null + # -- The name of the PriorityClass for querier pods + priorityClassName: null + # -- Labels for querier pods + podLabels: {} + # -- Annotations for querier pods + podAnnotations: {} + # -- Labels for querier service + serviceLabels: {} + # -- Additional CLI args for the querier extraArgs: [] - # -- Environment variables to add to the write pods + # -- Environment variables to add to the querier pods extraEnv: [] - # -- Environment variables from secrets or configmaps to add to the write pods + # -- Environment variables from secrets or configmaps to add to the querier pods extraEnvFrom: [] - # -- Lifecycle for the write container - lifecycle: {} - # -- The default /flush_shutdown preStop hook is recommended as part of the ingester - # scaledown process so it's added to the template by default when autoscaling is enabled, - # but it's disabled to optimize rolling restarts in instances that will never be scaled - # down or when using chunks storage with WAL disabled. - # https://github.com/grafana/loki/blob/main/docs/sources/operations/storage/wal.md#how-to-scale-updown - # -- Init containers to add to the write pods - initContainers: [] - # -- Containers to add to the write pods - extraContainers: [] - # -- Volume mounts to add to the write pods + # -- Volume mounts to add to the querier pods extraVolumeMounts: [] - # -- Volumes to add to the write pods + # -- Volumes to add to the querier pods extraVolumes: [] - # -- volumeClaimTemplates to add to StatefulSet - extraVolumeClaimTemplates: [] - # -- Resource requests and limits for the write + # -- Resource requests and limits for the querier resources: {} - # -- Grace period to allow the write to shutdown before it is killed. Especially for the ingester, - # this must be increased. 
It must be long enough so writes can be gracefully shutdown flushing/transferring - # all data and to successfully leave the member ring on shutdown. - terminationGracePeriodSeconds: 300 - # -- Affinity for write pods. Passed through `tpl` and, thus, to be configured as string - # @default -- Hard node and soft zone anti-affinity - affinity: | + # -- Containers to add to the querier pods + extraContainers: [] + # -- Init containers to add to the querier pods + initContainers: [] + # -- Grace period to allow the querier to shutdown before it is killed + terminationGracePeriodSeconds: 30 + # -- topologySpread for querier pods. + # @default -- Defaults to allow skew no more then 1 node + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/component: querier + # -- Affinity for querier pods. + # @default -- Hard node anti-affinity + affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: - {{- include "loki.writeSelectorLabels" . | nindent 10 }} + app.kubernetes.io/component: querier topologyKey: kubernetes.io/hostname - # -- DNS config for write pods - dnsConfig: {} - # -- Node selector for write pods + # -- Pod Disruption Budget maxUnavailable + maxUnavailable: null + # -- Max Surge for querier pods + maxSurge: 0 + # -- Node selector for querier pods nodeSelector: {} - # -- Topology Spread Constraints for write pods - topologySpreadConstraints: [] - # -- Tolerations for write pods + # -- Tolerations for querier pods tolerations: [] - # -- The default is to deploy all pods in parallel. 
- podManagementPolicy: "Parallel" + # -- DNSConfig for querier pods + dnsConfig: {} persistence: - # -- Enable volume claims in pod spec - volumeClaimsEnabled: true - # -- Parameters used for the `data` volume when volumeClaimEnabled if false - dataVolumeParameters: - emptyDir: {} - # -- Enable StatefulSetAutoDeletePVC feature - enableStatefulSetAutoDeletePVC: false + # -- Enable creating PVCs for the querier cache + enabled: false # -- Size of persistent disk size: 10Gi # -- Storage class to be used. @@ -867,173 +2018,420 @@ write: # If empty or set to null, no storageClassName spec is # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). storageClass: null - # -- Selector for persistent disk - selector: null -# Configuration for the table-manager -tableManager: - # -- Specifies whether the table-manager should be enabled - enabled: false + # -- Annotations for querier PVCs + annotations: {} + # -- Adds the appProtocol field to the querier service. This allows querier to work with istio protocol selection. + appProtocol: + # -- Set the optional grpc service protocol. 
Ex: "grpc", "http2" or "https" + grpc: "" + +# -- Configuration for the query-frontend +queryFrontend: + # -- Number of replicas for the query-frontend + replicas: 0 + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld + autoscaling: + # -- Enable autoscaling for the query-frontend + enabled: false + # -- Minimum autoscaling replicas for the query-frontend + minReplicas: 1 + # -- Maximum autoscaling replicas for the query-frontend + maxReplicas: 3 + # -- Target CPU utilisation percentage for the query-frontend + targetCPUUtilizationPercentage: 60 + # -- Target memory utilisation percentage for the query-frontend + targetMemoryUtilizationPercentage: null + # -- Allows one to define custom metrics using the HPA/v2 schema (for example, Pods, Object or External metrics) + customMetrics: [] + # - type: Pods + # pods: + # metric: + # name: loki_query_rate + # target: + # type: AverageValue + # averageValue: 100 + behavior: + # -- Enable autoscaling behaviours + enabled: false + # -- define scale down policies, must conform to HPAScalingRules + scaleDown: {} + # -- define scale up policies, must conform to HPAScalingRules + scaleUp: {} image: - # -- The Docker registry for the table-manager image. Overrides `loki.image.registry` + # -- The Docker registry for the query-frontend image. Overrides `loki.image.registry` registry: null - # -- Docker image repository for the table-manager image. Overrides `loki.image.repository` + # -- Docker image repository for the query-frontend image. Overrides `loki.image.repository` repository: null - # -- Docker image tag for the table-manager image. Overrides `loki.image.tag` + # -- Docker image tag for the query-frontend image. 
Overrides `loki.image.tag` tag: null # -- Command to execute instead of defined in Docker image command: null - # -- The name of the PriorityClass for table-manager pods + # -- The name of the PriorityClass for query-frontend pods priorityClassName: null - # -- Labels for table-manager pods + # -- Labels for query-frontend pods podLabels: {} - # -- Annotations for table-manager deployment - annotations: {} - # -- Annotations for table-manager pods + # -- Annotations for query-frontend pods podAnnotations: {} - service: - # -- Annotations for table-manager Service - annotations: {} - # -- Additional labels for table-manager Service - labels: {} - # -- Additional CLI args for the table-manager + # -- Labels for query-frontend service + serviceLabels: {} + # -- Additional CLI args for the query-frontend extraArgs: [] - # -- Environment variables to add to the table-manager pods + # -- Environment variables to add to the query-frontend pods extraEnv: [] - # -- Environment variables from secrets or configmaps to add to the table-manager pods + # -- Environment variables from secrets or configmaps to add to the query-frontend pods extraEnvFrom: [] - # -- Volume mounts to add to the table-manager pods + # -- Volume mounts to add to the query-frontend pods extraVolumeMounts: [] - # -- Volumes to add to the table-manager pods + # -- Volumes to add to the query-frontend pods extraVolumes: [] - # -- Resource requests and limits for the table-manager + # -- Resource requests and limits for the query-frontend resources: {} - # -- Containers to add to the table-manager pods + # -- Containers to add to the query-frontend pods extraContainers: [] - # -- Grace period to allow the table-manager to shutdown before it is killed + # -- Grace period to allow the query-frontend to shutdown before it is killed terminationGracePeriodSeconds: 30 - # -- Affinity for table-manager pods. 
Passed through `tpl` and, thus, to be configured as string - # @default -- Hard node and soft zone anti-affinity - affinity: | + # -- Affinity for query-frontend pods. + # @default -- Hard node anti-affinity + affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: - {{- include "loki.tableManagerSelectorLabels" . | nindent 10 }} + app.kubernetes.io/component: query-frontend topologyKey: kubernetes.io/hostname - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchLabels: - {{- include "loki.tableManagerSelectorLabels" . | nindent 12 }} - topologyKey: failure-domain.beta.kubernetes.io/zone - # -- DNS config table-manager pods - dnsConfig: {} - # -- Node selector for table-manager pods + # -- Pod Disruption Budget maxUnavailable + maxUnavailable: null + # -- Node selector for query-frontend pods nodeSelector: {} - # -- Tolerations for table-manager pods + # -- Tolerations for query-frontend pods tolerations: [] - # -- Enable deletes by retention - retention_deletes_enabled: false - # -- Set retention period - retention_period: 0 -# Configuration for the read pod(s) -read: - # -- Number of replicas for the read - replicas: 3 - autoscaling: - # -- Enable autoscaling for the read, this is only used if `queryIndex.enabled: true` - enabled: false - # -- Minimum autoscaling replicas for the read - minReplicas: 2 - # -- Maximum autoscaling replicas for the read - maxReplicas: 6 - # -- Target CPU utilisation percentage for the read - targetCPUUtilizationPercentage: 60 - # -- Target memory utilisation percentage for the read - targetMemoryUtilizationPercentage: - # -- Behavior policies while scaling. 
- behavior: {} - # scaleUp: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 1 - # periodSeconds: 60 - # scaleDown: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 1 - # periodSeconds: 180 + # -- Adds the appProtocol field to the queryFrontend service. This allows queryFrontend to work with istio protocol selection. + appProtocol: + # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" + grpc: "" + +# -- Configuration for the query-scheduler +queryScheduler: + # -- Number of replicas for the query-scheduler. + # It should be lower than `-querier.max-concurrent` to avoid generating back-pressure in queriers; + # it's also recommended that this value evenly divides the latter + replicas: 0 + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld image: - # -- The Docker registry for the read image. Overrides `loki.image.registry` + # -- The Docker registry for the query-scheduler image. Overrides `loki.image.registry` registry: null - # -- Docker image repository for the read image. Overrides `loki.image.repository` + # -- Docker image repository for the query-scheduler image. Overrides `loki.image.repository` repository: null - # -- Docker image tag for the read image. Overrides `loki.image.tag` + # -- Docker image tag for the query-scheduler image. 
Overrides `loki.image.tag` tag: null - # -- The name of the PriorityClass for read pods + # -- The name of the PriorityClass for query-scheduler pods priorityClassName: null - # -- Annotations for read deployment - annotations: {} - # -- Annotations for read pods + # -- Labels for query-scheduler pods + podLabels: {} + # -- Annotations for query-scheduler pods podAnnotations: {} - # -- Additional labels for each `read` pod + # -- Labels for query-scheduler service + serviceLabels: {} + # -- Additional CLI args for the query-scheduler + extraArgs: [] + # -- Environment variables to add to the query-scheduler pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the query-scheduler pods + extraEnvFrom: [] + # -- Volume mounts to add to the query-scheduler pods + extraVolumeMounts: [] + # -- Volumes to add to the query-scheduler pods + extraVolumes: [] + # -- Resource requests and limits for the query-scheduler + resources: {} + # -- Containers to add to the query-scheduler pods + extraContainers: [] + # -- Grace period to allow the query-scheduler to shutdown before it is killed + terminationGracePeriodSeconds: 30 + # -- Affinity for query-scheduler pods. + # @default -- Hard node anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: query-scheduler + topologyKey: kubernetes.io/hostname + # -- Pod Disruption Budget maxUnavailable + maxUnavailable: 1 + # -- Node selector for query-scheduler pods + nodeSelector: {} + # -- Tolerations for query-scheduler pods + tolerations: [] + # -- Set the optional grpc service protocol. 
Ex: "grpc", "http2" or "https" + appProtocol: + grpc: "" + +# -- Configuration for the index-gateway +indexGateway: + # -- Number of replicas for the index-gateway + replicas: 0 + # -- Whether the index gateway should join the memberlist hashring + joinMemberlist: true + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld + image: + # -- The Docker registry for the index-gateway image. Overrides `loki.image.registry` + registry: null + # -- Docker image repository for the index-gateway image. Overrides `loki.image.repository` + repository: null + # -- Docker image tag for the index-gateway image. Overrides `loki.image.tag` + tag: null + # -- The name of the PriorityClass for index-gateway pods + priorityClassName: null + # -- Labels for index-gateway pods podLabels: {} - # -- Additional selector labels for each `read` pod - selectorLabels: {} - service: - # -- Annotations for read Service - annotations: {} - # -- Additional labels for read Service - labels: {} - # -- Comma-separated list of Loki modules to load for the read - targetModule: "read" - # -- Whether or not to use the 2 target type simple scalable mode (read, write) or the - # 3 target type (read, write, backend). Legacy refers to the 2 target type, so true will - # run two targets, false will run 3 targets. 
- legacyReadTarget: false - # -- Additional CLI args for the read + # -- Annotations for index-gateway pods + podAnnotations: {} + # -- Labels for index-gateway service + serviceLabels: {} + # -- Additional CLI args for the index-gateway extraArgs: [] - # -- Containers to add to the read pods + # -- Environment variables to add to the index-gateway pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the index-gateway pods + extraEnvFrom: [] + # -- Volume mounts to add to the index-gateway pods + extraVolumeMounts: [] + # -- Volumes to add to the index-gateway pods + extraVolumes: [] + # -- Resource requests and limits for the index-gateway + resources: {} + # -- Containers to add to the index-gateway pods extraContainers: [] - # -- Environment variables to add to the read pods + # -- Init containers to add to the index-gateway pods + initContainers: [] + # -- Grace period to allow the index-gateway to shutdown before it is killed. + terminationGracePeriodSeconds: 300 + # -- Affinity for index-gateway pods. + # @default -- Hard node anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: index-gateway + topologyKey: kubernetes.io/hostname + # -- Pod Disruption Budget maxUnavailable + maxUnavailable: null + # -- Node selector for index-gateway pods + nodeSelector: {} + # -- Tolerations for index-gateway pods + tolerations: [] + persistence: + # -- Enable creating PVCs which is required when using boltdb-shipper + enabled: false + # -- Use emptyDir with ramdisk for storage. **Please note that all data in indexGateway will be lost on pod restart** + inMemory: false + # -- Size of persistent or memory disk + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . + # If set to "-", storageClassName: "", which disables dynamic provisioning. 
+ # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # -- Annotations for index gateway PVCs + annotations: {} + # -- Enable StatefulSetAutoDeletePVC feature + enableStatefulSetAutoDeletePVC: false + whenDeleted: Retain + whenScaled: Retain + # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" + appProtocol: + grpc: "" + +# -- Configuration for the compactor +compactor: + # -- Number of replicas for the compactor + replicas: 0 + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld + image: + # -- The Docker registry for the compactor image. Overrides `loki.image.registry` + registry: null + # -- Docker image repository for the compactor image. Overrides `loki.image.repository` + repository: null + # -- Docker image tag for the compactor image. Overrides `loki.image.tag` + tag: null + # -- Command to execute instead of defined in Docker image + command: null + # -- The name of the PriorityClass for compactor pods + priorityClassName: null + # -- Labels for compactor pods + podLabels: {} + # -- Annotations for compactor pods + podAnnotations: {} + # -- Affinity for compactor pods. 
+ # @default -- Hard node anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: compactor + topologyKey: kubernetes.io/hostname + # -- Labels for compactor service + serviceLabels: {} + # -- Additional CLI args for the compactor + extraArgs: [] + # -- Environment variables to add to the compactor pods extraEnv: [] - # -- Environment variables from secrets or configmaps to add to the read pods + # -- Environment variables from secrets or configmaps to add to the compactor pods extraEnvFrom: [] - # -- Lifecycle for the read container - lifecycle: {} - # -- Volume mounts to add to the read pods + # -- Volume mounts to add to the compactor pods extraVolumeMounts: [] - # -- Volumes to add to the read pods + # -- Volumes to add to the compactor pods extraVolumes: [] - # -- Resource requests and limits for the read + # -- readiness probe settings for compactor pods. If empty, use `loki.readinessProbe` + readinessProbe: {} + # -- liveness probe settings for compactor pods. If empty use `loki.livenessProbe` + livenessProbe: {} + # -- Resource requests and limits for the compactor resources: {} - # -- Grace period to allow the read to shutdown before it is killed + # -- Containers to add to the compactor pods + extraContainers: [] + # -- Init containers to add to the compactor pods + initContainers: [] + # -- Grace period to allow the compactor to shutdown before it is killed terminationGracePeriodSeconds: 30 - # -- Affinity for read pods. Passed through `tpl` and, thus, to be configured as string - # @default -- Hard node and soft zone anti-affinity - affinity: | + # -- Node selector for compactor pods + nodeSelector: {} + # -- Tolerations for compactor pods + tolerations: [] + # -- Set the optional grpc service protocol. 
Ex: "grpc", "http2" or "https" + appProtocol: + grpc: "" + persistence: + # -- Enable creating PVCs for the compactor + enabled: false + # -- Size of persistent disk + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . + # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # -- Annotations for compactor PVCs + annotations: {} + # -- List of the compactor PVCs + # @notationType -- list + claims: + - name: data + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . + # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # - name: wal + # size: 150Gi + # -- Enable StatefulSetAutoDeletePVC feature + enableStatefulSetAutoDeletePVC: false + whenDeleted: Retain + whenScaled: Retain + + serviceAccount: + create: false + # -- The name of the ServiceAccount to use for the compactor. + # If not set and create is true, a name is generated by appending + # "-compactor" to the common ServiceAccount. + name: null + # -- Image pull secrets for the compactor service account + imagePullSecrets: [] + # -- Annotations for the compactor service account + annotations: {} + # -- Set this toggle to false to opt out of automounting API credentials for the service account + automountServiceAccountToken: true + +# -- Configuration for the bloom gateway +bloomGateway: + # -- Number of replicas for the bloom gateway + replicas: 0 + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld + image: + # -- The Docker registry for the bloom gateway image. 
Overrides `loki.image.registry` + registry: null + # -- Docker image repository for the bloom gateway image. Overrides `loki.image.repository` + repository: null + # -- Docker image tag for the bloom gateway image. Overrides `loki.image.tag` + tag: null + # -- Command to execute instead of defined in Docker image + command: null + # -- The name of the PriorityClass for bloom gateway pods + priorityClassName: null + # -- Labels for bloom gateway pods + podLabels: {} + # -- Annotations for bloom gateway pods + podAnnotations: {} + # -- Affinity for bloom gateway pods. + # @default -- Hard node anti-affinity + affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: - {{- include "loki.readSelectorLabels" . | nindent 10 }} + app.kubernetes.io/component: bloom-gateway topologyKey: kubernetes.io/hostname - # -- DNS config for read pods - dnsConfig: {} - # -- Node selector for read pods + # -- Labels for bloom gateway service + serviceLabels: {} + # -- Additional CLI args for the bloom gateway + extraArgs: [] + # -- Environment variables to add to the bloom gateway pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the bloom gateway pods + extraEnvFrom: [] + # -- Volume mounts to add to the bloom gateway pods + extraVolumeMounts: [] + # -- Volumes to add to the bloom gateway pods + extraVolumes: [] + # -- readiness probe settings for bloom gateway pods. If empty, use `loki.readinessProbe` + readinessProbe: {} + # -- liveness probe settings for bloom gateway pods. 
If empty use `loki.livenessProbe` + livenessProbe: {} + # -- Resource requests and limits for the bloom gateway + resources: {} + # -- Containers to add to the bloom gateway pods + extraContainers: [] + # -- Init containers to add to the bloom gateway pods + initContainers: [] + # -- Grace period to allow the bloom gateway to shutdown before it is killed + terminationGracePeriodSeconds: 30 + # -- Node selector for bloom gateway pods nodeSelector: {} - # -- Topology Spread Constraints for read pods - topologySpreadConstraints: [] - # -- Tolerations for read pods + # -- Tolerations for bloom gateway pods tolerations: [] - # -- The default is to deploy all pods in parallel. - podManagementPolicy: "Parallel" + # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" + appProtocol: + grpc: "" persistence: - # -- Enable StatefulSetAutoDeletePVC feature - enableStatefulSetAutoDeletePVC: true + # -- Enable creating PVCs for the bloom gateway + enabled: false # -- Size of persistent disk size: 10Gi # -- Storage class to be used. @@ -1042,106 +2440,106 @@ read: # If empty or set to null, no storageClassName spec is # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). storageClass: null - # -- Selector for persistent disk - selector: null -# Configuration for the backend pod(s) -backend: - # -- Number of replicas for the backend - replicas: 3 - autoscaling: - # -- Enable autoscaling for the backend. - enabled: false - # -- Minimum autoscaling replicas for the backend. - minReplicas: 3 - # -- Maximum autoscaling replicas for the backend. - maxReplicas: 6 - # -- Target CPU utilization percentage for the backend. - targetCPUUtilizationPercentage: 60 - # -- Target memory utilization percentage for the backend. - targetMemoryUtilizationPercentage: - # -- Behavior policies while scaling. 
- behavior: {} - # scaleUp: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 1 - # periodSeconds: 60 - # scaleDown: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 1 - # periodSeconds: 180 + # -- Annotations for bloom gateway PVCs + annotations: {} + # -- List of the bloom gateway PVCs + # @notationType -- list + claims: + - name: data + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . + # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # - name: wal + # size: 150Gi + # -- Enable StatefulSetAutoDeletePVC feature + enableStatefulSetAutoDeletePVC: false + whenDeleted: Retain + whenScaled: Retain + + serviceAccount: + create: false + # -- The name of the ServiceAccount to use for the bloom gateway. + # If not set and create is true, a name is generated by appending + # "-bloom-gateway" to the common ServiceAccount. + name: null + # -- Image pull secrets for the bloom gateway service account + imagePullSecrets: [] + # -- Annotations for the bloom gateway service account + annotations: {} + # -- Set this toggle to false to opt out of automounting API credentials for the service account + automountServiceAccountToken: true + +# -- Configuration for the bloom compactor +bloomCompactor: + # -- Number of replicas for the bloom compactor + replicas: 0 + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld image: - # -- The Docker registry for the backend image. Overrides `loki.image.registry` + # -- The Docker registry for the bloom compactor image. Overrides `loki.image.registry` registry: null - # -- Docker image repository for the backend image. Overrides `loki.image.repository` + # -- Docker image repository for the bloom compactor image. 
Overrides `loki.image.repository` repository: null - # -- Docker image tag for the backend image. Overrides `loki.image.tag` + # -- Docker image tag for the bloom compactor image. Overrides `loki.image.tag` tag: null - # -- The name of the PriorityClass for backend pods + # -- Command to execute instead of defined in Docker image + command: null + # -- The name of the PriorityClass for bloom compactor pods priorityClassName: null - # -- Annotations for backend StatefulSet - annotations: {} - # -- Annotations for backend pods - podAnnotations: {} - # -- Additional labels for each `backend` pod + # -- Labels for bloom compactor pods podLabels: {} - # -- Additional selector labels for each `backend` pod - selectorLabels: {} - service: - # -- Annotations for backend Service - annotations: {} - # -- Additional labels for backend Service - labels: {} - # -- Comma-separated list of Loki modules to load for the read - targetModule: "backend" - # -- Additional CLI args for the backend + # -- Annotations for bloom compactor pods + podAnnotations: {} + # -- Affinity for bloom compactor pods. 
+ # @default -- Hard node anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: bloom-compactor + topologyKey: kubernetes.io/hostname + # -- Labels for bloom compactor service + serviceLabels: {} + # -- Additional CLI args for the bloom compactor extraArgs: [] - # -- Environment variables to add to the backend pods + # -- Environment variables to add to the bloom compactor pods extraEnv: [] - # -- Environment variables from secrets or configmaps to add to the backend pods + # -- Environment variables from secrets or configmaps to add to the bloom compactor pods extraEnvFrom: [] - # -- Init containers to add to the backend pods - initContainers: [] - # -- Volume mounts to add to the backend pods + # -- Volume mounts to add to the bloom compactor pods extraVolumeMounts: [] - # -- Volumes to add to the backend pods + # -- Volumes to add to the bloom compactor pods extraVolumes: [] - # -- Resource requests and limits for the backend + # -- readiness probe settings for bloom compactor pods. If empty, use `loki.readinessProbe` + readinessProbe: {} + # -- liveness probe settings for bloom compactor pods. If empty use `loki.livenessProbe` + livenessProbe: {} + # -- Resource requests and limits for the bloom compactor resources: {} - # -- Grace period to allow the backend to shutdown before it is killed. Especially for the ingester, - # this must be increased. It must be long enough so backends can be gracefully shutdown flushing/transferring - # all data and to successfully leave the member ring on shutdown. - terminationGracePeriodSeconds: 300 - # -- Affinity for backend pods. Passed through `tpl` and, thus, to be configured as string - # @default -- Hard node and soft zone anti-affinity - affinity: | - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - {{- include "loki.backendSelectorLabels" . 
| nindent 10 }} - topologyKey: kubernetes.io/hostname - # -- DNS config for backend pods - dnsConfig: {} - # -- Node selector for backend pods + # -- Containers to add to the bloom compactor pods + extraContainers: [] + # -- Init containers to add to the bloom compactor pods + initContainers: [] + # -- Grace period to allow the bloom compactor to shutdown before it is killed + terminationGracePeriodSeconds: 30 + # -- Node selector for bloom compactor pods nodeSelector: {} - # -- Topology Spread Constraints for backend pods - topologySpreadConstraints: [] - # -- Tolerations for backend pods + # -- Tolerations for bloom compactor pods tolerations: [] - # -- The default is to deploy all pods in parallel. - podManagementPolicy: "Parallel" + # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" + appProtocol: + grpc: "" persistence: - # -- Enable volume claims in pod spec - volumeClaimsEnabled: true - # -- Parameters used for the `data` volume when volumeClaimEnabled if false - dataVolumeParameters: - emptyDir: {} - # -- Enable StatefulSetAutoDeletePVC feature - enableStatefulSetAutoDeletePVC: true + # -- Enable creating PVCs for the bloom compactor + enabled: false # -- Size of persistent disk size: 10Gi # -- Storage class to be used. @@ -1150,392 +2548,546 @@ backend: # If empty or set to null, no storageClassName spec is # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). storageClass: null - # -- Selector for persistent disk - selector: null -# Configuration for the single binary node(s) -singleBinary: - # -- Number of replicas for the single binary + # -- Annotations for bloom compactor PVCs + annotations: {} + # -- List of the bloom compactor PVCs + # @notationType -- list + claims: + - name: data + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . + # If set to "-", storageClassName: "", which disables dynamic provisioning. 
+ # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # - name: wal + # size: 150Gi + # -- Enable StatefulSetAutoDeletePVC feature + enableStatefulSetAutoDeletePVC: false + whenDeleted: Retain + whenScaled: Retain + + serviceAccount: + create: false + # -- The name of the ServiceAccount to use for the bloom compactor. + # If not set and create is true, a name is generated by appending + # "-bloom-compactor" to the common ServiceAccount. + name: null + # -- Image pull secrets for the bloom compactor service account + imagePullSecrets: [] + # -- Annotations for the bloom compactor service account + annotations: {} + # -- Set this toggle to false to opt out of automounting API credentials for the service account + automountServiceAccountToken: true + +# -- Configuration for the pattern ingester +patternIngester: + # -- Number of replicas for the pattern ingester replicas: 0 - autoscaling: - # -- Enable autoscaling - enabled: false - # -- Minimum autoscaling replicas for the single binary - minReplicas: 1 - # -- Maximum autoscaling replicas for the single binary - maxReplicas: 3 - # -- Target CPU utilisation percentage for the single binary - targetCPUUtilizationPercentage: 60 - # -- Target memory utilisation percentage for the single binary - targetMemoryUtilizationPercentage: + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld image: - # -- The Docker registry for the single binary image. Overrides `loki.image.registry` + # -- The Docker registry for the pattern ingester image. Overrides `loki.image.registry` registry: null - # -- Docker image repository for the single binary image. Overrides `loki.image.repository` + # -- Docker image repository for the pattern ingester image. Overrides `loki.image.repository` repository: null - # -- Docker image tag for the single binary image. 
Overrides `loki.image.tag` + # -- Docker image tag for the pattern ingester image. Overrides `loki.image.tag` tag: null - # -- The name of the PriorityClass for single binary pods + # -- Command to execute instead of defined in Docker image + command: null + # -- The name of the PriorityClass for pattern ingester pods priorityClassName: null - # -- Annotations for single binary StatefulSet - annotations: {} - # -- Annotations for single binary pods - podAnnotations: {} - # -- Additional labels for each `single binary` pod + # -- Labels for pattern ingester pods podLabels: {} - # -- Additional selector labels for each `single binary` pod - selectorLabels: {} - service: - # -- Annotations for single binary Service - annotations: {} - # -- Additional labels for single binary Service - labels: {} - # -- Comma-separated list of Loki modules to load for the single binary - targetModule: "all" - # -- Labels for single binary service + # -- Annotations for pattern ingester pods + podAnnotations: {} + # -- Affinity for pattern ingester pods. 
+ # @default -- Hard node anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: pattern-ingester + topologyKey: kubernetes.io/hostname + # -- Labels for pattern ingester service + serviceLabels: {} + # -- Additional CLI args for the pattern ingester extraArgs: [] - # -- Environment variables to add to the single binary pods + # -- Environment variables to add to the pattern ingester pods extraEnv: [] - # -- Environment variables from secrets or configmaps to add to the single binary pods + # -- Environment variables from secrets or configmaps to add to the pattern ingester pods extraEnvFrom: [] - # -- Extra containers to add to the single binary loki pod - extraContainers: [] - # -- Init containers to add to the single binary pods - initContainers: [] - # -- Volume mounts to add to the single binary pods + # -- Volume mounts to add to the pattern ingester pods extraVolumeMounts: [] - # -- Volumes to add to the single binary pods + # -- Volumes to add to the pattern ingester pods extraVolumes: [] - # -- Resource requests and limits for the single binary + # -- readiness probe settings for ingester pods. If empty, use `loki.readinessProbe` + readinessProbe: {} + # -- liveness probe settings for ingester pods. If empty use `loki.livenessProbe` + livenessProbe: {} + # -- Resource requests and limits for the pattern ingester resources: {} - # -- Grace period to allow the single binary to shutdown before it is killed + # -- Containers to add to the pattern ingester pods + extraContainers: [] + # -- Init containers to add to the pattern ingester pods + initContainers: [] + # -- Grace period to allow the pattern ingester to shutdown before it is killed terminationGracePeriodSeconds: 30 - # -- Affinity for single binary pods. 
Passed through `tpl` and, thus, to be configured as string - # @default -- Hard node and soft zone anti-affinity - affinity: | - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - {{- include "loki.singleBinarySelectorLabels" . | nindent 10 }} - topologyKey: kubernetes.io/hostname - # -- DNS config for single binary pods - dnsConfig: {} - # -- Node selector for single binary pods + # -- Node selector for pattern ingester pods nodeSelector: {} - # -- Tolerations for single binary pods + # -- Tolerations for pattern ingester pods tolerations: [] - persistence: - # -- Enable StatefulSetAutoDeletePVC feature - enableStatefulSetAutoDeletePVC: true - # -- Enable persistent disk - enabled: true - # -- Size of persistent disk - size: 10Gi - # -- Storage class to be used. - # If defined, storageClassName: . - # If set to "-", storageClassName: "", which disables dynamic provisioning. - # If empty or set to null, no storageClassName spec is - # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). - storageClass: null - # -- Selector for persistent disk - selector: null -# Use either this ingress or the gateway, but not both at once. -# If you enable this, make sure to disable the gateway. -# You'll need to supply authn configuration for your ingress controller. 
-ingress: - enabled: false - ingressClassName: "" - annotations: {} - # nginx.ingress.kubernetes.io/auth-type: basic - # nginx.ingress.kubernetes.io/auth-secret: loki-distributed-basic-auth - # nginx.ingress.kubernetes.io/auth-secret-type: auth-map - # nginx.ingress.kubernetes.io/configuration-snippet: | - # proxy_set_header X-Scope-OrgID $remote_user; - labels: {} - # blackbox.monitoring.exclude: "true" - paths: - write: - - /api/prom/push - - /loki/api/v1/push - read: - - /api/prom/tail - - /loki/api/v1/tail - - /loki/api - - /api/prom/rules - - /loki/api/v1/rules - - /prometheus/api/v1/rules - - /prometheus/api/v1/alerts - singleBinary: - - /api/prom/push - - /loki/api/v1/push - - /api/prom/tail - - /loki/api/v1/tail - - /loki/api - - /api/prom/rules - - /loki/api/v1/rules - - /prometheus/api/v1/rules - - /prometheus/api/v1/alerts - # -- Hosts configuration for the ingress, passed through the `tpl` function to allow templating - hosts: - - loki.example.com - # -- TLS configuration for the ingress. Hosts passed through the `tpl` function to allow templating - tls: [] -# - hosts: -# - loki.example.com -# secretName: loki-distributed-tls + # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https" + appProtocol: + grpc: "" + persistence: + # -- Enable creating PVCs for the pattern ingester + enabled: false + # -- Size of persistent disk + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . + # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # -- Annotations for pattern ingester PVCs + annotations: {} + # -- List of the pattern ingester PVCs + # @notationType -- list + claims: + - name: data + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . 
+ # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # - name: wal + # size: 150Gi + # -- Enable StatefulSetAutoDeletePVC feature + enableStatefulSetAutoDeletePVC: false + whenDeleted: Retain + whenScaled: Retain -# Configuration for the memberlist service -memberlist: - service: - publishNotReadyAddresses: false -# Configuration for the gateway -gateway: - # -- Specifies whether the gateway should be enabled + serviceAccount: + create: false + # -- The name of the ServiceAccount to use for the pattern ingester. + # If not set and create is true, a name is generated by appending + # "-pattern-ingester" to the common ServiceAccount. + name: null + # -- Image pull secrets for the pattern ingester service account + imagePullSecrets: [] + # -- Annotations for the pattern ingester service account + annotations: {} + # -- Set this toggle to false to opt out of automounting API credentials for the service account + automountServiceAccountToken: true + +# -- Configuration for the ruler +ruler: + # -- The ruler component is optional and can be disabled if desired. 
enabled: true - # -- Number of replicas for the gateway - replicas: 1 - # -- Enable logging of 2xx and 3xx HTTP requests - verboseLogging: true - autoscaling: - # -- Enable autoscaling for the gateway - enabled: false - # -- Minimum autoscaling replicas for the gateway - minReplicas: 1 - # -- Maximum autoscaling replicas for the gateway - maxReplicas: 3 - # -- Target CPU utilisation percentage for the gateway - targetCPUUtilizationPercentage: 60 - # -- Target memory utilisation percentage for the gateway - targetMemoryUtilizationPercentage: - # -- See `kubectl explain deployment.spec.strategy` for more - # -- ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy - # -- Behavior policies while scaling. - behavior: {} - # scaleUp: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 1 - # periodSeconds: 60 - # scaleDown: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 1 - # periodSeconds: 180 - deploymentStrategy: - type: RollingUpdate + # -- Number of replicas for the ruler + replicas: 0 + # -- hostAliases to add + hostAliases: [] + # - ip: 1.2.3.4 + # hostnames: + # - domain.tld image: - # -- The Docker registry for the gateway image - registry: docker.io - # -- The gateway image repository - repository: nginxinc/nginx-unprivileged - # -- The gateway image tag - tag: 1.24-alpine - # -- Overrides the gateway image tag with an image digest - digest: null - # -- The gateway image pull policy - pullPolicy: IfNotPresent - # -- The name of the PriorityClass for gateway pods + # -- The Docker registry for the ruler image. Overrides `loki.image.registry` + registry: null + # -- Docker image repository for the ruler image. Overrides `loki.image.repository` + repository: null + # -- Docker image tag for the ruler image. 
Overrides `loki.image.tag` + tag: null + # -- Command to execute instead of defined in Docker image + command: null + # -- The name of the PriorityClass for ruler pods priorityClassName: null - # -- Annotations for gateway deployment - annotations: {} - # -- Annotations for gateway pods - podAnnotations: {} - # -- Additional labels for gateway pods + # -- Labels for compactor pods podLabels: {} - # -- Additional CLI args for the gateway + # -- Annotations for ruler pods + podAnnotations: {} + # -- Labels for ruler service + serviceLabels: {} + # -- Additional CLI args for the ruler extraArgs: [] - # -- Environment variables to add to the gateway pods + # -- Environment variables to add to the ruler pods extraEnv: [] - # -- Environment variables from secrets or configmaps to add to the gateway pods + # -- Environment variables from secrets or configmaps to add to the ruler pods extraEnvFrom: [] - # -- Lifecycle for the gateway container - lifecycle: {} - # -- Volumes to add to the gateway pods - extraVolumes: [] - # -- Volume mounts to add to the gateway pods + # -- Volume mounts to add to the ruler pods extraVolumeMounts: [] - # -- The SecurityContext for gateway containers - podSecurityContext: - fsGroup: 101 - runAsGroup: 101 - runAsNonRoot: true - runAsUser: 101 - # -- The SecurityContext for gateway containers - containerSecurityContext: - readOnlyRootFilesystem: true - capabilities: - drop: - - ALL - allowPrivilegeEscalation: false - # -- Resource requests and limits for the gateway + # -- Volumes to add to the ruler pods + extraVolumes: [] + # -- Resource requests and limits for the ruler resources: {} - # -- Containers to add to the gateway pods + # -- Containers to add to the ruler pods extraContainers: [] - # -- Grace period to allow the gateway to shutdown before it is killed - terminationGracePeriodSeconds: 30 - # -- Affinity for gateway pods. 
Passed through `tpl` and, thus, to be configured as string - # @default -- Hard node and soft zone anti-affinity - affinity: | + # -- Init containers to add to the ruler pods + initContainers: [] + # -- Grace period to allow the ruler to shutdown before it is killed + terminationGracePeriodSeconds: 300 + # -- Affinity for ruler pods. + # @default -- Hard node anti-affinity + affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: - {{- include "loki.gatewaySelectorLabels" . | nindent 10 }} + app.kubernetes.io/component: ruler topologyKey: kubernetes.io/hostname - # -- DNS config for gateway pods + # -- Pod Disruption Budget maxUnavailable + maxUnavailable: null + # -- Node selector for ruler pods + nodeSelector: {} + # -- Tolerations for ruler pods + tolerations: [] + # -- DNSConfig for ruler pods dnsConfig: {} - # -- Node selector for gateway pods + persistence: + # -- Enable creating PVCs which is required when using recording rules + enabled: false + # -- Size of persistent disk + size: 10Gi + # -- Storage class to be used. + # If defined, storageClassName: . + # If set to "-", storageClassName: "", which disables dynamic provisioning. + # If empty or set to null, no storageClassName spec is + # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). + storageClass: null + # -- Annotations for ruler PVCs + annotations: {} + # -- Set the optional grpc service protocol. 
Ex: "grpc", "http2" or "https" + appProtocol: + grpc: "" + # -- Directories containing rules files + directories: {} + # tenant_foo: + # rules1.txt: | + # groups: + # - name: should_fire + # rules: + # - alert: HighPercentageError + # expr: | + # sum(rate({app="foo", env="production"} |= "error" [5m])) by (job) + # / + # sum(rate({app="foo", env="production"}[5m])) by (job) + # > 0.05 + # for: 10m + # labels: + # severity: warning + # annotations: + # summary: High error rate + # - name: credentials_leak + # rules: + # - alert: http-credentials-leaked + # annotations: + # message: "{{ $labels.job }} is leaking http basic auth credentials." + # expr: 'sum by (cluster, job, pod) (count_over_time({namespace="prod"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0)' + # for: 10m + # labels: + # severity: critical + # rules2.txt: | + # groups: + # - name: example + # rules: + # - alert: HighThroughputLogStreams + # expr: sum by(container) (rate({job=~"loki-dev/.*"}[1m])) > 1000 + # for: 2m + # tenant_bar: + # rules1.txt: | + # groups: + # - name: should_fire + # rules: + # - alert: HighPercentageError + # expr: | + # sum(rate({app="foo", env="production"} |= "error" [5m])) by (job) + # / + # sum(rate({app="foo", env="production"}[5m])) by (job) + # > 0.05 + # for: 10m + # labels: + # severity: warning + # annotations: + # summary: High error rate + # - name: credentials_leak + # rules: + # - alert: http-credentials-leaked + # annotations: + # message: "{{ $labels.job }} is leaking http basic auth credentials." 
+ # expr: 'sum by (cluster, job, pod) (count_over_time({namespace="prod"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0)' + # for: 10m + # labels: + # severity: critical + # rules2.txt: | + # groups: + # - name: example + # rules: + # - alert: HighThroughputLogStreams + # expr: sum by(container) (rate({job=~"loki-dev/.*"}[1m])) > 1000 + # for: 2m + +memcached: + image: + # -- Memcached Docker image repository + repository: memcached + # -- Memcached Docker image tag + tag: 1.6.23-alpine + # -- Memcached Docker image pull policy + pullPolicy: IfNotPresent + # -- The SecurityContext override for memcached pods + podSecurityContext: {} + # -- The name of the PriorityClass for memcached pods + priorityClassName: null + # -- The SecurityContext for memcached containers + containerSecurityContext: + readOnlyRootFilesystem: true + capabilities: + drop: [ALL] + allowPrivilegeEscalation: false + +memcachedExporter: + # -- Whether memcached metrics should be exported + enabled: true + image: + repository: prom/memcached-exporter + tag: v0.14.2 + pullPolicy: IfNotPresent + resources: + requests: {} + limits: {} + # -- The SecurityContext for memcached exporter containers + containerSecurityContext: + readOnlyRootFilesystem: true + capabilities: + drop: [ALL] + allowPrivilegeEscalation: false + # -- Extra args to add to the exporter container. 
+ # Example: + # extraArgs: + # memcached.tls.enable: true + # memcached.tls.cert-file: /certs/cert.crt + # memcached.tls.key-file: /certs/cert.key + # memcached.tls.ca-file: /certs/ca.crt + # memcached.tls.insecure-skip-verify: false + # memcached.tls.server-name: memcached + extraArgs: {} + +resultsCache: + # -- Specifies whether memcached based results-cache should be enabled + enabled: true + # -- Specify how long cached results should be stored in the results-cache before being expired + defaultValidity: 12h + # -- Memcached operation timeout + timeout: 500ms + # -- Total number of results-cache replicas + replicas: 1 + # -- Port of the results-cache service + port: 11211 + # -- Amount of memory allocated to results-cache for object storage (in MB). + allocatedMemory: 1024 + # -- Maximum item results-cache for memcached (in MB). + maxItemMemory: 5 + # -- Maximum number of connections allowed + connectionLimit: 16384 + # -- Max memory to use for cache write back + writebackSizeLimit: 500MB + # -- Max number of objects to use for cache write back + writebackBuffer: 500000 + # -- Number of parallel threads for cache write back + writebackParallelism: 1 + # -- Extra init containers for results-cache pods + initContainers: [] + # -- Annotations for the results-cache pods + annotations: {} + # -- Node selector for results-cache pods nodeSelector: {} - # -- Topology Spread Constraints for gateway pods + # -- Affinity for results-cache pods + affinity: {} + # -- topologySpreadConstraints allows to customize the default topologySpreadConstraints. This can be either a single dict as shown below or a slice of topologySpreadConstraints. + # labelSelector is taken from the constraint itself (if it exists) or is generated by the chart using the same selectors as for services. 
topologySpreadConstraints: [] - # -- Tolerations for gateway pods + # maxSkew: 1 + # topologyKey: kubernetes.io/hostname + # whenUnsatisfiable: ScheduleAnyway + # -- Tolerations for results-cache pods tolerations: [] - # Gateway service configuration + # -- Pod Disruption Budget + podDisruptionBudget: + maxUnavailable: 1 + # -- The name of the PriorityClass for results-cache pods + priorityClassName: null + # -- Labels for results-cache pods + podLabels: {} + # -- Annotations for results-cache pods + podAnnotations: {} + # -- Management policy for results-cache pods + podManagementPolicy: Parallel + # -- Grace period to allow the results-cache to shutdown before it is killed + terminationGracePeriodSeconds: 60 + # -- Stateful results-cache strategy + statefulStrategy: + type: RollingUpdate + # -- Add extended options for results-cache memcached container. The format is the same as for the memcached -o/--extend flag. + # Example: + # extraExtendedOptions: 'tls,modern,track_sizes' + extraExtendedOptions: "" + # -- Additional CLI args for results-cache + extraArgs: {} + # -- Additional containers to be added to the results-cache pod. + extraContainers: [] + # -- Additional volumes to be added to the results-cache pod (applies to both memcached and exporter containers). + # Example: + # extraVolumes: + # - name: extra-volume + # secret: + # secretName: extra-volume-secret + extraVolumes: [] + # -- Additional volume mounts to be added to the results-cache pod (applies to both memcached and exporter containers). + # Example: + # extraVolumeMounts: + # - name: extra-volume + # mountPath: /etc/extra-volume + # readOnly: true + extraVolumeMounts: [] + # -- Resource requests and limits for the results-cache + # By default a safe memory limit will be requested based on allocatedMemory value (floor (* 1.2 allocatedMemory)). 
+ resources: null + # -- Service annotations and labels service: - # -- Port of the gateway service - port: 80 - # -- Type of the gateway service - type: ClusterIP - # -- ClusterIP of the gateway service - clusterIP: null - # -- (int) Node port if service type is NodePort - nodePort: null - # -- Load balancer IPO address if service type is LoadBalancer - loadBalancerIP: null - # -- Annotations for the gateway service - annotations: {} - # -- Labels for gateway service - labels: {} - # Gateway ingress configuration - ingress: - # -- Specifies whether an ingress for the gateway should be created - enabled: false - # -- Ingress Class Name. MAY be required for Kubernetes versions >= 1.18 - ingressClassName: "" - # -- Annotations for the gateway ingress annotations: {} - # -- Labels for the gateway ingress labels: {} - # -- Hosts configuration for the gateway ingress, passed through the `tpl` function to allow templating - hosts: - - host: gateway.loki.example.com - paths: - - path: / - # -- pathType (e.g. ImplementationSpecific, Prefix, .. etc.) might also be required by some Ingress Controllers - # pathType: Prefix - # -- TLS configuration for the gateway ingress. Hosts passed through the `tpl` function to allow templating - tls: - - secretName: loki-gateway-tls - hosts: - - gateway.loki.example.com - # Basic auth configuration - basicAuth: - # -- Enables basic authentication for the gateway - enabled: false - # -- The basic auth username for the gateway - username: null - # -- The basic auth password for the gateway - password: null - # -- Uses the specified users from the `loki.tenants` list to create the htpasswd file - # if `loki.tenants` is not set, the `gateway.basicAuth.username` and `gateway.basicAuth.password` are used - # The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes - # high CPU load. 
- htpasswd: >- - {{ if .Values.loki.tenants }} - - {{- range $t := .Values.loki.tenants }} - {{ htpasswd (required "All tenants must have a 'name' set" $t.name) (required "All tenants must have a 'password' set" $t.password) }} - {{- end }} - {{ else }} {{ htpasswd (required "'gateway.basicAuth.username' is required" .Values.gateway.basicAuth.username) (required "'gateway.basicAuth.password' is required" .Values.gateway.basicAuth.password) }} {{ end }} - # -- Existing basic auth secret to use. Must contain '.htpasswd' - existingSecret: null - # Configures the readiness probe for the gateway - readinessProbe: - httpGet: - path: / - port: http - initialDelaySeconds: 15 - timeoutSeconds: 1 - nginxConfig: - # -- Enable listener for IPv6, disable on IPv4-only systems - enableIPv6: true - # -- NGINX log format - logFormat: |- - main '$remote_addr - $remote_user [$time_local] $status ' - '"$request" $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - # -- Allows appending custom configuration to the server block - serverSnippet: "" - # -- Allows appending custom configuration to the http block, passed through the `tpl` function to allow templating - httpSnippet: >- - {{ if .Values.loki.tenants }}proxy_set_header X-Scope-OrgID $remote_user;{{ end }} - # -- Override Read URL - customReadUrl: null - # -- Override Write URL - customWriteUrl: null - # -- Override Backend URL - customBackendUrl: null - # -- Allows overriding the DNS resolver address nginx will use. - resolver: "" - # -- Config file contents for Nginx. Passed through the `tpl` function to allow templating - # @default -- See values.yaml - file: | - {{- include "loki.nginxFile" . 
| indent 2 -}} -networkPolicy: - # -- Specifies whether Network Policies should be created +chunksCache: + # -- Specifies whether memcached based chunks-cache should be enabled + enabled: true + # -- Batchsize for sending and receiving chunks from chunks cache + batchSize: 4 + # -- Parallel threads for sending and receiving chunks from chunks cache + parallelism: 5 + # -- Memcached operation timeout + timeout: 2000ms + # -- Specify how long cached chunks should be stored in the chunks-cache before being expired + defaultValidity: 0s + # -- Total number of chunks-cache replicas + replicas: 1 + # -- Port of the chunks-cache service + port: 11211 + # -- Amount of memory allocated to chunks-cache for object storage (in MB). + allocatedMemory: 8192 + # -- Maximum item memory for chunks-cache (in MB). + maxItemMemory: 5 + # -- Maximum number of connections allowed + connectionLimit: 16384 + # -- Max memory to use for cache write back + writebackSizeLimit: 500MB + # -- Max number of objects to use for cache write back + writebackBuffer: 500000 + # -- Number of parallel threads for cache write back + writebackParallelism: 1 + # -- Extra init containers for chunks-cache pods + initContainers: [] + # -- Annotations for the chunks-cache pods + annotations: {} + # -- Node selector for chunks-cache pods + nodeSelector: {} + # -- Affinity for chunks-cache pods + affinity: {} + # -- topologySpreadConstraints allows to customize the default topologySpreadConstraints. This can be either a single dict as shown below or a slice of topologySpreadConstraints. + # labelSelector is taken from the constraint itself (if it exists) or is generated by the chart using the same selectors as for services. 
+ topologySpreadConstraints: [] + # maxSkew: 1 + # topologyKey: kubernetes.io/hostname + # whenUnsatisfiable: ScheduleAnyway + # -- Tolerations for chunks-cache pods + tolerations: [] + # -- Pod Disruption Budget + podDisruptionBudget: + maxUnavailable: 1 + # -- The name of the PriorityClass for chunks-cache pods + priorityClassName: null + # -- Labels for chunks-cache pods + podLabels: {} + # -- Annotations for chunks-cache pods + podAnnotations: {} + # -- Management policy for chunks-cache pods + podManagementPolicy: Parallel + # -- Grace period to allow the chunks-cache to shutdown before it is killed + terminationGracePeriodSeconds: 60 + # -- Stateful chunks-cache strategy + statefulStrategy: + type: RollingUpdate + # -- Add extended options for chunks-cache memcached container. The format is the same as for the memcached -o/--extend flag. + # Example: + # extraExtendedOptions: 'tls,no_hashexpand' + extraExtendedOptions: "" + # -- Additional CLI args for chunks-cache + extraArgs: {} + # -- Additional containers to be added to the chunks-cache pod. + extraContainers: [] + # -- Additional volumes to be added to the chunks-cache pod (applies to both memcached and exporter containers). + # Example: + # extraVolumes: + # - name: extra-volume + # secret: + # secretName: extra-volume-secret + extraVolumes: [] + # -- Additional volume mounts to be added to the chunks-cache pod (applies to both memcached and exporter containers). + # Example: + # extraVolumeMounts: + # - name: extra-volume + # mountPath: /etc/extra-volume + # readOnly: true + extraVolumeMounts: [] + # -- Resource requests and limits for the chunks-cache + # By default a safe memory limit will be requested based on allocatedMemory value (floor (* 1.2 allocatedMemory)). 
+ resources: null + # -- Service annotations and labels + service: + annotations: {} + labels: {} + +###################################################################################################################### +# +# Subchart configurations +# +###################################################################################################################### +# -- Setting for the Grafana Rollout Operator https://github.com/grafana/helm-charts/tree/main/charts/rollout-operator +rollout_operator: enabled: false - # -- Specifies whether the policies created will be standard Network Policies (flavor: kubernetes) - # or Cilium Network Policies (flavor: cilium) - flavor: kubernetes - metrics: - # -- Specifies the Pods which are allowed to access the metrics port. - # As this is cross-namespace communication, you also need the namespaceSelector. - podSelector: {} - # -- Specifies the namespaces which are allowed to access the metrics port - namespaceSelector: {} - # -- Specifies specific network CIDRs which are allowed to access the metrics port. - # In case you use namespaceSelector, you also have to specify your kubelet networks here. - # The metrics ports are also used for probes. - cidrs: [] - ingress: - # -- Specifies the Pods which are allowed to access the http port. - # As this is cross-namespace communication, you also need the namespaceSelector. - podSelector: {} - # -- Specifies the namespaces which are allowed to access the http port - namespaceSelector: {} - alertmanager: - # -- Specify the alertmanager port used for alerting - port: 9093 - # -- Specifies the alertmanager Pods. - # As this is cross-namespace communication, you also need the namespaceSelector. - podSelector: {} - # -- Specifies the namespace the alertmanager is running in - namespaceSelector: {} - externalStorage: - # -- Specify the port used for external storage, e.g. 
AWS S3 - ports: [] - # -- Specifies specific network CIDRs you want to limit access to - cidrs: [] - discovery: - # -- (int) Specify the port used for discovery - port: null - # -- Specifies the Pods labels used for discovery. - # As this is cross-namespace communication, you also need the namespaceSelector. - podSelector: {} - # -- Specifies the namespace the discovery Pods are running in - namespaceSelector: {} - egressWorld: - # -- Enable additional cilium egress rules to external world for write, read and backend. - enabled: false - egressKubeApiserver: - # -- Enable additional cilium egress rules to kube-apiserver for backend. - enabled: false -# ------------------------------------- -# Configuration for `minio` child chart -# ------------------------------------- + + # -- podSecurityContext is the pod security context for the rollout operator. + # When installing on OpenShift, override podSecurityContext settings with + # + # rollout_operator: + # podSecurityContext: + # fsGroup: null + # runAsGroup: null + # runAsUser: null + podSecurityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + + # Set the container security context + securityContext: + readOnlyRootFilesystem: true + capabilities: + drop: [ALL] + allowPrivilegeEscalation: false + +# -- Configuration for the minio subchart minio: enabled: false replicas: 1 @@ -1640,3 +3192,215 @@ sidecar: watchClientTimeout: 60 # -- Log level of the sidecar container. logLevel: INFO + +############################################## WARNING ############################################################### +# +# DEPRECATED VALUES +# +# The following values are deprecated and will be removed in a future version of the helm chart! 
+# +############################################## WARNING ############################################################## + +# -- DEPRECATED Monitoring section determines which monitoring features to enable, this section is being replaced +# by https://github.com/grafana/meta-monitoring-chart +monitoring: + # Dashboards for monitoring Loki + dashboards: + # -- If enabled, create configmap with dashboards for monitoring Loki + enabled: false + # -- Alternative namespace to create dashboards ConfigMap in + namespace: null + # -- Additional annotations for the dashboards ConfigMap + annotations: {} + # -- Labels for the dashboards ConfigMap + labels: + grafana_dashboard: "1" + # Recording rules for monitoring Loki, required for some dashboards + rules: + # -- If enabled, create PrometheusRule resource with Loki recording rules + enabled: false + # -- Include alerting rules + alerting: true + # -- Specify which individual alerts should be disabled + # -- Instead of turning off each alert one by one, set the .monitoring.rules.alerting value to false instead. + # -- If you disable all the alerts and keep .monitoring.rules.alerting set to true, the chart will fail to render. 
+ disabled: {} + # LokiRequestErrors: true + # LokiRequestPanics: true + # -- Alternative namespace to create PrometheusRule resources in + namespace: null + # -- Additional annotations for the rules PrometheusRule resource + annotations: {} + # -- Additional labels for the rules PrometheusRule resource + labels: {} + # -- Additional labels for PrometheusRule alerts + additionalRuleLabels: {} + # -- Additional groups to add to the rules file + additionalGroups: [] + # - name: additional-loki-rules + # rules: + # - record: job:loki_request_duration_seconds_bucket:sum_rate + # expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job) + # - record: job_route:loki_request_duration_seconds_bucket:sum_rate + # expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job, route) + # - record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate + # expr: sum(rate(container_cpu_usage_seconds_total[1m])) by (node, namespace, pod, container) + # ServiceMonitor configuration + serviceMonitor: + # -- If enabled, ServiceMonitor resources for Prometheus Operator are created + enabled: false + # -- Namespace selector for ServiceMonitor resources + namespaceSelector: {} + # -- ServiceMonitor annotations + annotations: {} + # -- Additional ServiceMonitor labels + labels: {} + # -- ServiceMonitor scrape interval + # Default is 15s because included recording rules use a 1m rate, and scrape interval needs to be at + # least 1/4 rate interval. + interval: 15s + # -- ServiceMonitor scrape timeout in Go duration format (e.g. 
15s) + scrapeTimeout: null + # -- ServiceMonitor relabel configs to apply to samples before scraping + # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + relabelings: [] + # -- ServiceMonitor metric relabel configs to apply to samples before ingestion + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint + metricRelabelings: [] + # -- ServiceMonitor will use http by default, but you can pick https as well + scheme: http + # -- ServiceMonitor will use these tlsConfig settings to make the health check requests + tlsConfig: null + # -- If defined, will create a MetricsInstance for the Grafana Agent Operator. + metricsInstance: + # -- If enabled, MetricsInstance resources for Grafana Agent Operator are created + enabled: true + # -- MetricsInstance annotations + annotations: {} + # -- Additional MetricsInstance labels + labels: {} + # -- If defined a MetricsInstance will be created to remote write metrics. + remoteWrite: null + # Self monitoring determines whether Loki should scrape its own logs. + # This feature currently relies on the Grafana Agent Operator being installed, + # which is installed by default using the grafana-agent-operator sub-chart. + # It will create custom resources for GrafanaAgent, LogsInstance, and PodLogs to configure + # scrape configs to scrape its own logs with the labels expected by the included dashboards. + selfMonitoring: + enabled: false + # -- Tenant to use for self monitoring + tenant: + # -- Name of the tenant + name: "self-monitoring" + # -- Namespace to create additional tenant token secret in. Useful if your Grafana instance + # is in a separate namespace. Token will still be created in the canary namespace. + secretNamespace: "{{ .Release.Namespace }}" + # Grafana Agent configuration + grafanaAgent: + # -- Controls whether to install the Grafana Agent Operator and its CRDs. 
+ # Note that helm will not install CRDs if this flag is enabled during an upgrade. + # In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds + installOperator: false + # -- Grafana Agent annotations + annotations: {} + # -- Additional Grafana Agent labels + labels: {} + # -- Enable the config read api on port 8080 of the agent + enableConfigReadAPI: false + # -- The name of the PriorityClass for GrafanaAgent pods + priorityClassName: null + # -- Resource requests and limits for the grafanaAgent pods + resources: {} + # limits: + # memory: 200Mi + # requests: + # cpu: 50m + # memory: 100Mi + # -- Tolerations for GrafanaAgent pods + tolerations: [] + # PodLogs configuration + podLogs: + # -- PodLogs version + apiVersion: monitoring.grafana.com/v1alpha1 + # -- PodLogs annotations + annotations: {} + # -- Additional PodLogs labels + labels: {} + # -- PodLogs relabel configs to apply to samples before scraping + # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + relabelings: [] + # -- Additional pipeline stages to process logs after scraping + # https://grafana.com/docs/agent/latest/operator/api/#pipelinestagespec-a-namemonitoringgrafanacomv1alpha1pipelinestagespeca + additionalPipelineStages: [] + # LogsInstance configuration + logsInstance: + # -- LogsInstance annotations + annotations: {} + # -- Additional LogsInstance labels + labels: {} + # -- Additional clients for remote write + clients: null + +# -- DEPRECATED Configuration for the table-manager. The table-manager is only necessary when using a deprecated +# index type such as Cassandra, Bigtable, or DynamoDB, it has not been necessary since loki introduced self- +# contained index types like 'boltdb-shipper' and 'tsdb'. This will be removed in a future helm chart. 
+tableManager: + # -- Specifies whether the table-manager should be enabled + enabled: false + image: + # -- The Docker registry for the table-manager image. Overrides `loki.image.registry` + registry: null + # -- Docker image repository for the table-manager image. Overrides `loki.image.repository` + repository: null + # -- Docker image tag for the table-manager image. Overrides `loki.image.tag` + tag: null + # -- Command to execute instead of defined in Docker image + command: null + # -- The name of the PriorityClass for table-manager pods + priorityClassName: null + # -- Labels for table-manager pods + podLabels: {} + # -- Annotations for table-manager deployment + annotations: {} + # -- Annotations for table-manager pods + podAnnotations: {} + service: + # -- Annotations for table-manager Service + annotations: {} + # -- Additional labels for table-manager Service + labels: {} + # -- Additional CLI args for the table-manager + extraArgs: [] + # -- Environment variables to add to the table-manager pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the table-manager pods + extraEnvFrom: [] + # -- Volume mounts to add to the table-manager pods + extraVolumeMounts: [] + # -- Volumes to add to the table-manager pods + extraVolumes: [] + # -- Resource requests and limits for the table-manager + resources: {} + # -- Containers to add to the table-manager pods + extraContainers: [] + # -- Grace period to allow the table-manager to shutdown before it is killed + terminationGracePeriodSeconds: 30 + # -- Affinity for table-manager pods. 
+ # @default -- Hard node and anti-affinity + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: table-manager + topologyKey: kubernetes.io/hostname + # -- DNS config table-manager pods + dnsConfig: {} + # -- Node selector for table-manager pods + nodeSelector: {} + # -- Tolerations for table-manager pods + tolerations: [] + # -- Enable deletes by retention + retention_deletes_enabled: false + # -- Set retention period + retention_period: 0 diff --git a/tools/dev/k3d/Makefile b/tools/dev/k3d/Makefile index bf2a9eace5c5..f06b709cb3e3 100644 --- a/tools/dev/k3d/Makefile +++ b/tools/dev/k3d/Makefile @@ -46,6 +46,14 @@ loki-ha-single-binary: prepare helm-cluster $(MAKE) -C $(CURDIR) helm-install-loki-ha-single-binary echo "Helm installation finished. You can tear down this cluster with make down." +loki-distributed: prepare helm-cluster + $(MAKE) -C $(CURDIR) apply-loki-helm-cluster + echo "Waiting $(SLEEP)s for cluster to be ready for helm installation." + # wait for tk apply to finish and cluster is ready for helm install + sleep $(SLEEP) + $(MAKE) -C $(CURDIR) helm-install-loki-distributed + echo "Helm installation finished. You can tear down this cluster with make down." 
+ helm-cluster: prepare $(CURDIR)/scripts/create_cluster.sh helm-cluster $(REGISTRY_PORT) # wait for the cluster to be ready @@ -155,11 +163,20 @@ helm-upgrade-loki-ha-single-binary: helm-uninstall-loki-binary: $(HELM) uninstall loki-single-binary -n loki +helm-install-loki-distributed: + $(HELM) install loki "$(HELM_DIR)" -n loki --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/loki-distributed.yaml" + +helm-upgrade-loki-distributed: + $(HELM) upgrade loki "$(HELM_DIR)" -n loki --values "$(CURDIR)/environments/helm-cluster/values/loki-distributed.yaml" + +helm-uninstall-loki-distributed: + $(HELM) uninstall loki -n loki + helm-install-kube-state-metrics: - helm install kube-state-metrics --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/kube-state-metrics.yaml + $(HELM) install kube-state-metrics --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/kube-state-metrics.yaml helm-install-enterprise-logs-cloud-monitoring: - helm install enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs-cloud-monitoring.yaml" + $(HELM) install enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs-cloud-monitoring.yaml" helm-upgrade-enterprise-logs-cloud-monitoring: - helm upgrade enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs-cloud-monitoring.yaml" + $(HELM) upgrade enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs-cloud-monitoring.yaml" diff --git a/tools/dev/k3d/environments/helm-cluster/spec.json b/tools/dev/k3d/environments/helm-cluster/spec.json index 8da04edb4847..b480bb66168d 100644 --- a/tools/dev/k3d/environments/helm-cluster/spec.json +++ b/tools/dev/k3d/environments/helm-cluster/spec.json @@ -6,7 +6,7 
@@ "namespace": "environments/helm-cluster/main.jsonnet" }, "spec": { - "apiServer": "https://0.0.0.0:38311", + "apiServer": "https://0.0.0.0:45479", "namespace": "k3d-helm-cluster", "resourceDefaults": {}, "expectVersions": {} diff --git a/tools/dev/k3d/environments/helm-cluster/values/loki-distributed-tls.yaml b/tools/dev/k3d/environments/helm-cluster/values/loki-distributed-tls.yaml new file mode 100644 index 000000000000..d442cae407e9 --- /dev/null +++ b/tools/dev/k3d/environments/helm-cluster/values/loki-distributed-tls.yaml @@ -0,0 +1,241 @@ +common_client_crt: &common_client_crt /var/client-tls/tls.crt +common_client_key: &common_client_key /var/client-tls/tls.key +common_ca_crt: &common_ca_crt /var/root-tls/tls.crt +common_server_crt: &common_server_crt /var/tls/tls.crt +common_server_key: &common_server_key /var/tls/tls.key +common_ca_secret: &common_ca_secret ca-tls +common_client_secret: &common_client_secret client-tls +common_server_secret: &common_server_secret my-demo-app-tls + +base_grpc_tls: &base_grpc_tls + tls_cert_path: *common_client_crt + tls_key_path: *common_client_key + tls_ca_path: *common_ca_crt + +base_grpc_tls_with_server_name: &base_grpc_tls_with_server_name + tls_server_name: loki-memberlist + <<: *base_grpc_tls + +base_extra_volume_mounts: &base_extra_volume_mounts + extraVolumeMounts: + - name: tls-cert + mountPath: /var/tls + - name: root-tls-cert + mountPath: /var/root-tls + - name: client-tls + mountPath: /var/client-tls + +base_extra_volumes: &base_extra_volumes + extraVolumes: + - name: tls-cert + secret: + secretName: *common_server_secret + - name: root-tls-cert + secret: + secretName: *common_ca_secret + - name: client-tls + secret: + secretName: *common_client_secret + +test: + enabled: false + +monitoring: + dashboards: + enabled: true + namespace: k3d-helm-cluster + selfMonitoring: + enabled: true + tenant: + name: loki + secretNamespace: k3d-helm-cluster + logsInstance: + clients: + - name: loki + url: 
https://loki-gateway.default.svc.cluster.local/loki/api/v1/push + external_labels: + cluster: loki + tlsConfig: + insecureSkipVerify: false + cert: + secret: + key: tls.crt + name: client-tls + ca: + secret: + key: tls.crt + name: my-ca-tls + keySecret: + key: tls.key + name: client-tls + serverName: loki-gateway + tenantId: "self-monitoring" + serviceMonitor: + labels: + release: "prometheus" + rules: + namespace: k3d-helm-cluster + labels: + release: "prometheus" + lokiCanary: + <<: *base_extra_volumes + <<: *base_extra_volume_mounts + extraArgs: + - -ca-file=/var/root-tls/tls.crt + - -cert-file=/var/tls/tls.crt + - -key-file=/var/tls/tls.key + - -tls=true +minio: + enabled: true +backend: + replicas: 0 +read: + replicas: 0 +write: + replicas: 0 +singleBinary: + replicas: 0 + +gateway: + <<: *base_extra_volume_mounts + <<: *base_extra_volumes + readinessProbe: + httpGet: + path: / + port: http-metrics + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + nginxConfig: + ssl: true + serverSnippet: | + listen 443 ssl; + ssl_verify_client off; + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_certificate /var/tls/tls.crt; + ssl_certificate_key /var/tls/tls.key; + ssl_client_certificate /var/client-tls/tls.crt; + ssl_trusted_certificate /var/root-tls/tls.crt; + server_name loki-memberlist; + schema: https + +compactor: + replicas: 1 + enabled: true + <<: *base_extra_volume_mounts + <<: *base_extra_volumes + +distributor: + replicas: 1 + <<: *base_extra_volume_mounts + <<: *base_extra_volumes + +indexGateway: + replicas: 1 + enabled: true + <<: *base_extra_volume_mounts + <<: *base_extra_volumes + +ingester: + replicas: 3 + maxUnavailable: 1 + <<: *base_extra_volume_mounts + <<: *base_extra_volumes + +querier: + replicas: 3 + maxUnavailable: 1 + <<: *base_extra_volume_mounts + <<: *base_extra_volumes + +queryFrontend: + replicas: 1 + <<: *base_extra_volume_mounts + <<: *base_extra_volumes + +queryScheduler: + replicas: 2 + enabled: true + 
 <<: *base_extra_volume_mounts + <<: *base_extra_volumes + +ruler: + replicas: 1 + enabled: true + <<: *base_extra_volume_mounts + <<: *base_extra_volumes + +loki: + schemaConfig: + configs: + - from: 2024-01-01 + store: tsdb + object_store: s3 + schema: v13 + index: + prefix: loki_index_ + period: 24h + readinessProbe: + httpGet: + path: /ready + port: http-metrics + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + structuredConfig: + server: + log_level: debug + http_tls_config: + cert_file: *common_server_crt + key_file: *common_server_key + client_ca_file: *common_ca_crt + + # we need to set this to VerifyClientCertIfGiven to allow for mutual TLS. + # we can't set it to VerifyClientCertIfGivenAndRequireAnyClientCert because + # it would break k8s liveness and readiness probes. + client_auth_type: VerifyClientCertIfGiven + grpc_tls_config: + cert_file: *common_server_crt + key_file: *common_server_key + client_ca_file: *common_ca_crt + + # we need to set this to VerifyClientCertIfGiven to allow for mutual TLS. + # we can't set it to VerifyClientCertIfGivenAndRequireAnyClientCert because + # it would break k8s liveness and readiness probes. 
+ client_auth_type: VerifyClientCertIfGiven + ingester_client: + grpc_client_config: + <<: *base_grpc_tls_with_server_name + tls_enabled: true + query_scheduler: + grpc_client_config: + <<: *base_grpc_tls_with_server_name + tls_enabled: true + frontend: + tail_tls_config: + <<: *base_grpc_tls_with_server_name + grpc_client_config: + <<: *base_grpc_tls_with_server_name + tls_enabled: true + storage_config: + tsdb_shipper: + index_gateway_client: + grpc_client_config: + <<: *base_grpc_tls_with_server_name + tls_enabled: true + frontend_worker: + grpc_client_config: + <<: *base_grpc_tls_with_server_name + tls_enabled: true + memberlist: + <<: *base_grpc_tls_with_server_name + tls_enabled: true + ruler: + ruler_client: + <<: *base_grpc_tls_with_server_name + tls_enabled: true + evaluation: + query_frontend: + <<: *base_grpc_tls_with_server_name + tls_enabled: true \ No newline at end of file diff --git a/tools/dev/k3d/environments/helm-cluster/values/loki-distributed.yaml b/tools/dev/k3d/environments/helm-cluster/values/loki-distributed.yaml new file mode 100644 index 000000000000..860bac06501b --- /dev/null +++ b/tools/dev/k3d/environments/helm-cluster/values/loki-distributed.yaml @@ -0,0 +1,47 @@ +--- +monitoring: + dashboards: + namespace: k3d-helm-cluster + selfMonitoring: + tenant: + name: loki + secretNamespace: k3d-helm-cluster + serviceMonitor: + labels: + release: "prometheus" + rules: + namespace: k3d-helm-cluster + labels: + release: "prometheus" +minio: + enabled: true +backend: + replicas: 0 +read: + replicas: 0 +write: + replicas: 0 +singleBinary: + replicas: 0 +compactor: + replicas: 1 + enabled: true +distributor: + replicas: 1 +indexGateway: + replicas: 1 + enabled: true +ingester: + replicas: 3 + maxUnavailable: 1 +querier: + replicas: 3 + maxUnavailable: 1 +queryFrontend: + replicas: 1 +queryScheduler: + replicas: 2 + enabled: true +ruler: + replicas: 1 + enabled: true diff --git a/tools/dev/k3d/jsonnetfile.lock.json 
b/tools/dev/k3d/jsonnetfile.lock.json index 655532ca3086..441a39d0a792 100644 --- a/tools/dev/k3d/jsonnetfile.lock.json +++ b/tools/dev/k3d/jsonnetfile.lock.json @@ -8,7 +8,7 @@ "subdir": "consul" } }, - "version": "c0abc546c782a095a22c277d36f871bb94ffc944", + "version": "f31ba2e68d0cc8acdf7acd5c7b181a6b09aeb4c3", "sum": "Po3c1Ic96ngrJCtOazic/7OsLkoILOKZWXWyZWl+od8=" }, { @@ -18,7 +18,7 @@ "subdir": "enterprise-metrics" } }, - "version": "c0abc546c782a095a22c277d36f871bb94ffc944", + "version": "f31ba2e68d0cc8acdf7acd5c7b181a6b09aeb4c3", "sum": "hi2ZpHKl7qWXmSZ46sAycjWEQK6oGsoECuDKQT1dA+k=" }, { @@ -28,7 +28,7 @@ "subdir": "etcd-operator" } }, - "version": "c0abc546c782a095a22c277d36f871bb94ffc944", + "version": "f31ba2e68d0cc8acdf7acd5c7b181a6b09aeb4c3", "sum": "duHm6wmUju5KHQurOe6dnXoKgl5gTUsfGplgbmAOsHw=" }, { @@ -38,7 +38,7 @@ "subdir": "grafana" } }, - "version": "c0abc546c782a095a22c277d36f871bb94ffc944", + "version": "f31ba2e68d0cc8acdf7acd5c7b181a6b09aeb4c3", "sum": "Y5nheroSOIwmE+djEVPq4OvvTxKenzdHhpEwaR3Ebjs=" }, { @@ -48,7 +48,7 @@ "subdir": "jaeger-agent-mixin" } }, - "version": "c0abc546c782a095a22c277d36f871bb94ffc944", + "version": "f31ba2e68d0cc8acdf7acd5c7b181a6b09aeb4c3", "sum": "NyRKfJyqLhB9oHLpr+b47b5yiB3BuBB9ZmRcVk0IVEk=" }, { @@ -58,7 +58,7 @@ "subdir": "ksonnet-util" } }, - "version": "c0abc546c782a095a22c277d36f871bb94ffc944", + "version": "f31ba2e68d0cc8acdf7acd5c7b181a6b09aeb4c3", "sum": "0y3AFX9LQSpfWTxWKSwoLgbt0Wc9nnCwhMH2szKzHv0=" }, { @@ -78,7 +78,7 @@ "subdir": "memcached" } }, - "version": "c0abc546c782a095a22c277d36f871bb94ffc944", + "version": "f31ba2e68d0cc8acdf7acd5c7b181a6b09aeb4c3", "sum": "Cc715Y3rgTuimgDFIw+FaKzXSJGRYwt1pFTMbdrNBD8=" }, { @@ -88,7 +88,7 @@ "subdir": "tanka-util" } }, - "version": "c0abc546c782a095a22c277d36f871bb94ffc944", + "version": "f31ba2e68d0cc8acdf7acd5c7b181a6b09aeb4c3", "sum": "ShSIissXdvCy1izTCDZX6tY7qxCoepE5L+WJ52Hw7ZQ=" }, { @@ -108,8 +108,8 @@ "subdir": "doc-util" } }, - "version": 
"7c865ec0606f2b68c0f6b2721f101e6a99cd2593", - "sum": "zjjufxN4yAIevldYEERiZEp27vK0BJKj1VvZcVtWiOo=" + "version": "6ac6c69685b8c29c54515448eaca583da2d88150", + "sum": "BrAL/k23jq+xy9oA7TWIhUx07dsA/QLm3g7ktCwe//U=" }, { "source": { @@ -118,7 +118,7 @@ "subdir": "1.20" } }, - "version": "44a9f3d21c089a01f62b22e25bdf553f488a74e8", + "version": "3e32f80d1493d1579d273d1522af1fae2cc7c97f", "sum": "KXx5RVXiqTJQo2GVfrD8DIvlm292s0TxfTKT8I591+c=" } ], diff --git a/tools/dev/k3d/scripts/create_cluster.sh b/tools/dev/k3d/scripts/create_cluster.sh index ede41dc89fda..b441fe478839 100755 --- a/tools/dev/k3d/scripts/create_cluster.sh +++ b/tools/dev/k3d/scripts/create_cluster.sh @@ -53,7 +53,7 @@ for file in monitoring.coreos.com_alertmanagerconfigs.yaml \ done # Apply CRDs needed for grafana agent -agent_crd_base_url="https://raw.githubusercontent.com/grafana/agent/main/production/operator/crds" +agent_crd_base_url="https://raw.githubusercontent.com/grafana/agent/7dbb39c70bbb67be40e528cb71a3541b59dbe93d/production/operator/crds" for file in monitoring.grafana.com_grafanaagents.yaml \ monitoring.grafana.com_integrations.yaml \ monitoring.grafana.com_logsinstances.yaml \