From 7c14df9674504ab9f8f7c499f36a3f52e712236e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lukas=20Juozas=20Janu=C5=A1aitis?=
Date: Sat, 12 Oct 2024 12:40:04 +0300
Subject: [PATCH] docs(alert): clarify remote rule evaluation

---
 docs/sources/alert/_index.md | 71 +++++++++++++++++++++++++-----------
 1 file changed, 49 insertions(+), 22 deletions(-)

diff --git a/docs/sources/alert/_index.md b/docs/sources/alert/_index.md
index e12e073c3b88..b6a019bb01df 100644
--- a/docs/sources/alert/_index.md
+++ b/docs/sources/alert/_index.md
@@ -33,7 +33,6 @@ ruler:
     kvstore:
       store: inmemory
   enable_api: true
-
 ```
 
 We support two kinds of rules: [alerting](#alerting-rules) rules and [recording](#recording-rules) rules.
@@ -62,9 +61,9 @@ groups:
             > 0.05
         for: 10m
         labels:
-            severity: page
+          severity: page
         annotations:
-            summary: High request latency
+          summary: High request latency
   - name: credentials_leak
     rules:
       - alert: http-credentials-leaked
@@ -106,7 +105,6 @@ This query (`expr`) will be executed every 1 minute (`interval`), the result of
 name we have defined (`record`). This metric named `nginx:requests:rate1m` can now be sent to Prometheus, where it will be stored just like any other metric.
 
-
 ### Limiting Alerts and Recording Rule Samples
 
 Like [Prometheus](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#limiting-alerts-and-series), you can configure a limit for alerts produced by alerting rules and samples produced by recording rules. This limit can be configured per-group. Using limits can prevent a faulty rule from generating a large number of alerts or recording samples. When the limit is exceeded, all recording samples produced by the rule are discarded, and if it is an alerting rule, all alerts for the rule, active, pending, or inactive, are cleared. The event will be recorded as an error in the evaluation, and the rule health will be set to `err`. The default value for limit is `0` meaning no limit.
@@ -115,8 +113,6 @@ Here is an example of a rule group along with its limit configured.
 
-
-
 ```yaml
 groups:
   - name: production_rules
@@ -131,9 +127,9 @@ groups:
             > 0.05
         for: 10m
         labels:
-            severity: page
+          severity: page
         annotations:
-            summary: High request latency
+          summary: High request latency
       - record: nginx:requests:rate1m
         expr: |
           sum(
@@ -184,6 +180,7 @@ We don't always control the source code of applications we run. Load balancers a
 ### Event alerting
 
 Sometimes you want to know whether _any_ instance of something has occurred. Alerting based on logs can be a great way to handle this, such as finding examples of leaked authentication credentials:
+
 ```yaml
 - name: credentials_leak
   rules:
@@ -209,10 +206,11 @@ As an example, we can use LogQL v2 to help Loki to monitor _itself_, alerting us
 ## Interacting with the Ruler
 
 ### Lokitool
+
 Because the rule files are identical to Prometheus rule files, we can interact with the Loki Ruler via `lokitool`.
 
 {{% admonition type="note" %}}
-lokitool is intended to run against multi-tenant Loki. The commands need an `--id=` flag set to the Loki instance ID or set the environment variable `LOKI_TENANT_ID`. If Loki is running in single tenant mode, the required ID is `fake`. 
+lokitool is intended to run against multi-tenant Loki. The commands need an `--id=` flag set to the Loki instance ID or set the environment variable `LOKI_TENANT_ID`. If Loki is running in single tenant mode, the required ID is `fake`.
 {{% /admonition %}}
 
 An example workflow is included below:
@@ -284,6 +282,28 @@ resource "loki_rule_group_recording" "test" {
 ```
 
+### Cortex rules action
+
+The [Cortex rules action](https://github.com/grafana/cortex-rules-action) supports Loki as a backend, which can be handy for managing rules in a CI/CD pipeline. It can be used to lint, diff, and sync rules between a local directory and a remote Loki instance.
+
+```yaml
+- name: Lint Loki rules
+  uses: grafana/cortex-rules-action@master
+  env:
+    ACTION: check
+    RULES_DIR: # Example: logs/recording_rules/,logs/alerts/
+    BACKEND: loki
+
+- name: Deploy rules to Loki staging
+  uses: grafana/cortex-rules-action@master
+  env:
+    CORTEX_ADDRESS:
+    CORTEX_TENANT_ID: fake
+    ACTION: sync
+    RULES_DIR: # Example: logs/recording_rules/,logs/alerts/
+    BACKEND: loki
+```
+
 ## Scheduling and best practices
 
 One option to scale the Ruler is by scaling it horizontally. However, with multiple Ruler instances running they will need to coordinate to determine which instance will evaluate which rule. Similar to the ingesters, the Rulers establish a hash ring to divide up the responsibilities of evaluating rules.
 
@@ -294,19 +314,19 @@ A full sharding-enabled Ruler example is:
 ```yaml
 ruler:
-    alertmanager_url:
-    enable_alertmanager_v2: true
-    enable_api: true
-    enable_sharding: true
-    ring:
-        kvstore:
-            consul:
-                host: consul.loki-dev.svc.cluster.local:8500
-            store: consul
-    rule_path: /tmp/rules
-    storage:
-        gcs:
-            bucket_name:
+  alertmanager_url:
+  enable_alertmanager_v2: true # true by default since Loki 3.2.0
+  enable_api: true
+  enable_sharding: true
+  ring:
+    kvstore:
+      consul:
+        host: consul.loki-dev.svc.cluster.local:8500
+      store: consul
+  rule_path: /tmp/rules
+  storage:
+    gcs:
+      bucket_name:
 ```
 
 ## Ruler storage
 
 The Ruler supports the following types of storage: `azure`, `gcs`, `s3`, `swift`, and `local`.
@@ -316,18 +336,25 @@ The Ruler supports the following types of storage: `azure`, `gcs`, `s3`, `swift`
 The local implementation reads the rule files off of the local filesystem. This is a read-only backend that does not support the creation and deletion of rules through the [Ruler API](https://grafana.com/docs/loki//reference/loki-http-api#ruler). Despite the fact that it reads the local filesystem this method can still be used in a sharded Ruler configuration if the operator takes care to load the same rules to every Ruler. For instance, this could be accomplished by mounting a [Kubernetes ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) onto every Ruler pod.
 
 A typical local configuration might look something like:
+
 ```
 -ruler.storage.type=local
 -ruler.storage.local.directory=/tmp/loki/rules
 ```
 
 With the above configuration, the Ruler would expect the following layout:
+
 ```
 /tmp/loki/rules//rules1.yaml
               /rules2.yaml
 ```
+
 Yaml files are expected to be [Prometheus-compatible](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) but include LogQL expressions as specified in the beginning of this doc.
 
+## Remote rule evaluation
+
+In larger deployments, or with complex rules, running the Ruler in local evaluation mode can produce results that are inconsistent or incomplete compared to what you see in Grafana. To avoid this, run the Ruler in remote evaluation mode, which evaluates rules against the query frontend. A more detailed explanation can be found in the [scalability documentation]({{< relref "../operations/scalability.md#remote-rule-evaluation" >}}).
+
 ## Future improvements
 
 There are a few things coming to increase the robustness of this service. In no particular order:
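For the remote evaluation mode described in the new "Remote rule evaluation" section, a minimal configuration sketch follows. It is based on the scalability documentation that section links to; the exact keys (`ruler.evaluation.mode`, `ruler.evaluation.query_frontend.address`) and the example address are assumptions to verify against your Loki version, not part of this patch.

```yaml
# Sketch only: point the Ruler at the query frontend instead of evaluating rules locally.
# The address below is a placeholder for your query-frontend gRPC endpoint.
ruler:
  evaluation:
    mode: remote        # assumed setting; "local" is the default
    query_frontend:
      address: dns:///query-frontend.<namespace>.svc.cluster.local:9095
```

Routing rule queries through the query frontend lets them be split and sharded like any other query, which is what addresses the inconsistent or incomplete results mentioned above.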