diff --git a/.drone/drone.yml b/.drone/drone.yml index b234441b6770..4a55da51c8e5 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -75,7 +75,7 @@ steps: - commands: - apt-get update -y && apt-get install -y libsystemd-dev - make lint - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Lint trigger: event: @@ -93,7 +93,7 @@ steps: - ERR_MSG="Dashboard definitions are out of date. Please run 'make generate-dashboards' and commit changes!" - if [ ! -z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Regenerate dashboards trigger: event: @@ -111,7 +111,7 @@ steps: - ERR_MSG="Custom Resource Definitions are out of date. Please run 'make generate-crds' and commit changes!" - if [ ! -z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Regenerate crds trigger: event: @@ -126,7 +126,7 @@ platform: steps: - commands: - make GO_TAGS="nodocker" test - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Run Go tests trigger: event: @@ -141,7 +141,7 @@ platform: steps: - commands: - K8S_USE_DOCKER_NETWORK=1 make test - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Run Go tests volumes: - name: docker @@ -164,7 +164,7 @@ platform: steps: - commands: - go test -tags="nodocker,nonetwork" ./... - image: grafana/agent-build-image:0.30.4-windows + image: grafana/agent-build-image:0.31.0-windows name: Run Go tests trigger: ref: @@ -179,7 +179,7 @@ platform: steps: - commands: - make agent-image - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build container volumes: - name: docker @@ -204,7 +204,7 @@ platform: steps: - commands: - make agentctl-image - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build container volumes: - name: docker @@ -229,7 +229,7 @@ platform: steps: - commands: - make operator-image - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build container volumes: - name: docker @@ -255,7 +255,7 @@ platform: steps: - commands: - '& "C:/Program Files/git/bin/bash.exe" ./tools/ci/docker-containers-windows agent' - image: grafana/agent-build-image:0.30.4-windows + image: grafana/agent-build-image:0.31.0-windows name: Build container volumes: - name: docker @@ -281,7 +281,7 @@ platform: steps: - commands: - '& "C:/Program Files/git/bin/bash.exe" ./tools/ci/docker-containers-windows agentctl' - image: grafana/agent-build-image:0.30.4-windows + image: grafana/agent-build-image:0.31.0-windows name: Build container volumes: - name: docker @@ -308,7 +308,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -325,7 +325,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make agent - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -342,7 +342,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make agent - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 
name: Build trigger: event: @@ -359,7 +359,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make agent - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -375,7 +375,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -391,7 +391,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agent - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -407,7 +407,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -423,7 +423,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -440,7 +440,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -457,7 +457,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make agent-flow - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -474,7 +474,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make agent-flow - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -491,7 +491,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make agent-flow - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -507,7 +507,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -523,7 +523,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agent-flow - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -539,7 +539,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -555,7 +555,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -572,7 +572,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -589,7 +589,7 @@ steps: - make generate-ui - 
GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make agentctl - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -606,7 +606,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make agentctl - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -623,7 +623,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make agentctl - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -639,7 +639,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -655,7 +655,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agentctl - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -671,7 +671,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -687,7 +687,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -704,7 +704,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -721,7 +721,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make operator - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -738,7 +738,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make operator - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -755,7 +755,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make operator - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -771,7 +771,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -787,7 +787,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make operator - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -803,7 +803,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -819,7 +819,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make 
operator - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -836,7 +836,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= GOEXPERIMENT=boringcrypto make agent-boringcrypto - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -853,7 +853,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= GOEXPERIMENT=boringcrypto make agent-boringcrypto - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Build trigger: event: @@ -869,7 +869,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Configure QEMU volumes: - name: docker @@ -889,7 +889,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Publish container volumes: - name: docker @@ -913,7 +913,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Configure QEMU volumes: - name: docker @@ -933,7 +933,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Publish container volumes: - name: docker @@ -957,7 +957,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Configure QEMU volumes: - name: docker @@ -977,7 +977,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Publish container volumes: - name: docker @@ -1001,7 +1001,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Configure QEMU volumes: - name: docker @@ -1021,7 +1021,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Publish container volumes: - name: docker @@ -1050,7 +1050,7 @@ steps: from_secret: docker_login DOCKER_PASSWORD: from_secret: docker_password - image: grafana/agent-build-image:0.30.4-windows + image: grafana/agent-build-image:0.31.0-windows name: Build containers volumes: - name: docker @@ -1079,7 +1079,7 @@ steps: from_secret: docker_login DOCKER_PASSWORD: from_secret: docker_password - image: grafana/agent-build-image:0.30.4-windows + image: grafana/agent-build-image:0.31.0-windows name: Build containers volumes: - name: docker @@ -1196,7 +1196,7 @@ steps: from_secret: gpg_private_key GPG_PUBLIC_KEY: from_secret: gpg_public_key - image: grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Publish release volumes: - name: docker @@ -1221,7 +1221,7 @@ steps: - DOCKER_OPTS="" make dist/grafana-agentctl-linux-amd64 - DOCKER_OPTS="" make dist.temp/grafana-agent-flow-linux-amd64 - DOCKER_OPTS="" make test-packages - image: 
grafana/agent-build-image:0.30.4 + image: grafana/agent-build-image:0.31.0 name: Test Linux system packages volumes: - name: docker @@ -1317,6 +1317,6 @@ kind: secret name: updater_private_key --- kind: signature -hmac: b4b3bb3578124bba1758f323695216281365054c623738d38e51cc37125277ae +hmac: a77b4f7b2708cc4417edd42fae39c307abcaec2c4f0f6176212665133bd20d5d ... diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 4b9f7077ed57..823ce60c2194 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -16,7 +16,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version: "1.22" - name: Set OTEL Exporter Endpoint run: echo "OTEL_EXPORTER_ENDPOINT=172.17.0.1:4318" >> $GITHUB_ENV - name: Run tests diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 59d4fbe34540..8778ce4154e2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -17,10 +17,10 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 - - name: Set up Go 1.21 + - name: Set up Go 1.22 uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version: "1.22" cache: true - name: Test run: make GO_TAGS="nodocker" test diff --git a/CHANGELOG.md b/CHANGELOG.md index a7c4c7020881..78b628221fb7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,23 +10,51 @@ internal API changes are not present. Main (unreleased) ----------------- -### Security fixes +### Breaking changes -- Fixes following vulnerabilities (@hainenber) - - [GO-2023-2409](https://github.com/advisories/GHSA-mhpq-9638-x6pw) - - [GO-2023-2412](https://github.com/advisories/GHSA-7ww5-4wqc-m92c) - - [CVE-2023-49568](https://github.com/advisories/GHSA-mw99-9chc-xw7r) +- Prohibit the configuration of services within modules. (@wildum) + +- For `otelcol.exporter` components, change the default value of `disable_high_cardinality_metrics` to `true`. (@ptodev) + +### Features + +- A new `discovery.process` component for discovering Linux OS processes on the current host. (@korniltsev) + +- A new `pyroscope.java` component for profiling Java processes using async-profiler. (@korniltsev) + +- A new `otelcol.processor.resourcedetection` component which inserts resource attributes + to OTLP telemetry based on the host on which Grafana Agent is running. (@ptodev) + +- Expose `track_timestamps_staleness` on Prometheus scraping, to fix the issue where container metrics live for 5 minutes after the container disappears. (@ptodev) ### Enhancements +- Include line numbers in profiles produced by the `pyroscope.java` component. (@korniltsev) - Add an option to the windows static mode installer for expanding environment vars in the yaml config. (@erikbaranowski) - Add authentication support to `loki.source.awsfirehose` (@sberz) - Sort kubelet endpoint to reduce pressure on K8s's API server and watcher endpoints. (@hainenber) -- Expose `physical_disk` collector from `windows_exporter` v0.24.0 to +- Expose `physical_disk` collector from `windows_exporter` v0.24.0 to Flow configuration. (@hainenber) +- Renamed Grafana Agent Mixin's "prometheus.remote_write" dashboard to + "Prometheus Components" and added charts for `prometheus.scrape` success rate + and duration metrics. (@thampiotr) + +- Removed `ClusterLamportClockDrift` and `ClusterLamportClockStuck` alerts from + Grafana Agent Mixin to focus on alerting on symptoms. 
(@thampiotr) + +- Increased clustering alert periods to 10 minutes to improve the + signal-to-noise ratio in Grafana Agent Mixin. (@thampiotr) + +- `mimir.rules.kubernetes` has a new `prometheus_http_prefix` argument to configure + the HTTP endpoint on which to connect to Mimir's API. (@hainenber) + +- `service_name` label is inferred from discovery meta labels in `pyroscope.java`. (@korniltsev) + +- Mutex and block pprofs are now available via the pprof endpoint. (@mattdurham) + ### Bugfixes - Fix an issue in `remote.s3` where the exported content of an object would be an empty string if `remote.s3` failed to fully retrieve @@ -37,6 +65,16 @@ Main (unreleased) - Fix a duplicate metrics registration panic when sending metrics to a static mode metric instance's write handler. (@tpaschalis) +- Fix issue causing duplicate logs when a docker target is restarted. (@captncraig) + +- Fix an issue where blocks having the same type and the same label across + modules could result in missed updates. (@thampiotr) + +- Fix an issue with static integrations-next marshaling where non singletons + would cause `/-/config` to fail to marshal. (@erikbaranowski) + +- Fix divide-by-zero issue when sharding targets. (@hainenber) + ### Other changes - Removed support for Windows 2012 in line with Microsoft end of life. (@mattdurham) @@ -45,6 +83,34 @@ Main (unreleased) - Updated dependency to add support for Go 1.22 (@stefanb) +- Use Go 1.22 for builds. (@rfratto) + +v0.39.2 (2024-01-31) +-------------------- + +### Bugfixes + +- Fix error introduced in v0.39.0 preventing remote write to Amazon Managed Prometheus. (@captncraig) + +- An error will be returned in the converter from Static to Flow when `scrape_integration` is set + to `true` but no `remote_write` is defined. (@erikbaranowski) + + +v0.39.1 (2024-01-19) +-------------------- + +### Security fixes + +- Fixes following vulnerabilities (@hainenber) + - [GO-2023-2409](https://github.com/advisories/GHSA-mhpq-9638-x6pw) + - [GO-2023-2412](https://github.com/advisories/GHSA-7ww5-4wqc-m92c) + - [CVE-2023-49568](https://github.com/advisories/GHSA-mw99-9chc-xw7r) + +### Bugfixes + +- Fix issue where installing the Windows Agent Flow installer would hang then crash. (@mattdurham) + + v0.39.0 (2024-01-09) -------------------- @@ -63,7 +129,7 @@ v0.39.0 (2024-01-09) - This change will not break any existing configurations and you can opt in to validation via the `validate_dimensions` configuration option. - Before this change, pulling metrics for azure resources with variable dimensions required one configuration per metric + dimension combination to avoid an error. - After this change, you can include all metrics and dimensions in a single configuration and the Azure APIs will only return dimensions which are valid for the various metrics. - + ### Features - A new `discovery.ovhcloud` component for discovering scrape targets on OVHcloud. (@ptodev) @@ -160,7 +226,7 @@ v0.39.0 (2024-01-09) - Attach unique Agent ID header to remote-write requests. (@captncraig) - Update to v2.48.1 of `github.com/prometheus/prometheus`. - Previously, a custom fork of v2.47.2 was used. + Previously, a custom fork of v2.47.2 was used. The custom fork of v2.47.2 also contained prometheus#12729 and prometheus#12677. 
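The `prometheus_http_prefix` entry above maps to a new optional argument on `mimir.rules.kubernetes`; see the `component/mimir/rules/kubernetes/types.go` change later in this diff. A minimal River sketch of setting it, with placeholder address, tenant, and prefix values:

```
mimir.rules.kubernetes "local" {
  address                = "http://mimir-ruler:8080" // placeholder address
  tenant_id              = "team-a"                  // placeholder tenant
  prometheus_http_prefix = "/mimir/prometheus"       // optional; defaults to "/prometheus"
}
```

When omitted, the prefix falls back to the `"/prometheus"` default set in `types.go` below.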
v0.38.1 (2023-11-30) diff --git a/Makefile b/Makefile index 7bdd7fdee77f..f08f0f9d89a8 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ ## ## test Run tests ## lint Lint code -## integration-tests Run integration tests +## integration-test Run integration tests ## ## Targets for building binaries: ## @@ -167,7 +167,7 @@ test-packages: docker pull $(BUILD_IMAGE) go test -tags=packaging ./packaging -.PHONY: integration-tests +.PHONY: integration-test integration-test: cd integration-tests && $(GO_ENV) go run . diff --git a/build-image/Dockerfile b/build-image/Dockerfile index eeea8fdce018..7bce2ddfde9e 100644 --- a/build-image/Dockerfile +++ b/build-image/Dockerfile @@ -23,7 +23,7 @@ FROM alpine:3.17 as helm RUN apk add --no-cache helm # Dependency: Go and Go dependencies -FROM golang:1.21.4-bullseye as golang +FROM golang:1.22.0-bullseye as golang # Keep in sync with cmd/grafana-agent-operator/DEVELOPERS.md ENV CONTROLLER_GEN_VERSION v0.9.2 diff --git a/build-image/windows/Dockerfile b/build-image/windows/Dockerfile index 6664d8928e82..ddd3448e2c09 100644 --- a/build-image/windows/Dockerfile +++ b/build-image/windows/Dockerfile @@ -1,4 +1,4 @@ -FROM library/golang:1.21.4-windowsservercore-1809 +FROM library/golang:1.22.0-windowsservercore-1809 SHELL ["powershell", "-command"] diff --git a/cmd/grafana-agent-operator/DEVELOPERS.md b/cmd/grafana-agent-operator/DEVELOPERS.md index 9c2453e1f9f9..58f7be9ae8d5 100644 --- a/cmd/grafana-agent-operator/DEVELOPERS.md +++ b/cmd/grafana-agent-operator/DEVELOPERS.md @@ -74,7 +74,7 @@ running. ### Apply the CRDs Generated CRDs used by the operator can be found in [the Production -folder](../../production/operator/crds). Deploy them from the root of the +folder](../../operations/agent-static-operator/crds). Deploy them from the root of the repository with: ``` diff --git a/cmd/internal/flowmode/cmd_run.go b/cmd/internal/flowmode/cmd_run.go index a1c7d26f2e7d..c1c92987400a 100644 --- a/cmd/internal/flowmode/cmd_run.go +++ b/cmd/internal/flowmode/cmd_run.go @@ -8,6 +8,8 @@ import ( "os" "os/signal" "path/filepath" + "runtime" + "strconv" "strings" "sync" "syscall" @@ -178,6 +180,9 @@ func (fr *flowRun) Run(configPath string) error { level.Info(l).Log("boringcrypto enabled", boringcrypto.Enabled) + // Enable mutex and block profiling. + setMutexBlockProfiling(l) + // Immediately start the tracer. go func() { err := t.Run(ctx) @@ -365,7 +370,7 @@ func getEnabledComponentsFunc(f *flow.Flow) func() map[string]interface{} { components := component.GetAllComponents(f, component.InfoOptions{}) componentNames := map[string]struct{}{} for _, c := range components { - componentNames[c.Registration.Name] = struct{}{} + componentNames[c.ComponentName] = struct{}{} } return map[string]interface{}{"enabled-components": maps.Keys(componentNames)} } @@ -455,3 +460,34 @@ func splitPeers(s, sep string) []string { } return strings.Split(s, sep) } + +func setMutexBlockProfiling(l log.Logger) { + mutexPercent := os.Getenv("PPROF_MUTEX_PROFILING_PERCENT") + if mutexPercent != "" { + rate, err := strconv.Atoi(mutexPercent) + if err == nil && rate > 0 { + // The runtime interprets the value as 1/rate, so a percentage is converted with 100/rate: 50 becomes 100/50 = 2, i.e. 1/2, or 50%. + runtime.SetMutexProfileFraction(100 / rate) + } else { + level.Error(l).Log("msg", "error setting PPROF_MUTEX_PROFILING_PERCENT", "err", err, "value", mutexPercent) + runtime.SetMutexProfileFraction(1000) + } + } else { + // Default to 1000 because that is what Istio defaults to, which seemed a reasonable starting point.
This is 0.1% sampling. + runtime.SetMutexProfileFraction(1000) + } + blockRate := os.Getenv("PPROF_BLOCK_PROFILING_RATE") + if blockRate != "" { + rate, err := strconv.Atoi(blockRate) + if err == nil && rate > 0 { + runtime.SetBlockProfileRate(rate) + } else { + level.Error(l).Log("msg", "error setting PPROF_BLOCK_PROFILING_RATE", "err", err, "value", blockRate) + runtime.SetBlockProfileRate(10_000) + } + } else { + // This should have a negligible impact. This will track anything over 10_000ns, and will randomly sample shorter durations. + // Default taken from https://github.com/DataDog/go-profiler-notes/blob/main/block.md + runtime.SetBlockProfileRate(10_000) + } +} diff --git a/component/all/all.go b/component/all/all.go index b404f27ad4eb..0bf3da725bbf 100644 --- a/component/all/all.go +++ b/component/all/all.go @@ -25,6 +25,7 @@ import ( _ "github.com/grafana/agent/component/discovery/nomad" // Import discovery.nomad _ "github.com/grafana/agent/component/discovery/openstack" // Import discovery.openstack _ "github.com/grafana/agent/component/discovery/ovhcloud" // Import discovery.ovhcloud + _ "github.com/grafana/agent/component/discovery/process" // Import discovery.process _ "github.com/grafana/agent/component/discovery/puppetdb" // Import discovery.puppetdb _ "github.com/grafana/agent/component/discovery/relabel" // Import discovery.relabel _ "github.com/grafana/agent/component/discovery/scaleway" // Import discovery.scaleway @@ -81,6 +82,7 @@ import ( _ "github.com/grafana/agent/component/otelcol/processor/k8sattributes" // Import otelcol.processor.k8sattributes _ "github.com/grafana/agent/component/otelcol/processor/memorylimiter" // Import otelcol.processor.memory_limiter _ "github.com/grafana/agent/component/otelcol/processor/probabilistic_sampler" // Import otelcol.processor.probabilistic_sampler + _ "github.com/grafana/agent/component/otelcol/processor/resourcedetection" // Import otelcol.processor.resourcedetection _ "github.com/grafana/agent/component/otelcol/processor/span" // Import otelcol.processor.span _ "github.com/grafana/agent/component/otelcol/processor/tail_sampling" // Import otelcol.processor.tail_sampling _ "github.com/grafana/agent/component/otelcol/processor/transform" // Import otelcol.processor.transform @@ -127,6 +129,7 @@ import ( _ "github.com/grafana/agent/component/prometheus/remotewrite" // Import prometheus.remote_write _ "github.com/grafana/agent/component/prometheus/scrape" // Import prometheus.scrape _ "github.com/grafana/agent/component/pyroscope/ebpf" // Import pyroscope.ebpf + _ "github.com/grafana/agent/component/pyroscope/java" // Import pyroscope.java _ "github.com/grafana/agent/component/pyroscope/scrape" // Import pyroscope.scrape _ "github.com/grafana/agent/component/pyroscope/write" // Import pyroscope.write _ "github.com/grafana/agent/component/remote/http" // Import remote.http diff --git a/component/component_provider.go b/component/component_provider.go index 90454b5b04c3..630961d8f6db 100644 --- a/component/component_provider.go +++ b/component/component_provider.go @@ -93,8 +93,8 @@ type Info struct { // this component depends on, or is depended on by, respectively. References, ReferencedBy []string - Registration Registration // Component registration. - Health Health // Current component health. + ComponentName string // Name of the component. + Health Health // Current component health. Arguments Arguments // Current arguments value of the component. Exports Exports // Current exports value of the component. 
@@ -157,7 +157,7 @@ func (info *Info) MarshalJSON() ([]byte, error) { } return json.Marshal(&componentDetailJSON{ - Name: info.Registration.Name, + Name: info.ComponentName, Type: "block", ModuleID: info.ID.ModuleID, LocalID: info.ID.LocalID, diff --git a/component/discovery/discovery.go b/component/discovery/discovery.go index 540b7586943b..54589968533a 100644 --- a/component/discovery/discovery.go +++ b/component/discovery/discovery.go @@ -45,7 +45,13 @@ func (t *DistributedTargets) Get() []Target { return t.targets } - res := make([]Target, 0, (len(t.targets)+1)/len(t.cluster.Peers())) + peerCount := len(t.cluster.Peers()) + resCap := (len(t.targets) + 1) + if peerCount != 0 { + resCap = (len(t.targets) + 1) / peerCount + } + + res := make([]Target, 0, resCap) for _, tgt := range t.targets { peers, err := t.cluster.Lookup(shard.StringKey(tgt.NonMetaLabels().String()), 1, shard.OpReadWrite) @@ -55,7 +61,7 @@ func (t *DistributedTargets) Get() []Target { // back to owning the target ourselves. res = append(res, tgt) } - if peers[0].Self { + if len(peers) == 0 || peers[0].Self { res = append(res, tgt) } } diff --git a/component/discovery/process/args.go b/component/discovery/process/args.go new file mode 100644 index 000000000000..636f6231867d --- /dev/null +++ b/component/discovery/process/args.go @@ -0,0 +1,37 @@ +package process + +import ( + "time" + + "github.com/grafana/agent/component/discovery" +) + +type Arguments struct { + Join []discovery.Target `river:"join,attr,optional"` + RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` + DiscoverConfig DiscoverConfig `river:"discover_config,block,optional"` +} + +type DiscoverConfig struct { + Cwd bool `river:"cwd,attr,optional"` + Exe bool `river:"exe,attr,optional"` + Commandline bool `river:"commandline,attr,optional"` + Username bool `river:"username,attr,optional"` + UID bool `river:"uid,attr,optional"` + ContainerID bool `river:"container_id,attr,optional"` +} + +var DefaultConfig = Arguments{ + Join: nil, + RefreshInterval: 60 * time.Second, + DiscoverConfig: DiscoverConfig{ + Cwd: true, + Exe: true, + Commandline: true, + ContainerID: true, + }, +} + +func (args *Arguments) SetToDefault() { + *args = DefaultConfig +} diff --git a/component/discovery/process/container.go b/component/discovery/process/container.go new file mode 100644 index 000000000000..8453e99728a1 --- /dev/null +++ b/component/discovery/process/container.go @@ -0,0 +1,58 @@ +//go:build linux + +package process + +import ( + "bufio" + "io" + "regexp" + "strings" + + "github.com/grafana/agent/component/discovery" +) + +var ( + // cgroupContainerIDRe matches a container ID from a /proc/{pid}/cgroup line + cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]{64})(?:\.|\s*$)`) +) + +func getContainerIDFromCGroup(cgroup io.Reader) string { + scanner := bufio.NewScanner(cgroup) + for scanner.Scan() { + line := scanner.Bytes() + matches := cgroupContainerIDRe.FindSubmatch(line) + if len(matches) <= 1 { + continue + } + return string(matches[1]) + } + return "" +} + +var knownContainerIDPrefixes = []string{"docker://", "containerd://", "cri-o://"} + +// getContainerIDFromK8S gets the container ID from the __meta_kubernetes_pod_container_id label +func getContainerIDFromK8S(k8sContainerID string) string { + for _, p := range knownContainerIDPrefixes { + if strings.HasPrefix(k8sContainerID, p) { + return strings.TrimPrefix(k8sContainerID, p) + } + } + return "" +} + +func getContainerIDFromTarget(target discovery.Target) string { + cid, ok := target[labelProcessContainerID] + 
if ok && cid != "" { + return cid + } + cid, ok = target["__meta_kubernetes_pod_container_id"] + if ok && cid != "" { + return getContainerIDFromK8S(cid) + } + cid, ok = target["__meta_docker_container_id"] + if ok && cid != "" { + return cid + } + return "" +} diff --git a/component/discovery/process/container_test.go b/component/discovery/process/container_test.go new file mode 100644 index 000000000000..9a1facc078fe --- /dev/null +++ b/component/discovery/process/container_test.go @@ -0,0 +1,66 @@ +//go:build linux + +package process + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCGroupMatching(t *testing.T) { + type testcase = struct { + containerID, cgroup, expectedID string + } + testcases := []testcase{ + { + containerID: "containerd://a534eb629135e43beb13213976e37bb2ab95cba4c0d1d0b4e27c6bc4d8091b83", + cgroup: "12:cpuset:/kubepods.slice/kubepods-burstable.slice/" + + "kubepods-burstable-pod471203d1_984f_477e_9c35_db96487ffe5e.slice/" + + "cri-containerd-a534eb629135e43beb13213976e37bb2ab95cba4c0d1d0b4e27c6bc4d8091b83.scope", + expectedID: "a534eb629135e43beb13213976e37bb2ab95cba4c0d1d0b4e27c6bc4d8091b83", + }, + { + containerID: "cri-o://0ecc7949cbaf17e883264ea1055f60b184a7cb264fd759c4a692e1155086fe2d", + cgroup: "0::/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb57320a0_e7eb_4ac8_a791_4c4472796867.slice/" + + "crio-0ecc7949cbaf17e883264ea1055f60b184a7cb264fd759c4a692e1155086fe2d.scope", + expectedID: "0ecc7949cbaf17e883264ea1055f60b184a7cb264fd759c4a692e1155086fe2d", + }, + { + + containerID: "docker://656959d9ee87a0b131c601ce9d9f8f76b1dda60e8608c503b5979d849cbdc714", + cgroup: "0::/../../kubepods-besteffort-pod88f6f4e3_59c0_4ce8_9ecf_391c8b5a60ad.slice/" + + "docker-656959d9ee87a0b131c601ce9d9f8f76b1dda60e8608c503b5979d849cbdc714.scope", + expectedID: "656959d9ee87a0b131c601ce9d9f8f76b1dda60e8608c503b5979d849cbdc714", + }, + { + containerID: "containerd://47e320f795efcec1ecf2001c3a09c95e3701ed87de8256837b70b10e23818251", + cgroup: "0::/kubepods.slice/kubepods-burstable.slice/" + + "kubepods-burstable-podf9a04ecc_1875_491b_926c_d2f64757704e.slice/" + + "cri-containerd-47e320f795efcec1ecf2001c3a09c95e3701ed87de8256837b70b10e23818251.scope", + expectedID: "47e320f795efcec1ecf2001c3a09c95e3701ed87de8256837b70b10e23818251", + }, + { + containerID: "docker://7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + cgroup: "11:devices:/kubepods/besteffort/pod85adbef3-622f-4ef2-8f60-a8bdf3eb6c72/" + + "7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + expectedID: "7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + }, + { + containerID: "", + cgroup: "0::/../../user.slice/user-501.slice/session-3.scope", + expectedID: "", + }, + } + for i, tc := range testcases { + t.Run(fmt.Sprintf("testcase %d %s", i, tc.cgroup), func(t *testing.T) { + cid := getContainerIDFromCGroup(bytes.NewReader([]byte(tc.cgroup))) + expected := tc.expectedID + require.Equal(t, expected, cid) + cid = getContainerIDFromK8S(tc.containerID) + require.Equal(t, expected, cid) + }) + } +} diff --git a/component/discovery/process/discover.go b/component/discovery/process/discover.go new file mode 100644 index 000000000000..70bcd907cf65 --- /dev/null +++ b/component/discovery/process/discover.go @@ -0,0 +1,169 @@ +//go:build linux + +package process + +import ( + "errors" + "fmt" + "os" + "path" + "runtime" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + 
"github.com/grafana/agent/component/discovery" + gopsutil "github.com/shirou/gopsutil/v3/process" + "golang.org/x/sys/unix" +) + +const ( + labelProcessID = "__process_pid__" + labelProcessExe = "__meta_process_exe" + labelProcessCwd = "__meta_process_cwd" + labelProcessCommandline = "__meta_process_commandline" + labelProcessUsername = "__meta_process_username" + labelProcessUID = "__meta_process_uid" + labelProcessContainerID = "__container_id__" +) + +type process struct { + pid string + exe string + cwd string + commandline string + containerID string + username string + uid string +} + +func (p process) String() string { + return fmt.Sprintf("pid=%s exe=%s cwd=%s commandline=%s containerID=%s", p.pid, p.exe, p.cwd, p.commandline, p.containerID) +} + +func convertProcesses(ps []process) []discovery.Target { + var res []discovery.Target + for _, p := range ps { + t := convertProcess(p) + res = append(res, t) + } + return res +} + +func convertProcess(p process) discovery.Target { + t := make(discovery.Target, 5) + t[labelProcessID] = p.pid + if p.exe != "" { + t[labelProcessExe] = p.exe + } + if p.cwd != "" { + t[labelProcessCwd] = p.cwd + } + if p.commandline != "" { + t[labelProcessCommandline] = p.commandline + } + if p.containerID != "" { + t[labelProcessContainerID] = p.containerID + } + if p.username != "" { + t[labelProcessUsername] = p.username + } + if p.uid != "" { + t[labelProcessUID] = p.uid + } + return t +} + +func discover(l log.Logger, cfg *DiscoverConfig) ([]process, error) { + processes, err := gopsutil.Processes() + if err != nil { + return nil, fmt.Errorf("failed to list processes: %w", err) + } + res := make([]process, 0, len(processes)) + loge := func(pid int, e error) { + if errors.Is(e, unix.ESRCH) { + return + } + if errors.Is(e, os.ErrNotExist) { + return + } + _ = level.Error(l).Log("msg", "failed to get process info", "err", e, "pid", pid) + } + for _, p := range processes { + spid := fmt.Sprintf("%d", p.Pid) + var ( + exe, cwd, commandline, containerID, username, uid string + ) + if cfg.Exe { + exe, err = p.Exe() + if err != nil { + loge(int(p.Pid), err) + continue + } + } + if cfg.Cwd { + cwd, err = p.Cwd() + if err != nil { + loge(int(p.Pid), err) + continue + } + } + if cfg.Commandline { + commandline, err = p.Cmdline() + if err != nil { + loge(int(p.Pid), err) + continue + } + } + if cfg.Username { + username, err = p.Username() + if err != nil { + loge(int(p.Pid), err) + continue + } + } + if cfg.UID { + uids, err := p.Uids() + if err != nil { + loge(int(p.Pid), err) + continue + } + if len(uids) > 0 { + uid = fmt.Sprintf("%d", uids[0]) + } + } + + if cfg.ContainerID { + containerID, err = getLinuxProcessContainerID(spid) + if err != nil { + loge(int(p.Pid), err) + continue + } + } + res = append(res, process{ + pid: spid, + exe: exe, + cwd: cwd, + commandline: commandline, + containerID: containerID, + username: username, + uid: uid, + }) + } + + return res, nil +} + +func getLinuxProcessContainerID(pid string) (string, error) { + if runtime.GOOS == "linux" { + cgroup, err := os.Open(path.Join("/proc", pid, "cgroup")) + if err != nil { + return "", err + } + defer cgroup.Close() + cid := getContainerIDFromCGroup(cgroup) + if cid != "" { + return cid, nil + } + } + return "", nil +} diff --git a/component/discovery/process/join.go b/component/discovery/process/join.go new file mode 100644 index 000000000000..24f0a8b22830 --- /dev/null +++ b/component/discovery/process/join.go @@ -0,0 +1,43 @@ +//go:build linux + +package process + +import 
"github.com/grafana/agent/component/discovery" + +func join(processes, containers []discovery.Target) []discovery.Target { + res := make([]discovery.Target, 0, len(processes)+len(containers)) + + cid2container := make(map[string]discovery.Target, len(containers)) + for _, container := range containers { + cid := getContainerIDFromTarget(container) + if cid != "" { + cid2container[cid] = container + } else { + res = append(res, container) + } + } + for _, p := range processes { + cid := getContainerIDFromTarget(p) + if cid == "" { + res = append(res, p) + continue + } + container, ok := cid2container[cid] + if !ok { + res = append(res, p) + continue + } + mergedTarget := make(discovery.Target, len(p)+len(container)) + for k, v := range p { + mergedTarget[k] = v + } + for k, v := range container { + mergedTarget[k] = v + } + res = append(res, mergedTarget) + } + for _, target := range cid2container { + res = append(res, target) + } + return res +} diff --git a/component/discovery/process/join_test.go b/component/discovery/process/join_test.go new file mode 100644 index 000000000000..8ddd7dc7cdf9 --- /dev/null +++ b/component/discovery/process/join_test.go @@ -0,0 +1,126 @@ +//go:build linux + +package process + +import ( + "fmt" + "testing" + + "github.com/grafana/agent/component/discovery" + "github.com/stretchr/testify/assert" +) + +func TestJoin(t *testing.T) { + testdata := []struct { + processes []discovery.Target + containers []discovery.Target + res []discovery.Target + }{ + { + []discovery.Target{ + convertProcess(process{ + pid: "239", + exe: "/bin/foo", + cwd: "/", + containerID: "7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + }), + convertProcess(process{ + pid: "240", + exe: "/bin/bar", + cwd: "/tmp", + containerID: "7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + }), + convertProcess(process{ + pid: "241", + exe: "/bin/bash", + cwd: "/opt", + containerID: "", + }), + }, []discovery.Target{ + { + "__meta_docker_container_id": "7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + "foo": "bar", + }, + { + "__meta_kubernetes_pod_container_id": "docker://47e320f795efcec1ecf2001c3a09c95e3701ed87de8256837b70b10e23818251", + "qwe": "asd", + }, + { + "lol": "lol", + }, + }, []discovery.Target{ + { + "__process_pid__": "239", + "__meta_process_exe": "/bin/foo", + "__meta_process_cwd": "/", + "__container_id__": "7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + "__meta_docker_container_id": "7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + "foo": "bar", + }, + { + "__process_pid__": "240", + "__meta_process_exe": "/bin/bar", + "__meta_process_cwd": "/tmp", + "__container_id__": "7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + "__meta_docker_container_id": "7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + "foo": "bar", + }, + { + "__meta_docker_container_id": "7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + "foo": "bar", + }, + { + "__process_pid__": "241", + "__meta_process_exe": "/bin/bash", + "__meta_process_cwd": "/opt", + }, + { + "__meta_kubernetes_pod_container_id": "docker://47e320f795efcec1ecf2001c3a09c95e3701ed87de8256837b70b10e23818251", + "qwe": "asd", + }, + { + "lol": "lol", + }, + }, + }, + { + []discovery.Target{ + convertProcess(process{ + pid: "239", + exe: "/bin/foo", + cwd: "/", + containerID: "7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + }), + convertProcess(process{ + pid: "240", + exe: 
"/bin/bar", + cwd: "/", + containerID: "", + }), + }, + []discovery.Target{}, []discovery.Target{ + convertProcess(process{ + pid: "239", + exe: "/bin/foo", + cwd: "/", + containerID: "7edda1de1e0d1d366351e478359cf5fa16bb8ab53063a99bb119e56971bfb7e2", + }), + convertProcess(process{ + pid: "240", + exe: "/bin/bar", + cwd: "/", + containerID: "", + }), + }, + }, + } + for i, testdatum := range testdata { + t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { + res := join(testdatum.processes, testdatum.containers) + assert.Len(t, res, len(testdatum.res)) + for _, re := range testdatum.res { + assert.Contains(t, res, re) + } + }) + } +} diff --git a/component/discovery/process/process.go b/component/discovery/process/process.go new file mode 100644 index 000000000000..a32077ece804 --- /dev/null +++ b/component/discovery/process/process.go @@ -0,0 +1,86 @@ +//go:build linux + +package process + +import ( + "context" + "time" + + "github.com/go-kit/log" + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/discovery" +) + +func init() { + component.Register(component.Registration{ + Name: "discovery.process", + Args: Arguments{}, + Exports: discovery.Exports{}, + + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { + return New(opts, args.(Arguments)) + }, + }) +} + +func New(opts component.Options, args Arguments) (*Component, error) { + c := &Component{ + l: opts.Logger, + onStateChange: opts.OnStateChange, + argsUpdates: make(chan Arguments), + args: args, + } + return c, nil +} + +type Component struct { + l log.Logger + onStateChange func(e component.Exports) + processes []discovery.Target + argsUpdates chan Arguments + args Arguments +} + +func (c *Component) Run(ctx context.Context) error { + doDiscover := func() error { + processes, err := discover(c.l, &c.args.DiscoverConfig) + if err != nil { + return err + } + c.processes = convertProcesses(processes) + c.changed() + return nil + } + if err := doDiscover(); err != nil { + return err + } + + t := time.NewTicker(c.args.RefreshInterval) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return nil + case <-t.C: + if err := doDiscover(); err != nil { + return err + } + t.Reset(c.args.RefreshInterval) + case a := <-c.argsUpdates: + c.args = a + c.changed() + } + } +} + +func (c *Component) Update(args component.Arguments) error { + a := args.(Arguments) + c.argsUpdates <- a + return nil +} + +func (c *Component) changed() { + c.onStateChange(discovery.Exports{ + Targets: join(c.processes, c.args.Join), + }) +} diff --git a/component/discovery/process/process_stub.go b/component/discovery/process/process_stub.go new file mode 100644 index 000000000000..f3563fecf9f1 --- /dev/null +++ b/component/discovery/process/process_stub.go @@ -0,0 +1,42 @@ +//go:build !linux + +package process + +import ( + "context" + + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/discovery" + "github.com/grafana/agent/pkg/flow/logging/level" +) + +func init() { + component.Register(component.Registration{ + Name: "discovery.process", + Args: Arguments{}, + Exports: discovery.Exports{}, + + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { + return New(opts, args.(Arguments)) + }, + }) +} + +func New(opts component.Options, args Arguments) (*Component, error) { + _ = level.Warn(opts.Logger).Log("msg", "the discovery.process component only works on linux; enabling it otherwise will do nothing") + return &Component{}, nil 
+} + +type Component struct { +} + +func (c *Component) Run(ctx context.Context) error { + select { + case <-ctx.Done(): + return nil + } +} + +func (c *Component) Update(args component.Arguments) error { + return nil +} diff --git a/component/loki/source/docker/internal/dockertarget/target.go b/component/loki/source/docker/internal/dockertarget/target.go index b410d42b9cf2..25acdefa5e57 100644 --- a/component/loki/source/docker/internal/dockertarget/target.go +++ b/component/loki/source/docker/internal/dockertarget/target.go @@ -219,6 +219,7 @@ func (t *Target) process(r io.Reader, logStreamLset model.LabelSet) { // labels (e.g. duplicated and relabeled), but this shouldn't be the // case anyway. t.positions.Put(positions.CursorKey(t.containerName), t.labelsStr, ts.Unix()) + t.since = ts.Unix() } } diff --git a/component/loki/source/docker/internal/dockertarget/target_test.go b/component/loki/source/docker/internal/dockertarget/target_test.go index a2d2053e2c9a..979f15ffb751 100644 --- a/component/loki/source/docker/internal/dockertarget/target_test.go +++ b/component/loki/source/docker/internal/dockertarget/target_test.go @@ -9,7 +9,6 @@ import ( "net/http" "net/http/httptest" "os" - "sort" "strings" "testing" "time" @@ -24,6 +23,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -31,7 +31,13 @@ func TestDockerTarget(t *testing.T) { h := func(w http.ResponseWriter, r *http.Request) { switch path := r.URL.Path; { case strings.HasSuffix(path, "/logs"): - dat, err := os.ReadFile("testdata/flog.log") + var filePath string + if strings.Contains(r.URL.RawQuery, "since=0") { + filePath = "testdata/flog.log" + } else { + filePath = "testdata/flog_after_restart.log" + } + dat, err := os.ReadFile(filePath) require.NoError(t, err) _, err = w.Write(dat) require.NoError(t, err) @@ -76,15 +82,6 @@ func TestDockerTarget(t *testing.T) { require.NoError(t, err) tgt.StartIfNotRunning() - require.Eventually(t, func() bool { - return len(entryHandler.Received()) >= 5 - }, 5*time.Second, 100*time.Millisecond) - - received := entryHandler.Received() - sort.Slice(received, func(i, j int) bool { - return received[i].Timestamp.Before(received[j].Timestamp) - }) - expectedLines := []string{ "5.3.69.55 - - [09/Dec/2021:09:15:02 +0000] \"HEAD /brand/users/clicks-and-mortar/front-end HTTP/2.0\" 503 27087", "101.54.183.185 - - [09/Dec/2021:09:15:03 +0000] \"POST /next-generation HTTP/1.0\" 416 11468", @@ -92,9 +89,49 @@ func TestDockerTarget(t *testing.T) { "28.104.242.74 - - [09/Dec/2021:09:15:03 +0000] \"PATCH /value-added/cultivate/systems HTTP/2.0\" 405 11843", "150.187.51.54 - satterfield1852 [09/Dec/2021:09:15:03 +0000] \"GET /incentivize/deliver/innovative/cross-platform HTTP/1.1\" 301 13032", } - actualLines := make([]string, 0, 5) - for _, entry := range received[:5] { - actualLines = append(actualLines, entry.Line) + + assert.EventuallyWithT(t, func(c *assert.CollectT) { + assertExpectedLog(c, entryHandler, expectedLines) + }, 5*time.Second, 100*time.Millisecond, "Expected log lines were not found within the time limit.") + + tgt.Stop() + entryHandler.Clear() + // restart target to simulate container restart + tgt.StartIfNotRunning() + expectedLinesAfterRestart := []string{ + "243.115.12.215 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /morph/exploit/granular HTTP/1.0\" 500 26468", + "221.41.123.237 - - [09/Dec/2023:09:16:57 +0000] 
\"DELETE /user-centric/whiteboard HTTP/2.0\" 205 22487", + "89.111.144.144 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /open-source/e-commerce HTTP/1.0\" 401 11092", + "62.180.191.187 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /cultivate/integrate/technologies HTTP/2.0\" 302 12979", + "156.249.2.192 - - [09/Dec/2023:09:16:57 +0000] \"POST /revolutionize/mesh/metrics HTTP/2.0\" 401 5297", + } + assert.EventuallyWithT(t, func(c *assert.CollectT) { + assertExpectedLog(c, entryHandler, expectedLinesAfterRestart) + }, 5*time.Second, 100*time.Millisecond, "Expected log lines after restart were not found within the time limit.") +} + +// assertExpectedLog will verify that all expectedLines were received, in any order, without duplicates. +func assertExpectedLog(c *assert.CollectT, entryHandler *fake.Client, expectedLines []string) { + logLines := entryHandler.Received() + testLogLines := make(map[string]int) + for _, l := range logLines { + if containsString(expectedLines, l.Line) { + testLogLines[l.Line] += 1 + } + } + // assert that all log lines were received + assert.Len(c, testLogLines, len(expectedLines)) + // assert that there are no duplicated log lines + for _, v := range testLogLines { + assert.Equal(c, v, 1) + } +} + +func containsString(slice []string, str string) bool { + for _, item := range slice { + if item == str { + return true + } } - require.ElementsMatch(t, actualLines, expectedLines) + return false } diff --git a/component/loki/source/docker/internal/dockertarget/testdata/flog_after_restart.log b/component/loki/source/docker/internal/dockertarget/testdata/flog_after_restart.log new file mode 100644 index 000000000000..59afb576805e Binary files /dev/null and b/component/loki/source/docker/internal/dockertarget/testdata/flog_after_restart.log differ diff --git a/component/loki/source/podlogs/reconciler.go b/component/loki/source/podlogs/reconciler.go index 4d2ec87495f2..66baba5e86f7 100644 --- a/component/loki/source/podlogs/reconciler.go +++ b/component/loki/source/podlogs/reconciler.go @@ -131,7 +131,13 @@ func distributeTargets(c cluster.Cluster, targets []*kubetail.Target) []*kubetai return targets } - res := make([]*kubetail.Target, 0, (len(targets)+1)/len(c.Peers())) + peerCount := len(c.Peers()) + resCap := len(targets) + 1 + if peerCount != 0 { + resCap = (len(targets) + 1) / peerCount + } + + res := make([]*kubetail.Target, 0, resCap) for _, target := range targets { peers, err := c.Lookup(shard.StringKey(target.Labels().String()), 1, shard.OpReadWrite) @@ -141,7 +147,7 @@ func distributeTargets(c cluster.Cluster, targets []*kubetail.Target) []*kubetai // back to owning the target ourselves. 
res = append(res, target) } - if peers[0].Self { + if len(peers) == 0 || peers[0].Self { res = append(res, target) } } diff --git a/component/mimir/rules/kubernetes/rules.go b/component/mimir/rules/kubernetes/rules.go index 016a888d9104..14765a865095 100644 --- a/component/mimir/rules/kubernetes/rules.go +++ b/component/mimir/rules/kubernetes/rules.go @@ -261,10 +261,11 @@ func (c *Component) init() error { httpClient := c.args.HTTPClientConfig.Convert() c.mimirClient, err = mimirClient.New(c.log, mimirClient.Config{ - ID: c.args.TenantID, - Address: c.args.Address, - UseLegacyRoutes: c.args.UseLegacyRoutes, - HTTPClientConfig: *httpClient, + ID: c.args.TenantID, + Address: c.args.Address, + UseLegacyRoutes: c.args.UseLegacyRoutes, + PrometheusHTTPPrefix: c.args.PrometheusHTTPPrefix, + HTTPClientConfig: *httpClient, }, c.metrics.mimirClientTiming) if err != nil { return err diff --git a/component/mimir/rules/kubernetes/types.go b/component/mimir/rules/kubernetes/types.go index d8e2445e5bf2..390a4f6a4124 100644 --- a/component/mimir/rules/kubernetes/types.go +++ b/component/mimir/rules/kubernetes/types.go @@ -11,6 +11,7 @@ type Arguments struct { Address string `river:"address,attr"` TenantID string `river:"tenant_id,attr,optional"` UseLegacyRoutes bool `river:"use_legacy_routes,attr,optional"` + PrometheusHTTPPrefix string `river:"prometheus_http_prefix,attr,optional"` HTTPClientConfig config.HTTPClientConfig `river:",squash"` SyncInterval time.Duration `river:"sync_interval,attr,optional"` MimirNameSpacePrefix string `river:"mimir_namespace_prefix,attr,optional"` @@ -23,6 +24,7 @@ var DefaultArguments = Arguments{ SyncInterval: 30 * time.Second, MimirNameSpacePrefix: "agent", HTTPClientConfig: config.DefaultHTTPClientConfig, + PrometheusHTTPPrefix: "/prometheus", } // SetToDefault implements river.Defaulter. diff --git a/component/module/git/git.go b/component/module/git/git.go index dfe17ef2cb4a..607fcd4577a6 100644 --- a/component/module/git/git.go +++ b/component/module/git/git.go @@ -12,7 +12,7 @@ import ( "github.com/go-kit/log" "github.com/grafana/agent/component" "github.com/grafana/agent/component/module" - "github.com/grafana/agent/component/module/git/internal/vcs" + "github.com/grafana/agent/internal/vcs" "github.com/grafana/agent/pkg/flow/logging/level" ) diff --git a/component/otelcol/config_debug_metrics.go b/component/otelcol/config_debug_metrics.go index ca8575bee6de..f387f64cbfdf 100644 --- a/component/otelcol/config_debug_metrics.go +++ b/component/otelcol/config_debug_metrics.go @@ -7,7 +7,7 @@ type DebugMetricsArguments struct { // DefaultDebugMetricsArguments holds default settings for DebugMetricsArguments. var DefaultDebugMetricsArguments = DebugMetricsArguments{ - DisableHighCardinalityMetrics: false, + DisableHighCardinalityMetrics: true, } // SetToDefault implements river.Defaulter. diff --git a/component/otelcol/config_k8s.go b/component/otelcol/config_k8s.go new file mode 100644 index 000000000000..b20407fd41fb --- /dev/null +++ b/component/otelcol/config_k8s.go @@ -0,0 +1,35 @@ +package otelcol + +import "fmt" + +const ( + KubernetesAPIConfig_AuthType_None = "none" + KubernetesAPIConfig_AuthType_ServiceAccount = "serviceAccount" + KubernetesAPIConfig_AuthType_KubeConfig = "kubeConfig" + KubernetesAPIConfig_AuthType_TLS = "tls" +) + +// KubernetesAPIConfig contains options relevant to connecting to the K8s API +type KubernetesAPIConfig struct { + // How to authenticate to the K8s API server. 
This can be one of `none` + // (for no auth), `serviceAccount` (to use the standard service account + // token provided to the agent pod), or `kubeConfig` to use credentials + // from `~/.kube/config`. + AuthType string `river:"auth_type,attr,optional"` + + // When using auth_type `kubeConfig`, override the current context. + Context string `river:"context,attr,optional"` +} + +// Validate returns an error if the config is invalid. +func (c *KubernetesAPIConfig) Validate() error { + switch c.AuthType { + case KubernetesAPIConfig_AuthType_None, + KubernetesAPIConfig_AuthType_ServiceAccount, + KubernetesAPIConfig_AuthType_KubeConfig, + KubernetesAPIConfig_AuthType_TLS: + return nil + default: + return fmt.Errorf("invalid auth_type %q", c.AuthType) + } +} diff --git a/component/otelcol/exporter/loadbalancing/loadbalancing.go b/component/otelcol/exporter/loadbalancing/loadbalancing.go index 3455318fef38..d4b8a87cf5f6 100644 --- a/component/otelcol/exporter/loadbalancing/loadbalancing.go +++ b/component/otelcol/exporter/loadbalancing/loadbalancing.go @@ -59,7 +59,8 @@ var ( Protocol: Protocol{ OTLP: DefaultOTLPConfig, }, - RoutingKey: "traceID", + RoutingKey: "traceID", + DebugMetrics: otelcol.DefaultDebugMetricsArguments, } DefaultOTLPConfig = OtlpConfig{ diff --git a/component/otelcol/exporter/loadbalancing/loadbalancing_test.go b/component/otelcol/exporter/loadbalancing/loadbalancing_test.go index 5e528dd373a3..abc37bc1703d 100644 --- a/component/otelcol/exporter/loadbalancing/loadbalancing_test.go +++ b/component/otelcol/exporter/loadbalancing/loadbalancing_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/exporter/loadbalancing" "github.com/grafana/river" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter" @@ -268,3 +269,83 @@ func TestConfigConversion(t *testing.T) { }) } } + +func TestDebugMetricsConfig(t *testing.T) { + tests := []struct { + testName string + agentCfg string + expected otelcol.DebugMetricsArguments + }{ + { + testName: "default", + agentCfg: ` + resolver { + static { + hostnames = ["endpoint-1"] + } + } + protocol { + otlp { + client {} + } + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + }, + }, + { + testName: "explicit_false", + agentCfg: ` + resolver { + static { + hostnames = ["endpoint-1"] + } + } + protocol { + otlp { + client {} + } + } + debug_metrics { + disable_high_cardinality_metrics = false + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: false, + }, + }, + { + testName: "explicit_true", + agentCfg: ` + resolver { + static { + hostnames = ["endpoint-1"] + } + } + protocol { + otlp { + client {} + } + } + debug_metrics { + disable_high_cardinality_metrics = true + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.testName, func(t *testing.T) { + var args loadbalancing.Arguments + require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + _, err := args.Convert() + require.NoError(t, err) + + require.Equal(t, tc.expected, args.DebugMetricsConfig()) + }) + } +} diff --git a/component/otelcol/exporter/logging/logging.go b/component/otelcol/exporter/logging/logging.go index 13d12fbf312e..3156309ab7cf 100644 --- a/component/otelcol/exporter/logging/logging.go +++ b/component/otelcol/exporter/logging/logging.go @@ -41,6 +41,7 @@ var DefaultArguments = 
Arguments{ Verbosity: configtelemetry.LevelNormal, SamplingInitial: 2, SamplingThereafter: 500, + DebugMetrics: otelcol.DefaultDebugMetricsArguments, } // SetToDefault implements river.Defaulter. diff --git a/component/otelcol/exporter/otlp/otlp.go b/component/otelcol/exporter/otlp/otlp.go index 7ca10d2c2c0b..f473c4722571 100644 --- a/component/otelcol/exporter/otlp/otlp.go +++ b/component/otelcol/exporter/otlp/otlp.go @@ -43,10 +43,11 @@ var _ exporter.Arguments = Arguments{} // DefaultArguments holds default values for Arguments. var DefaultArguments = Arguments{ - Timeout: otelcol.DefaultTimeout, - Queue: otelcol.DefaultQueueArguments, - Retry: otelcol.DefaultRetryArguments, - Client: DefaultGRPCClientArguments, + Timeout: otelcol.DefaultTimeout, + Queue: otelcol.DefaultQueueArguments, + Retry: otelcol.DefaultRetryArguments, + Client: DefaultGRPCClientArguments, + DebugMetrics: otelcol.DefaultDebugMetricsArguments, } // SetToDefault implements river.Defaulter. diff --git a/component/otelcol/exporter/otlp/otlp_test.go b/component/otelcol/exporter/otlp/otlp_test.go index 9c256ab94ba2..13bd8e56883d 100644 --- a/component/otelcol/exporter/otlp/otlp_test.go +++ b/component/otelcol/exporter/otlp/otlp_test.go @@ -143,3 +143,62 @@ func createTestTraces() ptrace.Traces { } return data } + +func TestDebugMetricsConfig(t *testing.T) { + tests := []struct { + testName string + agentCfg string + expected otelcol.DebugMetricsArguments + }{ + { + testName: "default", + agentCfg: ` + client { + endpoint = "tempo-xxx.grafana.net/tempo:443" + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + }, + }, + { + testName: "explicit_false", + agentCfg: ` + client { + endpoint = "tempo-xxx.grafana.net/tempo:443" + } + debug_metrics { + disable_high_cardinality_metrics = false + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: false, + }, + }, + { + testName: "explicit_true", + agentCfg: ` + client { + endpoint = "tempo-xxx.grafana.net/tempo:443" + } + debug_metrics { + disable_high_cardinality_metrics = true + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.testName, func(t *testing.T) { + var args otlp.Arguments + require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + _, err := args.Convert() + require.NoError(t, err) + + require.Equal(t, tc.expected, args.DebugMetricsConfig()) + }) + } +} diff --git a/component/otelcol/exporter/otlphttp/otlphttp.go b/component/otelcol/exporter/otlphttp/otlphttp.go index 0508ec2e6289..b8d3aeaf6956 100644 --- a/component/otelcol/exporter/otlphttp/otlphttp.go +++ b/component/otelcol/exporter/otlphttp/otlphttp.go @@ -48,9 +48,10 @@ var _ exporter.Arguments = Arguments{} // DefaultArguments holds default values for Arguments. var DefaultArguments = Arguments{ - Queue: otelcol.DefaultQueueArguments, - Retry: otelcol.DefaultRetryArguments, - Client: DefaultHTTPClientArguments, + Queue: otelcol.DefaultQueueArguments, + Retry: otelcol.DefaultRetryArguments, + Client: DefaultHTTPClientArguments, + DebugMetrics: otelcol.DefaultDebugMetricsArguments, } // SetToDefault implements river.Defaulter. 
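+// NOTE: DefaultDebugMetricsArguments now defaults DisableHighCardinalityMetrics
+// to true, so the exporters above stop emitting high-cardinality debug metrics
+// unless a user opts back in. A minimal River sketch of the opt-in (the
+// component label "default" and the endpoint are illustrative, reusing values
+// from the tests below):
+//
+//   otelcol.exporter.otlphttp "default" {
+//     client {
+//       endpoint = "http://tempo:4317"
+//     }
+//     debug_metrics {
+//       disable_high_cardinality_metrics = false
+//     }
+//   }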
diff --git a/component/otelcol/exporter/otlphttp/otlphttp_test.go b/component/otelcol/exporter/otlphttp/otlphttp_test.go index 64e6328b2fb5..6a2449db6204 100644 --- a/component/otelcol/exporter/otlphttp/otlphttp_test.go +++ b/component/otelcol/exporter/otlphttp/otlphttp_test.go @@ -114,3 +114,62 @@ func createTestTraces() ptrace.Traces { } return data } + +func TestDebugMetricsConfig(t *testing.T) { + tests := []struct { + testName string + agentCfg string + expected otelcol.DebugMetricsArguments + }{ + { + testName: "default", + agentCfg: ` + client { + endpoint = "http://tempo:4317" + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + }, + }, + { + testName: "explicit_false", + agentCfg: ` + client { + endpoint = "http://tempo:4317" + } + debug_metrics { + disable_high_cardinality_metrics = false + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: false, + }, + }, + { + testName: "explicit_true", + agentCfg: ` + client { + endpoint = "http://tempo:4317" + } + debug_metrics { + disable_high_cardinality_metrics = true + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.testName, func(t *testing.T) { + var args otlphttp.Arguments + require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + _, err := args.Convert() + require.NoError(t, err) + + require.Equal(t, tc.expected, args.DebugMetricsConfig()) + }) + } +} diff --git a/component/otelcol/processor/processortest/compare_signals.go b/component/otelcol/processor/processortest/compare_signals.go new file mode 100644 index 000000000000..3fdc52cad1e1 --- /dev/null +++ b/component/otelcol/processor/processortest/compare_signals.go @@ -0,0 +1,46 @@ +package processortest + +import ( + "testing" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/plogtest" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/ptracetest" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +func CompareMetrics(t *testing.T, expected, actual pmetric.Metrics) { + err := pmetrictest.CompareMetrics( + expected, + actual, + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreScopeMetricsOrder(), + pmetrictest.IgnoreSummaryDataPointValueAtQuantileSliceOrder(), + pmetrictest.IgnoreTimestamp(), + pmetrictest.IgnoreStartTimestamp(), + ) + require.NoError(t, err) +} + +func CompareLogs(t *testing.T, expected, actual plog.Logs) { + err := plogtest.CompareLogs( + expected, + actual, + ) + require.NoError(t, err) +} + +func CompareTraces(t *testing.T, expected, actual ptrace.Traces) { + err := ptracetest.CompareTraces( + expected, + actual, + ptracetest.IgnoreResourceSpansOrder(), + ptracetest.IgnoreScopeSpansOrder(), + ) + require.NoError(t, err) +} diff --git a/component/otelcol/processor/processortest/compare_signals_test.go b/component/otelcol/processor/processortest/compare_signals_test.go new file mode 100644 index 000000000000..609b1754354c --- /dev/null +++ b/component/otelcol/processor/processortest/compare_signals_test.go @@ -0,0 +1,36 @@ +package processortest + +import ( + "testing" + + "go.opentelemetry.io/collector/pdata/pmetric" + 
"go.opentelemetry.io/collector/pdata/ptrace" +) + +func Test_ScopeMetricsOrder(t *testing.T) { + metric1 := pmetric.NewMetrics() + metric1_res := metric1.ResourceMetrics().AppendEmpty() + metric1_res.ScopeMetrics().AppendEmpty().Scope().SetName("scope1") + metric1_res.ScopeMetrics().AppendEmpty().Scope().SetName("scope2") + + metric2 := pmetric.NewMetrics() + metric2_res := metric2.ResourceMetrics().AppendEmpty() + metric2_res.ScopeMetrics().AppendEmpty().Scope().SetName("scope2") + metric2_res.ScopeMetrics().AppendEmpty().Scope().SetName("scope1") + + CompareMetrics(t, metric1, metric2) +} + +func Test_ScopeSpansAttributesOrder(t *testing.T) { + trace1 := ptrace.NewTraces() + trace1_span_attr := trace1.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Scope().Attributes() + trace1_span_attr.PutStr("key1", "val1") + trace1_span_attr.PutStr("key2", "val2") + + trace2 := ptrace.NewTraces() + trace2_span_attr := trace2.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Scope().Attributes() + trace2_span_attr.PutStr("key2", "val2") + trace2_span_attr.PutStr("key1", "val1") + + CompareTraces(t, trace1, trace2) +} diff --git a/component/otelcol/processor/processortest/processortest.go b/component/otelcol/processor/processortest/processortest.go index 0298f8e9250b..e9a99ec65024 100644 --- a/component/otelcol/processor/processortest/processortest.go +++ b/component/otelcol/processor/processortest/processortest.go @@ -75,16 +75,16 @@ func TestRunProcessor(c ProcessorRunConfig) { // type traceToLogSignal struct { - logCh chan plog.Logs - inputTrace ptrace.Traces - expectedOuutputLog plog.Logs + logCh chan plog.Logs + inputTrace ptrace.Traces + expectedOutputLog plog.Logs } func NewTraceToLogSignal(inputJson string, expectedOutputJson string) Signal { return &traceToLogSignal{ - logCh: make(chan plog.Logs), - inputTrace: CreateTestTraces(inputJson), - expectedOuutputLog: CreateTestLogs(expectedOutputJson), + logCh: make(chan plog.Logs), + inputTrace: CreateTestTraces(inputJson), + expectedOutputLog: CreateTestLogs(expectedOutputJson), } } @@ -101,10 +101,8 @@ func (s traceToLogSignal) CheckOutput(t *testing.T) { select { case <-time.After(time.Second): require.FailNow(t, "failed waiting for logs") - case tr := <-s.logCh: - trStr := marshalLogs(tr) - expStr := marshalLogs(s.expectedOuutputLog) - require.JSONEq(t, expStr, trStr) + case actualLog := <-s.logCh: + CompareLogs(t, s.expectedOutputLog, actualLog) } } @@ -113,17 +111,17 @@ func (s traceToLogSignal) CheckOutput(t *testing.T) { // type traceToMetricSignal struct { - metricCh chan pmetric.Metrics - inputTrace ptrace.Traces - expectedOuutputMetric pmetric.Metrics + metricCh chan pmetric.Metrics + inputTrace ptrace.Traces + expectedOutputMetric pmetric.Metrics } // Any timestamps inside expectedOutputJson should be set to 0. func NewTraceToMetricSignal(inputJson string, expectedOutputJson string) Signal { return &traceToMetricSignal{ - metricCh: make(chan pmetric.Metrics), - inputTrace: CreateTestTraces(inputJson), - expectedOuutputMetric: CreateTestMetrics(expectedOutputJson), + metricCh: make(chan pmetric.Metrics), + inputTrace: CreateTestTraces(inputJson), + expectedOutputMetric: CreateTestMetrics(expectedOutputJson), } } @@ -135,57 +133,6 @@ func (s traceToMetricSignal) ConsumeInput(ctx context.Context, consumer otelcol. return consumer.ConsumeTraces(ctx, s.inputTrace) } -// Set the timestamp of all data points to 0. -// This helps avoid flaky tests due to timestamps. 
-func setMetricTimestampToZero(metrics pmetric.Metrics) { - // Loop over all resource metrics - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { - rm := metrics.ResourceMetrics().At(i) - // Loop over all metric scopes. - for j := 0; j < rm.ScopeMetrics().Len(); j++ { - sm := rm.ScopeMetrics().At(j) - // Loop over all metrics. - for k := 0; k < sm.Metrics().Len(); k++ { - m := sm.Metrics().At(k) - switch m.Type() { - case pmetric.MetricTypeSum: - // Loop over all data points. - for l := 0; l < m.Sum().DataPoints().Len(); l++ { - // Set the timestamp to 0 to avoid flaky tests. - dp := m.Sum().DataPoints().At(l) - dp.SetTimestamp(0) - dp.SetStartTimestamp(0) - } - case pmetric.MetricTypeGauge: - // Loop over all data points. - for l := 0; l < m.Gauge().DataPoints().Len(); l++ { - // Set the timestamp to 0 to avoid flaky tests. - dp := m.Gauge().DataPoints().At(l) - dp.SetTimestamp(0) - dp.SetStartTimestamp(0) - } - case pmetric.MetricTypeHistogram: - // Loop over all data points. - for l := 0; l < m.Histogram().DataPoints().Len(); l++ { - // Set the timestamp to 0 to avoid flaky tests. - dp := m.Histogram().DataPoints().At(l) - dp.SetTimestamp(0) - dp.SetStartTimestamp(0) - } - case pmetric.MetricTypeSummary: - // Loop over all data points. - for l := 0; l < m.Summary().DataPoints().Len(); l++ { - // Set the timestamp to 0 to avoid flaky tests. - dp := m.Summary().DataPoints().At(l) - dp.SetTimestamp(0) - dp.SetStartTimestamp(0) - } - } - } - } - } -} - // Wait for the component to finish and check its output. func (s traceToMetricSignal) CheckOutput(t *testing.T) { // Set the timeout to a few seconds so that all components have finished. @@ -196,14 +143,8 @@ func (s traceToMetricSignal) CheckOutput(t *testing.T) { select { case <-time.After(timeout): require.FailNow(t, "failed waiting for metrics") - case tr := <-s.metricCh: - setMetricTimestampToZero(tr) - trStr := marshalMetrics(tr) - - expStr := marshalMetrics(s.expectedOuutputMetric) - // Set a field from the json to an empty string to avoid flaky tests containing timestamps. 
- - require.JSONEq(t, expStr, trStr) + case actualMetric := <-s.metricCh: + CompareMetrics(t, s.expectedOutputMetric, actualMetric) } } @@ -212,16 +153,16 @@ func (s traceToMetricSignal) CheckOutput(t *testing.T) { // type traceSignal struct { - traceCh chan ptrace.Traces - inputTrace ptrace.Traces - expectedOuutputTrace ptrace.Traces + traceCh chan ptrace.Traces + inputTrace ptrace.Traces + expectedOutputTrace ptrace.Traces } func NewTraceSignal(inputJson string, expectedOutputJson string) Signal { return &traceSignal{ - traceCh: make(chan ptrace.Traces), - inputTrace: CreateTestTraces(inputJson), - expectedOuutputTrace: CreateTestTraces(expectedOutputJson), + traceCh: make(chan ptrace.Traces), + inputTrace: CreateTestTraces(inputJson), + expectedOutputTrace: CreateTestTraces(expectedOutputJson), } } @@ -238,10 +179,8 @@ func (s traceSignal) CheckOutput(t *testing.T) { select { case <-time.After(time.Second): require.FailNow(t, "failed waiting for traces") - case tr := <-s.traceCh: - trStr := marshalTraces(tr) - expStr := marshalTraces(s.expectedOuutputTrace) - require.JSONEq(t, expStr, trStr) + case actualTrace := <-s.traceCh: + CompareTraces(t, s.expectedOutputTrace, actualTrace) } } @@ -256,15 +195,6 @@ func CreateTestTraces(traceJson string) ptrace.Traces { return data } -func marshalTraces(trace ptrace.Traces) string { - marshaler := &ptrace.JSONMarshaler{} - data, err := marshaler.MarshalTraces(trace) - if err != nil { - panic(err) - } - return string(data) -} - // makeTracesOutput returns ConsumerArguments which will forward traces to the // provided channel. func makeTracesOutput(ch chan ptrace.Traces) *otelcol.ConsumerArguments { @@ -289,16 +219,16 @@ func makeTracesOutput(ch chan ptrace.Traces) *otelcol.ConsumerArguments { // type logSignal struct { - logCh chan plog.Logs - inputLog plog.Logs - expectedOuutputLog plog.Logs + logCh chan plog.Logs + inputLog plog.Logs + expectedOutputLog plog.Logs } func NewLogSignal(inputJson string, expectedOutputJson string) Signal { return &logSignal{ - logCh: make(chan plog.Logs), - inputLog: CreateTestLogs(inputJson), - expectedOuutputLog: CreateTestLogs(expectedOutputJson), + logCh: make(chan plog.Logs), + inputLog: CreateTestLogs(inputJson), + expectedOutputLog: CreateTestLogs(expectedOutputJson), } } @@ -315,10 +245,8 @@ func (s logSignal) CheckOutput(t *testing.T) { select { case <-time.After(time.Second): require.FailNow(t, "failed waiting for logs") - case tr := <-s.logCh: - trStr := marshalLogs(tr) - expStr := marshalLogs(s.expectedOuutputLog) - require.JSONEq(t, expStr, trStr) + case actualLog := <-s.logCh: + CompareLogs(t, s.expectedOutputLog, actualLog) } } @@ -352,30 +280,21 @@ func CreateTestLogs(logJson string) plog.Logs { return data } -func marshalLogs(log plog.Logs) string { - marshaler := &plog.JSONMarshaler{} - data, err := marshaler.MarshalLogs(log) - if err != nil { - panic(err) - } - return string(data) -} - // // Metrics // type metricSignal struct { - metricCh chan pmetric.Metrics - inputMetric pmetric.Metrics - expectedOuutputMetric pmetric.Metrics + metricCh chan pmetric.Metrics + inputMetric pmetric.Metrics + expectedOutputMetric pmetric.Metrics } func NewMetricSignal(inputJson string, expectedOutputJson string) Signal { return &metricSignal{ - metricCh: make(chan pmetric.Metrics), - inputMetric: CreateTestMetrics(inputJson), - expectedOuutputMetric: CreateTestMetrics(expectedOutputJson), + metricCh: make(chan pmetric.Metrics), + inputMetric: CreateTestMetrics(inputJson), + expectedOutputMetric: 
CreateTestMetrics(expectedOutputJson), } } @@ -392,10 +311,8 @@ func (s metricSignal) CheckOutput(t *testing.T) { select { case <-time.After(time.Second): require.FailNow(t, "failed waiting for logs") - case tr := <-s.metricCh: - trStr := marshalMetrics(tr) - expStr := marshalMetrics(s.expectedOuutputMetric) - require.JSONEq(t, expStr, trStr) + case actualMetric := <-s.metricCh: + CompareMetrics(t, s.expectedOutputMetric, actualMetric) } } @@ -428,12 +345,3 @@ func CreateTestMetrics(metricJson string) pmetric.Metrics { } return data } - -func marshalMetrics(metrics pmetric.Metrics) string { - marshaler := &pmetric.JSONMarshaler{} - data, err := marshaler.MarshalMetrics(metrics) - if err != nil { - panic(err) - } - return string(data) -} diff --git a/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go b/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go new file mode 100644 index 000000000000..9b715eac4a12 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go @@ -0,0 +1,72 @@ +package ec2 + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "ec2" + +// Config defines user-specified configurations unique to the EC2 detector +type Config struct { + // Tags is a list of regexes to match EC2 instance tag keys that users want + // to add as resource attributes to processed data. + Tags []string `river:"tags,attr,optional"` + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudAccountID: rac.ResourceAttributeConfig{Enabled: true}, + CloudAvailabilityZone: rac.ResourceAttributeConfig{Enabled: true}, + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + CloudRegion: rac.ResourceAttributeConfig{Enabled: true}, + HostID: rac.ResourceAttributeConfig{Enabled: true}, + HostImageID: rac.ResourceAttributeConfig{Enabled: true}, + HostName: rac.ResourceAttributeConfig{Enabled: true}, + HostType: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "tags": append([]string{}, args.Tags...), + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config to enable and disable resource attributes.
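+// Each attribute corresponds to a nested River block. A sketch of disabling a
+// single attribute inside the ec2 detector block (block names follow the river
+// tags below; picking host.name is illustrative):
+//
+//   ec2 {
+//     resource_attributes {
+//       host.name {
+//         enabled = false
+//       }
+//     }
+//   }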
+type ResourceAttributesConfig struct { + CloudAccountID rac.ResourceAttributeConfig `river:"cloud.account.id,block,optional"` + CloudAvailabilityZone rac.ResourceAttributeConfig `river:"cloud.availability_zone,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` + HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` + HostImageID rac.ResourceAttributeConfig `river:"host.image.id,block,optional"` + HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` + HostType rac.ResourceAttributeConfig `river:"host.type,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.account.id": r.CloudAccountID.Convert(), + "cloud.availability_zone": r.CloudAvailabilityZone.Convert(), + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "cloud.region": r.CloudRegion.Convert(), + "host.id": r.HostID.Convert(), + "host.image.id": r.HostImageID.Convert(), + "host.name": r.HostName.Convert(), + "host.type": r.HostType.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go b/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go new file mode 100644 index 000000000000..1532bd376567 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go @@ -0,0 +1,86 @@ +package ecs + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "ecs" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + AwsEcsClusterArn: rac.ResourceAttributeConfig{Enabled: true}, + AwsEcsLaunchtype: rac.ResourceAttributeConfig{Enabled: true}, + AwsEcsTaskArn: rac.ResourceAttributeConfig{Enabled: true}, + AwsEcsTaskFamily: rac.ResourceAttributeConfig{Enabled: true}, + AwsEcsTaskRevision: rac.ResourceAttributeConfig{Enabled: true}, + AwsLogGroupArns: rac.ResourceAttributeConfig{Enabled: true}, + AwsLogGroupNames: rac.ResourceAttributeConfig{Enabled: true}, + AwsLogStreamArns: rac.ResourceAttributeConfig{Enabled: true}, + AwsLogStreamNames: rac.ResourceAttributeConfig{Enabled: true}, + CloudAccountID: rac.ResourceAttributeConfig{Enabled: true}, + CloudAvailabilityZone: rac.ResourceAttributeConfig{Enabled: true}, + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + CloudRegion: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args *Config) Convert() map[string]interface{} { + if args == nil { + return nil + } + + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for ecs resource attributes. 
+type ResourceAttributesConfig struct { + AwsEcsClusterArn rac.ResourceAttributeConfig `river:"aws.ecs.cluster.arn,block,optional"` + AwsEcsLaunchtype rac.ResourceAttributeConfig `river:"aws.ecs.launchtype,block,optional"` + AwsEcsTaskArn rac.ResourceAttributeConfig `river:"aws.ecs.task.arn,block,optional"` + AwsEcsTaskFamily rac.ResourceAttributeConfig `river:"aws.ecs.task.family,block,optional"` + AwsEcsTaskRevision rac.ResourceAttributeConfig `river:"aws.ecs.task.revision,block,optional"` + AwsLogGroupArns rac.ResourceAttributeConfig `river:"aws.log.group.arns,block,optional"` + AwsLogGroupNames rac.ResourceAttributeConfig `river:"aws.log.group.names,block,optional"` + AwsLogStreamArns rac.ResourceAttributeConfig `river:"aws.log.stream.arns,block,optional"` + AwsLogStreamNames rac.ResourceAttributeConfig `river:"aws.log.stream.names,block,optional"` + CloudAccountID rac.ResourceAttributeConfig `river:"cloud.account.id,block,optional"` + CloudAvailabilityZone rac.ResourceAttributeConfig `river:"cloud.availability_zone,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "aws.ecs.cluster.arn": r.AwsEcsClusterArn.Convert(), + "aws.ecs.launchtype": r.AwsEcsLaunchtype.Convert(), + "aws.ecs.task.arn": r.AwsEcsTaskArn.Convert(), + "aws.ecs.task.family": r.AwsEcsTaskFamily.Convert(), + "aws.ecs.task.revision": r.AwsEcsTaskRevision.Convert(), + "aws.log.group.arns": r.AwsLogGroupArns.Convert(), + "aws.log.group.names": r.AwsLogGroupNames.Convert(), + "aws.log.stream.arns": r.AwsLogStreamArns.Convert(), + "aws.log.stream.names": r.AwsLogStreamNames.Convert(), + "cloud.account.id": r.CloudAccountID.Convert(), + "cloud.availability_zone": r.CloudAvailabilityZone.Convert(), + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "cloud.region": r.CloudRegion.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go b/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go new file mode 100644 index 000000000000..6290180b3086 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go @@ -0,0 +1,46 @@ +package eks + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "eks" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for eks resource attributes. 
+type ResourceAttributesConfig struct { + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go b/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go new file mode 100644 index 000000000000..dd670372cee7 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go @@ -0,0 +1,55 @@ +package elasticbeanstalk + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "elasticbeanstalk" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + DeploymentEnvironment: rac.ResourceAttributeConfig{Enabled: true}, + ServiceInstanceID: rac.ResourceAttributeConfig{Enabled: true}, + ServiceVersion: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for elastic_beanstalk resource attributes. 
+type ResourceAttributesConfig struct { + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + DeploymentEnvironment rac.ResourceAttributeConfig `river:"deployment.environment,block,optional"` + ServiceInstanceID rac.ResourceAttributeConfig `river:"service.instance.id,block,optional"` + ServiceVersion rac.ResourceAttributeConfig `river:"service.version,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "deployment.environment": r.DeploymentEnvironment.Convert(), + "service.instance.id": r.ServiceInstanceID.Convert(), + "service.version": r.ServiceVersion.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go b/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go new file mode 100644 index 000000000000..19a4cc7b4e80 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go @@ -0,0 +1,67 @@ +package lambda + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "lambda" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + AwsLogGroupNames: rac.ResourceAttributeConfig{Enabled: true}, + AwsLogStreamNames: rac.ResourceAttributeConfig{Enabled: true}, + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + CloudRegion: rac.ResourceAttributeConfig{Enabled: true}, + FaasInstance: rac.ResourceAttributeConfig{Enabled: true}, + FaasMaxMemory: rac.ResourceAttributeConfig{Enabled: true}, + FaasName: rac.ResourceAttributeConfig{Enabled: true}, + FaasVersion: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for lambda resource attributes. 
+type ResourceAttributesConfig struct { + AwsLogGroupNames rac.ResourceAttributeConfig `river:"aws.log.group.names,block,optional"` + AwsLogStreamNames rac.ResourceAttributeConfig `river:"aws.log.stream.names,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` + FaasInstance rac.ResourceAttributeConfig `river:"faas.instance,block,optional"` + FaasMaxMemory rac.ResourceAttributeConfig `river:"faas.max_memory,block,optional"` + FaasName rac.ResourceAttributeConfig `river:"faas.name,block,optional"` + FaasVersion rac.ResourceAttributeConfig `river:"faas.version,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "aws.log.group.names": r.AwsLogGroupNames.Convert(), + "aws.log.stream.names": r.AwsLogStreamNames.Convert(), + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "cloud.region": r.CloudRegion.Convert(), + "faas.instance": r.FaasInstance.Convert(), + "faas.max_memory": r.FaasMaxMemory.Convert(), + "faas.name": r.FaasName.Convert(), + "faas.version": r.FaasVersion.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go b/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go new file mode 100644 index 000000000000..4501c4e33a6f --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go @@ -0,0 +1,46 @@ +package aks + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "aks" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for aks resource attributes. 
+type ResourceAttributesConfig struct { + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/azure/config.go b/component/otelcol/processor/resourcedetection/internal/azure/config.go new file mode 100644 index 000000000000..05e612d1d2d0 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/azure/config.go @@ -0,0 +1,70 @@ +package azure + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "azure" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + AzureResourcegroupName: rac.ResourceAttributeConfig{Enabled: true}, + AzureVMName: rac.ResourceAttributeConfig{Enabled: true}, + AzureVMScalesetName: rac.ResourceAttributeConfig{Enabled: true}, + AzureVMSize: rac.ResourceAttributeConfig{Enabled: true}, + CloudAccountID: rac.ResourceAttributeConfig{Enabled: true}, + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + CloudRegion: rac.ResourceAttributeConfig{Enabled: true}, + HostID: rac.ResourceAttributeConfig{Enabled: true}, + HostName: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for azure resource attributes. 
+type ResourceAttributesConfig struct { + AzureResourcegroupName rac.ResourceAttributeConfig `river:"azure.resourcegroup.name,block,optional"` + AzureVMName rac.ResourceAttributeConfig `river:"azure.vm.name,block,optional"` + AzureVMScalesetName rac.ResourceAttributeConfig `river:"azure.vm.scaleset.name,block,optional"` + AzureVMSize rac.ResourceAttributeConfig `river:"azure.vm.size,block,optional"` + CloudAccountID rac.ResourceAttributeConfig `river:"cloud.account.id,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` + HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` + HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "azure.resourcegroup.name": r.AzureResourcegroupName.Convert(), + "azure.vm.name": r.AzureVMName.Convert(), + "azure.vm.scaleset.name": r.AzureVMScalesetName.Convert(), + "azure.vm.size": r.AzureVMSize.Convert(), + "cloud.account.id": r.CloudAccountID.Convert(), + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "cloud.region": r.CloudRegion.Convert(), + "host.id": r.HostID.Convert(), + "host.name": r.HostName.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/consul/config.go b/component/otelcol/processor/resourcedetection/internal/consul/config.go new file mode 100644 index 000000000000..4cc2e9b5beb3 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/consul/config.go @@ -0,0 +1,94 @@ +package consul + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" + "github.com/grafana/river/rivertypes" + "go.opentelemetry.io/collector/config/configopaque" +) + +const Name = "consul" + +// The struct requires no user-specified fields by default as consul agent's default +// configuration will be provided to the API client. +// See `consul.go#NewDetector` for more information. +type Config struct { + // Address is the address of the Consul server + Address string `river:"address,attr,optional"` + + // Datacenter to use. If not provided, the default agent datacenter is used. + Datacenter string `river:"datacenter,attr,optional"` + + // Token is used to provide a per-request ACL token which overrides the + // agent's default (empty) token. Token is only required if + // [Consul's ACL System](https://www.consul.io/docs/security/acl/acl-system) + // is enabled. + Token rivertypes.Secret `river:"token,attr,optional"` + + // TokenFile is not necessary in River because users can use the local.file + // Flow component instead. + // + // TokenFile string `river:"token_file"` + + // Namespace is the name of the namespace to send along for the request + // when no other Namespace is present in the QueryOptions + Namespace string `river:"namespace,attr,optional"` + + // Allowlist of [Consul Metadata](https://www.consul.io/docs/agent/options#node_meta) + // keys to use as resource attributes. 
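+ // For example, with illustrative key names:
+ //
+ //   meta = ["instance_type", "rack"]
+ //
+ // Convert below maps each listed key to an empty string because the
+ // upstream detector still expects a map rather than a slice.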
+ MetaLabels []string `river:"meta,attr,optional"` + + // ResourceAttributes configuration for Consul detector + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudRegion: rac.ResourceAttributeConfig{Enabled: true}, + HostID: rac.ResourceAttributeConfig{Enabled: true}, + HostName: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + //TODO(ptodev): Change the OTel Collector's "meta" param to be a slice instead of a map. + var metaLabels map[string]string + if args.MetaLabels != nil { + metaLabels = make(map[string]string, len(args.MetaLabels)) + for _, label := range args.MetaLabels { + metaLabels[label] = "" + } + } + + return map[string]interface{}{ + "address": args.Address, + "datacenter": args.Datacenter, + "token": configopaque.String(args.Token), + "namespace": args.Namespace, + "meta": metaLabels, + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for consul resource attributes. +type ResourceAttributesConfig struct { + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` + HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` + HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` +} + +func (r *ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.region": r.CloudRegion.Convert(), + "host.id": r.HostID.Convert(), + "host.name": r.HostName.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/docker/config.go b/component/otelcol/processor/resourcedetection/internal/docker/config.go new file mode 100644 index 000000000000..f8c1bdc39b82 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/docker/config.go @@ -0,0 +1,46 @@ +package docker + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "docker" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + HostName: rac.ResourceAttributeConfig{Enabled: true}, + OsType: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for docker resource attributes. 
+type ResourceAttributesConfig struct { + HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` + OsType rac.ResourceAttributeConfig `river:"os.type,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "host.name": r.HostName.Convert(), + "os.type": r.OsType.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/gcp/config.go b/component/otelcol/processor/resourcedetection/internal/gcp/config.go new file mode 100644 index 000000000000..76395828a97c --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/gcp/config.go @@ -0,0 +1,91 @@ +package gcp + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "gcp" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudAccountID: rac.ResourceAttributeConfig{Enabled: true}, + CloudAvailabilityZone: rac.ResourceAttributeConfig{Enabled: true}, + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + CloudRegion: rac.ResourceAttributeConfig{Enabled: true}, + FaasID: rac.ResourceAttributeConfig{Enabled: true}, + FaasInstance: rac.ResourceAttributeConfig{Enabled: true}, + FaasName: rac.ResourceAttributeConfig{Enabled: true}, + FaasVersion: rac.ResourceAttributeConfig{Enabled: true}, + GcpCloudRunJobExecution: rac.ResourceAttributeConfig{Enabled: true}, + GcpCloudRunJobTaskIndex: rac.ResourceAttributeConfig{Enabled: true}, + GcpGceInstanceHostname: rac.ResourceAttributeConfig{Enabled: false}, + GcpGceInstanceName: rac.ResourceAttributeConfig{Enabled: false}, + HostID: rac.ResourceAttributeConfig{Enabled: true}, + HostName: rac.ResourceAttributeConfig{Enabled: true}, + HostType: rac.ResourceAttributeConfig{Enabled: true}, + K8sClusterName: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for gcp resource attributes. 
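+// Note that DefaultArguments above leaves gcp.gce.instance.hostname and
+// gcp.gce.instance.name disabled. A River sketch of turning one of them on
+// (the choice of attribute is illustrative):
+//
+//   gcp {
+//     resource_attributes {
+//       gcp.gce.instance.name {
+//         enabled = true
+//       }
+//     }
+//   }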
+type ResourceAttributesConfig struct { + CloudAccountID rac.ResourceAttributeConfig `river:"cloud.account.id,block,optional"` + CloudAvailabilityZone rac.ResourceAttributeConfig `river:"cloud.availability_zone,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` + FaasID rac.ResourceAttributeConfig `river:"faas.id,block,optional"` + FaasInstance rac.ResourceAttributeConfig `river:"faas.instance,block,optional"` + FaasName rac.ResourceAttributeConfig `river:"faas.name,block,optional"` + FaasVersion rac.ResourceAttributeConfig `river:"faas.version,block,optional"` + GcpCloudRunJobExecution rac.ResourceAttributeConfig `river:"gcp.cloud_run.job.execution,block,optional"` + GcpCloudRunJobTaskIndex rac.ResourceAttributeConfig `river:"gcp.cloud_run.job.task_index,block,optional"` + GcpGceInstanceHostname rac.ResourceAttributeConfig `river:"gcp.gce.instance.hostname,block,optional"` + GcpGceInstanceName rac.ResourceAttributeConfig `river:"gcp.gce.instance.name,block,optional"` + HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` + HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` + HostType rac.ResourceAttributeConfig `river:"host.type,block,optional"` + K8sClusterName rac.ResourceAttributeConfig `river:"k8s.cluster.name,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.account.id": r.CloudAccountID.Convert(), + "cloud.availability_zone": r.CloudAvailabilityZone.Convert(), + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "cloud.region": r.CloudRegion.Convert(), + "faas.id": r.FaasID.Convert(), + "faas.instance": r.FaasInstance.Convert(), + "faas.name": r.FaasName.Convert(), + "faas.version": r.FaasVersion.Convert(), + "gcp.cloud_run.job.execution": r.GcpCloudRunJobExecution.Convert(), + "gcp.cloud_run.job.task_index": r.GcpCloudRunJobTaskIndex.Convert(), + "gcp.gce.instance.hostname": r.GcpGceInstanceHostname.Convert(), + "gcp.gce.instance.name": r.GcpGceInstanceName.Convert(), + "host.id": r.HostID.Convert(), + "host.name": r.HostName.Convert(), + "host.type": r.HostType.Convert(), + "k8s.cluster.name": r.K8sClusterName.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/heroku/config.go b/component/otelcol/processor/resourcedetection/internal/heroku/config.go new file mode 100644 index 000000000000..6e7681269abb --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/heroku/config.go @@ -0,0 +1,64 @@ +package heroku + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "heroku" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. 
+var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + HerokuAppID: rac.ResourceAttributeConfig{Enabled: true}, + HerokuDynoID: rac.ResourceAttributeConfig{Enabled: true}, + HerokuReleaseCommit: rac.ResourceAttributeConfig{Enabled: true}, + HerokuReleaseCreationTimestamp: rac.ResourceAttributeConfig{Enabled: true}, + ServiceInstanceID: rac.ResourceAttributeConfig{Enabled: true}, + ServiceName: rac.ResourceAttributeConfig{Enabled: true}, + ServiceVersion: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for heroku resource attributes. +type ResourceAttributesConfig struct { + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + HerokuAppID rac.ResourceAttributeConfig `river:"heroku.app.id,block,optional"` + HerokuDynoID rac.ResourceAttributeConfig `river:"heroku.dyno.id,block,optional"` + HerokuReleaseCommit rac.ResourceAttributeConfig `river:"heroku.release.commit,block,optional"` + HerokuReleaseCreationTimestamp rac.ResourceAttributeConfig `river:"heroku.release.creation_timestamp,block,optional"` + ServiceInstanceID rac.ResourceAttributeConfig `river:"service.instance.id,block,optional"` + ServiceName rac.ResourceAttributeConfig `river:"service.name,block,optional"` + ServiceVersion rac.ResourceAttributeConfig `river:"service.version,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.provider": r.CloudProvider.Convert(), + "heroku.app.id": r.HerokuAppID.Convert(), + "heroku.dyno.id": r.HerokuDynoID.Convert(), + "heroku.release.commit": r.HerokuReleaseCommit.Convert(), + "heroku.release.creation_timestamp": r.HerokuReleaseCreationTimestamp.Convert(), + "service.instance.id": r.ServiceInstanceID.Convert(), + "service.name": r.ServiceName.Convert(), + "service.version": r.ServiceVersion.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/k8snode/config.go b/component/otelcol/processor/resourcedetection/internal/k8snode/config.go new file mode 100644 index 000000000000..8d47362eecb6 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/k8snode/config.go @@ -0,0 +1,75 @@ +package k8snode + +import ( + "github.com/grafana/agent/component/otelcol" + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "kubernetes_node" + +type Config struct { + KubernetesAPIConfig otelcol.KubernetesAPIConfig `river:",squash"` + // NodeFromEnvVar can be used to extract the node name from an environment + // variable. The value must be the name of the environment variable. + // This is useful when the node that an Agent will run on cannot be + // predicted. In such cases, the Kubernetes downward API can be used to + // add the node name to each pod as an environment variable. The detector + // can then read this value and identify the node the agent is running on.
+ // + // For example, node name can be passed to each agent with the downward API as follows + // + // env: + // - name: K8S_NODE_NAME + // valueFrom: + // fieldRef: + // fieldPath: spec.nodeName + // + // Then the NodeFromEnvVar field can be set to `K8S_NODE_NAME` so that the detector + // reports the node that the agent is running on. + // + // More on downward API here: https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/ + NodeFromEnvVar string `river:"node_from_env_var,attr,optional"` + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +var DefaultArguments = Config{ + KubernetesAPIConfig: otelcol.KubernetesAPIConfig{ + AuthType: otelcol.KubernetesAPIConfig_AuthType_None, + }, + NodeFromEnvVar: "K8S_NODE_NAME", + ResourceAttributes: ResourceAttributesConfig{ + K8sNodeName: rac.ResourceAttributeConfig{Enabled: true}, + K8sNodeUID: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (c *Config) SetToDefault() { + *c = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + //TODO: K8sAPIConfig is squashed - is there a better way to "convert" it? + "auth_type": args.KubernetesAPIConfig.AuthType, + "context": args.KubernetesAPIConfig.Context, + "node_from_env_var": args.NodeFromEnvVar, + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for k8snode resource attributes. +type ResourceAttributesConfig struct { + K8sNodeName rac.ResourceAttributeConfig `river:"k8s.node.name,block,optional"` + K8sNodeUID rac.ResourceAttributeConfig `river:"k8s.node.uid,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "k8s.node.name": r.K8sNodeName.Convert(), + "k8s.node.uid": r.K8sNodeUID.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/openshift/config.go b/component/otelcol/processor/resourcedetection/internal/openshift/config.go new file mode 100644 index 000000000000..362cd9bff459 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/openshift/config.go @@ -0,0 +1,68 @@ +package openshift + +import ( + "github.com/grafana/agent/component/otelcol" + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "openshift" + +// Config can contain user-specified inputs to overwrite default values. +// See `openshift.go#NewDetector` for more information. +type Config struct { + // Address is the address of the OpenShift API server + Address string `river:"address,attr,optional"` + + // Token is used to authenticate against the OpenShift API server + Token string `river:"token,attr,optional"` + + // TLSSettings contains TLS configurations that are specific to the client + // connection used to communicate with the OpenShift API. + TLSSettings otelcol.TLSClientArguments `river:"tls,block,optional"` + + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config.
+var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + CloudRegion: rac.ResourceAttributeConfig{Enabled: true}, + K8sClusterName: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "address": args.Address, + "token": args.Token, + "tls": args.TLSSettings.Convert(), + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for openshift resource attributes. +type ResourceAttributesConfig struct { + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` + K8sClusterName rac.ResourceAttributeConfig `river:"k8s.cluster.name,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "cloud.region": r.CloudRegion.Convert(), + "k8s.cluster.name": r.K8sClusterName.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/resource_attribute_config/resource_attribute_config.go b/component/otelcol/processor/resourcedetection/internal/resource_attribute_config/resource_attribute_config.go new file mode 100644 index 000000000000..ff5540a2f539 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/resource_attribute_config/resource_attribute_config.go @@ -0,0 +1,12 @@ +package resource_attribute_config + +// ResourceAttributeConfig configures whether a resource attribute is enabled. +type ResourceAttributeConfig struct { + Enabled bool `river:"enabled,attr"` +} + +func (r ResourceAttributeConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "enabled": r.Enabled, + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/system/config.go b/component/otelcol/processor/resourcedetection/internal/system/config.go new file mode 100644 index 000000000000..82e25cb45e97 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/system/config.go @@ -0,0 +1,95 @@ +package system + +import ( + "fmt" + + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "system" + +// Config defines user-specified configurations unique to the system detector +type Config struct { + // HostnameSources is a priority list of sources from which the hostname is fetched. + // If fetching the hostname from one source fails, + // the next source in the list is tried.
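+ // For example, to consult only the OS-reported hostname, set (the value "os"
+ // is one of the options accepted by Validate below; the choice is
+ // illustrative):
+ //
+ //   hostname_sources = ["os"]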
+ HostnameSources []string `river:"hostname_sources,attr,optional"` + + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +var DefaultArguments = Config{ + HostnameSources: []string{"dns", "os"}, + ResourceAttributes: ResourceAttributesConfig{ + HostArch: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUCacheL2Size: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUFamily: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUModelID: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUModelName: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUStepping: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUVendorID: rac.ResourceAttributeConfig{Enabled: false}, + HostID: rac.ResourceAttributeConfig{Enabled: false}, + HostName: rac.ResourceAttributeConfig{Enabled: true}, + OsDescription: rac.ResourceAttributeConfig{Enabled: false}, + OsType: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (c *Config) SetToDefault() { + *c = DefaultArguments +} + +// Validate config +func (cfg *Config) Validate() error { + for _, hostnameSource := range cfg.HostnameSources { + switch hostnameSource { + case "os", "dns", "cname", "lookup": + // Valid option - nothing to do + default: + return fmt.Errorf("invalid hostname source: %s", hostnameSource) + } + } + return nil +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "hostname_sources": args.HostnameSources, + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for system resource attributes. +type ResourceAttributesConfig struct { + HostArch rac.ResourceAttributeConfig `river:"host.arch,block,optional"` + HostCPUCacheL2Size rac.ResourceAttributeConfig `river:"host.cpu.cache.l2.size,block,optional"` + HostCPUFamily rac.ResourceAttributeConfig `river:"host.cpu.family,block,optional"` + HostCPUModelID rac.ResourceAttributeConfig `river:"host.cpu.model.id,block,optional"` + HostCPUModelName rac.ResourceAttributeConfig `river:"host.cpu.model.name,block,optional"` + HostCPUStepping rac.ResourceAttributeConfig `river:"host.cpu.stepping,block,optional"` + HostCPUVendorID rac.ResourceAttributeConfig `river:"host.cpu.vendor.id,block,optional"` + HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` + HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` + OsDescription rac.ResourceAttributeConfig `river:"os.description,block,optional"` + OsType rac.ResourceAttributeConfig `river:"os.type,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "host.arch": r.HostArch.Convert(), + "host.cpu.cache.l2.size": r.HostCPUCacheL2Size.Convert(), + "host.cpu.family": r.HostCPUFamily.Convert(), + "host.cpu.model.id": r.HostCPUModelID.Convert(), + "host.cpu.model.name": r.HostCPUModelName.Convert(), + "host.cpu.stepping": r.HostCPUStepping.Convert(), + "host.cpu.vendor.id": r.HostCPUVendorID.Convert(), + "host.id": r.HostID.Convert(), + "host.name": r.HostName.Convert(), + "os.description": r.OsDescription.Convert(), + "os.type": r.OsType.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/resourcedetection.go b/component/otelcol/processor/resourcedetection/resourcedetection.go new file mode 100644 index 000000000000..806d72c9d2e5 --- /dev/null +++ 
b/component/otelcol/processor/resourcedetection/resourcedetection.go @@ -0,0 +1,247 @@ +package resourcedetection + +import ( + "fmt" + "time" + + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/otelcol" + "github.com/grafana/agent/component/otelcol/processor" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/ec2" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/ecs" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/eks" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/lambda" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/azure" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/azure/aks" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/consul" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/docker" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/gcp" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/heroku" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/k8snode" + kubernetes_node "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/k8snode" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/openshift" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/system" + "github.com/grafana/river" + "github.com/mitchellh/mapstructure" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" + otelcomponent "go.opentelemetry.io/collector/component" + otelextension "go.opentelemetry.io/collector/extension" +) + +func init() { + component.Register(component.Registration{ + Name: "otelcol.processor.resourcedetection", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, + + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { + fact := resourcedetectionprocessor.NewFactory() + return processor.New(opts, fact, args.(Arguments)) + }, + }) +} + +// Arguments configures the otelcol.processor.resourcedetection component. +type Arguments struct { + // Detectors is an ordered list of named detectors that should be + // run to attempt to detect resource information. + Detectors []string `river:"detectors,attr,optional"` + + // Override indicates whether any existing resource attributes + // should be overridden or preserved. Defaults to true. + Override bool `river:"override,attr,optional"` + + // DetectorConfig holds settings specific to the individual detectors. + DetectorConfig DetectorConfig `river:",squash"` + + // Timeout is the HTTP client timeout used by the detectors. + // Defaults to 5s. + Timeout time.Duration `river:"timeout,attr,optional"` + // Client otelcol.HTTPClientArguments `river:",squash"` + // TODO: Uncomment the field above later, and possibly remove Timeout. + // Could we get away with just a timeout, or do we need the full HTTP client settings? + // Upstream only uses the HTTP client settings in the EC2 detector, via ClientFromContext. + // That seems like a very niche use case, so the Agent does not implement them for now. + // If we do implement them later, documenting the HTTP client settings would be tricky:
+ // we'd have to mention that they only apply to that very specific use case. + + // Output configures where to send processed data. Required. + Output *otelcol.ConsumerArguments `river:"output,block"` +} + +// DetectorConfig contains user-specified configurations unique to the individual detectors. +type DetectorConfig struct { + // EC2Config contains user-specified configurations for the EC2 detector + EC2Config ec2.Config `river:"ec2,block,optional"` + + // ECSConfig contains user-specified configurations for the ECS detector + ECSConfig ecs.Config `river:"ecs,block,optional"` + + // EKSConfig contains user-specified configurations for the EKS detector + EKSConfig eks.Config `river:"eks,block,optional"` + + // ElasticbeanstalkConfig contains user-specified configurations for the elasticbeanstalk detector + ElasticbeanstalkConfig elasticbeanstalk.Config `river:"elasticbeanstalk,block,optional"` + + // LambdaConfig contains user-specified configurations for the lambda detector + LambdaConfig lambda.Config `river:"lambda,block,optional"` + + // AzureConfig contains user-specified configurations for the azure detector + AzureConfig azure.Config `river:"azure,block,optional"` + + // AksConfig contains user-specified configurations for the aks detector + AksConfig aks.Config `river:"aks,block,optional"` + + // ConsulConfig contains user-specified configurations for the Consul detector + ConsulConfig consul.Config `river:"consul,block,optional"` + + // DockerConfig contains user-specified configurations for the docker detector + DockerConfig docker.Config `river:"docker,block,optional"` + + // GcpConfig contains user-specified configurations for the gcp detector + GcpConfig gcp.Config `river:"gcp,block,optional"` + + // HerokuConfig contains user-specified configurations for the heroku detector + HerokuConfig heroku.Config `river:"heroku,block,optional"` + + // SystemConfig contains user-specified configurations for the System detector + SystemConfig system.Config `river:"system,block,optional"` + + // OpenShiftConfig contains user-specified configurations for the Openshift detector + OpenShiftConfig openshift.Config `river:"openshift,block,optional"` + + // KubernetesNodeConfig contains user-specified configurations for the K8SNode detector + KubernetesNodeConfig kubernetes_node.Config `river:"kubernetes_node,block,optional"` +} + +var ( + _ processor.Arguments = Arguments{} + _ river.Validator = (*Arguments)(nil) + _ river.Defaulter = (*Arguments)(nil) +) + +// DefaultArguments holds default settings for Arguments. +var DefaultArguments = Arguments{ + Detectors: []string{"env"}, + Override: true, + Timeout: 5 * time.Second, + DetectorConfig: DetectorConfig{ + EC2Config: ec2.DefaultArguments, + ECSConfig: ecs.DefaultArguments, + EKSConfig: eks.DefaultArguments, + ElasticbeanstalkConfig: elasticbeanstalk.DefaultArguments, + LambdaConfig: lambda.DefaultArguments, + AzureConfig: azure.DefaultArguments, + AksConfig: aks.DefaultArguments, + ConsulConfig: consul.DefaultArguments, + DockerConfig: docker.DefaultArguments, + GcpConfig: gcp.DefaultArguments, + HerokuConfig: heroku.DefaultArguments, + SystemConfig: system.DefaultArguments, + OpenShiftConfig: openshift.DefaultArguments, + KubernetesNodeConfig: kubernetes_node.DefaultArguments, + }, +} + +// SetToDefault implements river.Defaulter. +func (args *Arguments) SetToDefault() { + *args = DefaultArguments +} + +// Validate implements river.Validator.
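+ // It requires at least one detector and rejects unknown detector names: for example, + // detectors = ["env", "system"] is accepted, while detectors = ["ec3"] fails + // with the error "invalid detector: ec3".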
+func (args *Arguments) Validate() error { + if len(args.Detectors) == 0 { + return fmt.Errorf("at least one detector must be specified") + } + + for _, detector := range args.Detectors { + switch detector { + case "env", + ec2.Name, + ecs.Name, + eks.Name, + elasticbeanstalk.Name, + lambda.Name, + azure.Name, + aks.Name, + consul.Name, + docker.Name, + gcp.Name, + heroku.Name, + system.Name, + openshift.Name, + k8snode.Name: + // Valid option - nothing to do + default: + return fmt.Errorf("invalid detector: %s", detector) + } + } + + return nil +} + +func (args Arguments) ConvertDetectors() []string { + if args.Detectors == nil { + return nil + } + + res := make([]string, 0, len(args.Detectors)) + for _, detector := range args.Detectors { + switch detector { + case k8snode.Name: + res = append(res, "k8snode") + default: + res = append(res, detector) + } + } + return res +} + +// Convert implements processor.Arguments. +func (args Arguments) Convert() (otelcomponent.Config, error) { + input := make(map[string]interface{}) + + input["detectors"] = args.ConvertDetectors() + input["override"] = args.Override + input["timeout"] = args.Timeout + + input["ec2"] = args.DetectorConfig.EC2Config.Convert() + input["ecs"] = args.DetectorConfig.ECSConfig.Convert() + input["eks"] = args.DetectorConfig.EKSConfig.Convert() + input["elasticbeanstalk"] = args.DetectorConfig.ElasticbeanstalkConfig.Convert() + input["lambda"] = args.DetectorConfig.LambdaConfig.Convert() + input["azure"] = args.DetectorConfig.AzureConfig.Convert() + input["aks"] = args.DetectorConfig.AksConfig.Convert() + input["consul"] = args.DetectorConfig.ConsulConfig.Convert() + input["docker"] = args.DetectorConfig.DockerConfig.Convert() + input["gcp"] = args.DetectorConfig.GcpConfig.Convert() + input["heroku"] = args.DetectorConfig.HerokuConfig.Convert() + input["system"] = args.DetectorConfig.SystemConfig.Convert() + input["openshift"] = args.DetectorConfig.OpenShiftConfig.Convert() + input["k8snode"] = args.DetectorConfig.KubernetesNodeConfig.Convert() + + var result resourcedetectionprocessor.Config + err := mapstructure.Decode(input, &result) + + if err != nil { + return nil, err + } + + return &result, nil +} + +// Extensions implements processor.Arguments. +func (args Arguments) Extensions() map[otelcomponent.ID]otelextension.Extension { + return nil +} + +// Exporters implements processor.Arguments. +func (args Arguments) Exporters() map[otelcomponent.DataType]map[otelcomponent.ID]otelcomponent.Component { + return nil +} + +// NextConsumers implements processor.Arguments. 
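+ // It returns the consumers configured in the component's output block.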
+func (args Arguments) NextConsumers() *otelcol.ConsumerArguments { + return args.Output +} diff --git a/component/otelcol/processor/resourcedetection/resourcedetection_test.go b/component/otelcol/processor/resourcedetection/resourcedetection_test.go new file mode 100644 index 000000000000..6fbbf0280e06 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/resourcedetection_test.go @@ -0,0 +1,1527 @@ +package resourcedetection_test + +import ( + "testing" + "time" + + "github.com/grafana/agent/component/otelcol/processor/resourcedetection" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/ec2" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/ecs" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/eks" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/lambda" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/azure" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/azure/aks" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/consul" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/docker" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/gcp" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/heroku" + kubernetes_node "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/k8snode" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/openshift" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/system" + "github.com/grafana/river" + "github.com/mitchellh/mapstructure" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" + "github.com/stretchr/testify/require" +) + +func TestArguments_UnmarshalRiver(t *testing.T) { + tests := []struct { + testName string + cfg string + expected map[string]interface{} + errorMsg string + }{ + { + testName: "err_no_detector", + cfg: ` + detectors = [] + output {} + `, + errorMsg: "at least one detector must be specified", + }, + { + testName: "invalid_detector", + cfg: ` + detectors = ["non-existent-detector"] + output {} + `, + errorMsg: "invalid detector: non-existent-detector", + }, + { + testName: "invalid_detector_and_all_valid_ones", + cfg: ` + detectors = ["non-existent-detector2", "env", "ec2", "ecs", "eks", "elasticbeanstalk", "lambda", "azure", "aks", "consul", "docker", "gcp", "heroku", "system", "openshift", "kubernetes_node"] + output {} + `, + errorMsg: "invalid detector: non-existent-detector2", + }, + { + testName: "all_detectors_with_defaults", + cfg: ` + detectors = ["env", "ec2", "ecs", "eks", "elasticbeanstalk", "lambda", "azure", "aks", "consul", "docker", "gcp", "heroku", "system", "openshift", "kubernetes_node"] + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"env", "ec2", "ecs", "eks", "elasticbeanstalk", "lambda", "azure", "aks", "consul", "docker", "gcp", "heroku", "system", "openshift", "k8snode"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": 
elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "default_detector", + cfg: ` + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"env"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "ec2_defaults", + cfg: ` + detectors = ["ec2"] + ec2 { + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"ec2"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "cloud.account.id": map[string]interface{}{"enabled": true}, + "cloud.availability_zone": map[string]interface{}{"enabled": true}, + "cloud.platform": map[string]interface{}{"enabled": true}, + "cloud.provider": map[string]interface{}{"enabled": true}, + "cloud.region": map[string]interface{}{"enabled": true}, + "host.id": map[string]interface{}{"enabled": true}, + "host.image.id": map[string]interface{}{"enabled": true}, + "host.name": map[string]interface{}{"enabled": true}, + "host.type": map[string]interface{}{"enabled": true}, + }, + }, + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "ec2_defaults_empty_resource_attributes", + cfg: ` + detectors = ["ec2"] + ec2 { + resource_attributes {} + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"ec2"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "cloud.account.id": map[string]interface{}{"enabled": true}, + "cloud.availability_zone": map[string]interface{}{"enabled": true}, + "cloud.platform": map[string]interface{}{"enabled": true}, + "cloud.provider": map[string]interface{}{"enabled": true}, + "cloud.region": map[string]interface{}{"enabled": true}, + 
"host.id": map[string]interface{}{"enabled": true}, + "host.image.id": map[string]interface{}{"enabled": true}, + "host.name": map[string]interface{}{"enabled": true}, + "host.type": map[string]interface{}{"enabled": true}, + }, + }, + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "ec2_explicit", + cfg: ` + detectors = ["ec2"] + ec2 { + tags = ["^tag1$", "^tag2$", "^label.*$"] + resource_attributes { + cloud.account.id { enabled = true } + cloud.availability_zone { enabled = true } + cloud.platform { enabled = true } + cloud.provider { enabled = true } + cloud.region { enabled = true } + host.id { enabled = true } + host.image.id { enabled = false } + host.name { enabled = false } + host.type { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"ec2"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": map[string]interface{}{ + "tags": []string{"^tag1$", "^tag2$", "^label.*$"}, + "resource_attributes": map[string]interface{}{ + "cloud.account.id": map[string]interface{}{"enabled": true}, + "cloud.availability_zone": map[string]interface{}{"enabled": true}, + "cloud.platform": map[string]interface{}{"enabled": true}, + "cloud.provider": map[string]interface{}{"enabled": true}, + "cloud.region": map[string]interface{}{"enabled": true}, + "host.id": map[string]interface{}{"enabled": true}, + "host.image.id": map[string]interface{}{"enabled": false}, + "host.name": map[string]interface{}{"enabled": false}, + "host.type": map[string]interface{}{"enabled": false}, + }, + }, + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "ecs_defaults", + cfg: ` + detectors = ["ecs"] + ecs { + resource_attributes { + aws.ecs.cluster.arn { enabled = true } + aws.ecs.launchtype { enabled = true } + aws.ecs.task.arn { enabled = true } + aws.ecs.task.family { enabled = true } + aws.ecs.task.revision { enabled = true } + aws.log.group.arns { enabled = true } + aws.log.group.names { enabled = false } + // aws.log.stream.arns { enabled = true } + // aws.log.stream.names { enabled = true } + // cloud.account.id { enabled = true } + // cloud.availability_zone { enabled = true } + // cloud.platform { enabled = true } + // cloud.provider { enabled = true } + // cloud.region { enabled = true } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"ecs"}, + 
"timeout": 5 * time.Second, + "override": true, + "ecs": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "aws.ecs.cluster.arn": map[string]interface{}{"enabled": true}, + "aws.ecs.launchtype": map[string]interface{}{"enabled": true}, + "aws.ecs.task.arn": map[string]interface{}{"enabled": true}, + "aws.ecs.task.family": map[string]interface{}{"enabled": true}, + "aws.ecs.task.revision": map[string]interface{}{"enabled": true}, + "aws.log.group.arns": map[string]interface{}{"enabled": true}, + "aws.log.group.names": map[string]interface{}{"enabled": false}, + "aws.log.stream.arns": map[string]interface{}{"enabled": true}, + "aws.log.stream.names": map[string]interface{}{"enabled": true}, + "cloud.account.id": map[string]interface{}{"enabled": true}, + "cloud.availability_zone": map[string]interface{}{"enabled": true}, + "cloud.platform": map[string]interface{}{"enabled": true}, + "cloud.provider": map[string]interface{}{"enabled": true}, + "cloud.region": map[string]interface{}{"enabled": true}, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "ecs_explicit", + cfg: ` + detectors = ["ecs"] + ecs { + resource_attributes { + aws.ecs.cluster.arn { enabled = true } + aws.ecs.launchtype { enabled = true } + aws.ecs.task.arn { enabled = true } + aws.ecs.task.family { enabled = true } + aws.ecs.task.revision { enabled = true } + aws.log.group.arns { enabled = true } + aws.log.group.names { enabled = false } + // aws.log.stream.arns { enabled = true } + // aws.log.stream.names { enabled = true } + // cloud.account.id { enabled = true } + // cloud.availability_zone { enabled = true } + // cloud.platform { enabled = true } + // cloud.provider { enabled = true } + // cloud.region { enabled = true } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"ecs"}, + "timeout": 5 * time.Second, + "override": true, + "ecs": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "aws.ecs.cluster.arn": map[string]interface{}{"enabled": true}, + "aws.ecs.launchtype": map[string]interface{}{"enabled": true}, + "aws.ecs.task.arn": map[string]interface{}{"enabled": true}, + "aws.ecs.task.family": map[string]interface{}{"enabled": true}, + "aws.ecs.task.revision": map[string]interface{}{"enabled": true}, + "aws.log.group.arns": map[string]interface{}{"enabled": true}, + "aws.log.group.names": map[string]interface{}{"enabled": false}, + "aws.log.stream.arns": map[string]interface{}{"enabled": true}, + "aws.log.stream.names": map[string]interface{}{"enabled": true}, + "cloud.account.id": map[string]interface{}{"enabled": true}, + "cloud.availability_zone": map[string]interface{}{"enabled": true}, + "cloud.platform": map[string]interface{}{"enabled": true}, + "cloud.provider": map[string]interface{}{"enabled": true}, + "cloud.region": map[string]interface{}{"enabled": true}, + }, + }, + "ec2": 
ec2.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "eks_defaults", + cfg: ` + detectors = ["eks"] + eks {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"eks"}, + "timeout": 5 * time.Second, + "override": true, + "eks": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "eks_explicit", + cfg: ` + detectors = ["eks"] + eks { + resource_attributes { + cloud.platform { enabled = true } + cloud.provider { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"eks"}, + "timeout": 5 * time.Second, + "override": true, + "eks": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": false, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "azure_defaults", + cfg: ` + detectors = ["azure"] + azure {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"azure"}, + "timeout": 5 * time.Second, + "override": true, + "azure": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "tags": []string{}, + "azure.resourcegroup.name": map[string]interface{}{ + "enabled": true, + }, + "azure.vm.name": map[string]interface{}{ + "enabled": true, + }, + "azure.vm.scaleset.name": map[string]interface{}{ + "enabled": true, + }, + "azure.vm.size": map[string]interface{}{ + "enabled": true, + }, + "cloud.account.id": 
map[string]interface{}{ + "enabled": true, + }, + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + "cloud.region": map[string]interface{}{ + "enabled": true, + }, + "host.id": map[string]interface{}{ + "enabled": true, + }, + "host.name": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "azure_explicit", + cfg: ` + detectors = ["azure"] + azure { + resource_attributes { + azure.resourcegroup.name { enabled = true } + azure.vm.name { enabled = true } + azure.vm.scaleset.name { enabled = true } + azure.vm.size { enabled = true } + cloud.account.id { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"azure"}, + "timeout": 5 * time.Second, + "override": true, + "azure": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "tags": []string{}, + "azure.resourcegroup.name": map[string]interface{}{ + "enabled": true, + }, + "azure.vm.name": map[string]interface{}{ + "enabled": true, + }, + "azure.vm.scaleset.name": map[string]interface{}{ + "enabled": true, + }, + "azure.vm.size": map[string]interface{}{ + "enabled": true, + }, + "cloud.account.id": map[string]interface{}{ + "enabled": false, + }, + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + "cloud.region": map[string]interface{}{ + "enabled": true, + }, + "host.id": map[string]interface{}{ + "enabled": true, + }, + "host.name": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "aks_defaults", + cfg: ` + detectors = ["aks"] + aks {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"aks"}, + "timeout": 5 * time.Second, + "override": true, + "aks": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": 
lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "aks_explicit", + cfg: ` + detectors = ["aks"] + aks { + resource_attributes { + cloud.platform { enabled = true } + cloud.provider { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"aks"}, + "timeout": 5 * time.Second, + "override": true, + "aks": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": false, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "gcp_defaults", + cfg: ` + detectors = ["gcp"] + gcp {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"gcp"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "gcp_explicit", + cfg: ` + detectors = ["gcp"] + gcp { + resource_attributes { + cloud.account.id { enabled = true } + cloud.availability_zone { enabled = true } + cloud.platform { enabled = true } + cloud.provider { enabled = true } + cloud.region { enabled = false } + faas.id { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"gcp"}, + "timeout": 5 * time.Second, + "override": true, + "gcp": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "cloud.account.id": map[string]interface{}{ + "enabled": true, + }, + "cloud.availability_zone": map[string]interface{}{ + "enabled": true, + }, + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + "cloud.region": map[string]interface{}{ + "enabled": false, + }, + "faas.id": map[string]interface{}{ + "enabled": false, + }, + "faas.instance": map[string]interface{}{ + "enabled": true, + }, + "faas.name": 
map[string]interface{}{ + "enabled": true, + }, + "faas.version": map[string]interface{}{ + "enabled": true, + }, + "gcp.cloud_run.job.execution": map[string]interface{}{ + "enabled": true, + }, + "gcp.cloud_run.job.task_index": map[string]interface{}{ + "enabled": true, + }, + "gcp.gce.instance.hostname": map[string]interface{}{ + "enabled": false, + }, + "gcp.gce.instance.name": map[string]interface{}{ + "enabled": false, + }, + "host.id": map[string]interface{}{ + "enabled": true, + }, + "host.name": map[string]interface{}{ + "enabled": true, + }, + "host.type": map[string]interface{}{ + "enabled": true, + }, + "k8s.cluster.name": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "docker_defaults", + cfg: ` + detectors = ["docker"] + docker {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"docker"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "docker_explicit", + cfg: ` + detectors = ["docker"] + docker { + resource_attributes { + host.name { enabled = true } + os.type { enabled = false } + + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"docker"}, + "timeout": 5 * time.Second, + "override": true, + "docker": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "host.name": map[string]interface{}{ + "enabled": true, + }, + "os.type": map[string]interface{}{ + "enabled": false, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "lambda_defaults", + cfg: ` + detectors = ["lambda"] + lambda {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"lambda"}, + 
"timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "lambda_explicit", + cfg: ` + detectors = ["lambda"] + lambda { + resource_attributes { + aws.log.group.names { enabled = true } + aws.log.stream.names { enabled = true } + cloud.platform { enabled = true } + cloud.provider { enabled = false } + cloud.region { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"lambda"}, + "timeout": 5 * time.Second, + "override": true, + "lambda": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "aws.log.group.names": map[string]interface{}{ + "enabled": true, + }, + "aws.log.stream.names": map[string]interface{}{ + "enabled": true, + }, + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": false, + }, + "cloud.region": map[string]interface{}{ + "enabled": false, + }, + "faas.instance": map[string]interface{}{ + "enabled": true, + }, + "faas.max_memory": map[string]interface{}{ + "enabled": true, + }, + "faas.name": map[string]interface{}{ + "enabled": true, + }, + "faas.version": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "elasticbeanstalk_defaults", + cfg: ` + detectors = ["elasticbeanstalk"] + elasticbeanstalk {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"elasticbeanstalk"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "elasticbeanstalk_explicit", + cfg: ` + detectors = ["elasticbeanstalk"] + elasticbeanstalk { + 
resource_attributes { + cloud.platform { enabled = true } + cloud.provider { enabled = true } + deployment.environment { enabled = true } + service.instance.id { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"elasticbeanstalk"}, + "timeout": 5 * time.Second, + "override": true, + "elasticbeanstalk": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + "deployment.environment": map[string]interface{}{ + "enabled": true, + }, + "service.instance.id": map[string]interface{}{ + "enabled": false, + }, + "service.version": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "consul_defaults", + cfg: ` + detectors = ["consul"] + consul {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"consul"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "consul_explicit", + cfg: ` + detectors = ["consul"] + consul { + address = "localhost:8500" + datacenter = "dc1" + token = "secret_token" + namespace = "test_namespace" + meta = ["test"] + resource_attributes { + cloud.region { enabled = false } + host.id { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"consul"}, + "timeout": 5 * time.Second, + "override": true, + "consul": map[string]interface{}{ + "address": "localhost:8500", + "datacenter": "dc1", + "token": "secret_token", + "namespace": "test_namespace", + "meta": map[string]string{"test": ""}, + "resource_attributes": map[string]interface{}{ + "cloud.region": map[string]interface{}{ + "enabled": false, + }, + "host.id": map[string]interface{}{ + "enabled": false, + }, + "host.name": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": 
gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "heroku_defaults", + cfg: ` + detectors = ["heroku"] + heroku {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"heroku"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "heroku_explicit", + cfg: ` + detectors = ["heroku"] + heroku { + resource_attributes { + cloud.provider { enabled = true } + heroku.app.id { enabled = true } + heroku.dyno.id { enabled = true } + heroku.release.commit { enabled = true } + heroku.release.creation_timestamp { enabled = false } + service.instance.id { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"heroku"}, + "timeout": 5 * time.Second, + "override": true, + "heroku": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + "heroku.app.id": map[string]interface{}{ + "enabled": true, + }, + "heroku.dyno.id": map[string]interface{}{ + "enabled": true, + }, + "heroku.release.commit": map[string]interface{}{ + "enabled": true, + }, + "heroku.release.creation_timestamp": map[string]interface{}{ + "enabled": false, + }, + "service.instance.id": map[string]interface{}{ + "enabled": false, + }, + "service.name": map[string]interface{}{ + "enabled": true, + }, + "service.version": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "kubernetes_node_defaults", + cfg: ` + detectors = ["kubernetes_node"] + kubernetes_node {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"k8snode"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + 
"docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "kubernetes_node_explicit", + cfg: ` + detectors = ["kubernetes_node"] + kubernetes_node { + auth_type = "kubeConfig" + context = "fake_ctx" + node_from_env_var = "MY_CUSTOM_VAR" + resource_attributes { + k8s.node.name { enabled = true } + k8s.node.uid { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"k8snode"}, + "timeout": 5 * time.Second, + "override": true, + "k8snode": map[string]interface{}{ + "auth_type": "kubeConfig", + "context": "fake_ctx", + "node_from_env_var": "MY_CUSTOM_VAR", + "resource_attributes": map[string]interface{}{ + "k8s.node.name": map[string]interface{}{ + "enabled": true, + }, + "k8s.node.uid": map[string]interface{}{ + "enabled": false, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + }, + }, + { + testName: "system_invalid_hostname_source", + cfg: ` + detectors = ["system"] + system { + hostname_sources = ["asdf"] + resource_attributes { } + } + output {} + `, + errorMsg: "invalid hostname source: asdf", + }, + { + testName: "system_defaults", + cfg: ` + detectors = ["system"] + system {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"system"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "system_explicit", + cfg: ` + detectors = ["system"] + system { + hostname_sources = ["cname","lookup"] + resource_attributes { + host.arch { enabled = true } + host.cpu.cache.l2.size { enabled = true } + host.cpu.family { enabled = true } + host.cpu.model.id { enabled = true } + host.cpu.model.name { enabled = true } + host.cpu.stepping { enabled = true } + host.cpu.vendor.id { enabled = false } + host.id { enabled = false } + host.name { enabled = false } + // os.description { enabled = false } + // os.type { enabled = true } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"system"}, + "timeout": 5 * time.Second, + "override": true, + "system": map[string]interface{}{ + "hostname_sources": 
[]string{"cname", "lookup"}, + "resource_attributes": map[string]interface{}{ + "host.arch": map[string]interface{}{ + "enabled": true, + }, + "host.cpu.cache.l2.size": map[string]interface{}{ + "enabled": true, + }, + "host.cpu.family": map[string]interface{}{ + "enabled": true, + }, + "host.cpu.model.id": map[string]interface{}{ + "enabled": true, + }, + "host.cpu.model.name": map[string]interface{}{ + "enabled": true, + }, + "host.cpu.stepping": map[string]interface{}{ + "enabled": true, + }, + "host.cpu.vendor.id": map[string]interface{}{ + "enabled": false, + }, + "host.id": map[string]interface{}{ + "enabled": false, + }, + "host.name": map[string]interface{}{ + "enabled": false, + }, + "os.description": map[string]interface{}{ + "enabled": false, + }, + "os.type": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "openshift_default", + cfg: ` + detectors = ["openshift"] + openshift {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"openshift"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "openshift_explicit", + cfg: ` + detectors = ["openshift"] + timeout = "7s" + override = false + openshift { + address = "127.0.0.1:4444" + token = "some_token" + tls { + insecure = true + } + resource_attributes { + cloud.platform { + enabled = true + } + cloud.provider { + enabled = true + } + cloud.region { + enabled = false + } + k8s.cluster.name { + enabled = false + } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"openshift"}, + "timeout": 7 * time.Second, + "override": false, + "openshift": map[string]interface{}{ + "address": "127.0.0.1:4444", + "token": "some_token", + "tls": map[string]interface{}{ + "insecure": true, + }, + "resource_attributes": map[string]interface{}{ + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + "cloud.region": map[string]interface{}{ + "enabled": false, + }, + "k8s.cluster.name": map[string]interface{}{ + "enabled": false, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": 
elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "env", + cfg: ` + detectors = ["env"] + timeout = "7s" + override = false + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"env"}, + "timeout": 7 * time.Second, + "override": false, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + } + + for _, tc := range tests { + t.Run(tc.testName, func(t *testing.T) { + var args resourcedetection.Arguments + err := river.Unmarshal([]byte(tc.cfg), &args) + if tc.errorMsg != "" { + require.ErrorContains(t, err, tc.errorMsg) + return + } + + require.NoError(t, err) + + actualPtr, err := args.Convert() + require.NoError(t, err) + + actual := actualPtr.(*resourcedetectionprocessor.Config) + + var expected resourcedetectionprocessor.Config + err = mapstructure.Decode(tc.expected, &expected) + require.NoError(t, err) + + require.Equal(t, expected, *actual) + }) + } +} diff --git a/component/prometheus/scrape/scrape.go b/component/prometheus/scrape/scrape.go index 175f09ebf2a4..56e9342a437e 100644 --- a/component/prometheus/scrape/scrape.go +++ b/component/prometheus/scrape/scrape.go @@ -56,6 +56,8 @@ type Arguments struct { HonorLabels bool `river:"honor_labels,attr,optional"` // Indicator whether the scraped timestamps should be respected. HonorTimestamps bool `river:"honor_timestamps,attr,optional"` + // Indicator whether to track the staleness of the scraped timestamps. + TrackTimestampsStaleness bool `river:"track_timestamps_staleness,attr,optional"` // A set of query parameters with which the target is scraped. Params url.Values `river:"params,attr,optional"` // Whether to scrape a classic histogram that is also exposed as a native histogram. @@ -99,13 +101,14 @@ type Arguments struct { // SetToDefault implements river.Defaulter. 
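+// Leaving track_timestamps_staleness false by default keeps the component's previous scrape behavior unchanged.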
func (arg *Arguments) SetToDefault() { *arg = Arguments{ - MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, - HonorTimestamps: true, - HTTPClientConfig: component_config.DefaultHTTPClientConfig, - ScrapeInterval: 1 * time.Minute, // From config.DefaultGlobalConfig - ScrapeTimeout: 10 * time.Second, // From config.DefaultGlobalConfig + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, + TrackTimestampsStaleness: false, + HTTPClientConfig: component_config.DefaultHTTPClientConfig, + ScrapeInterval: 1 * time.Minute, // From config.DefaultGlobalConfig + ScrapeTimeout: 10 * time.Second, // From config.DefaultGlobalConfig } } @@ -339,6 +342,7 @@ func getPromScrapeConfigs(jobName string, c Arguments) *config.ScrapeConfig { } dec.HonorLabels = c.HonorLabels dec.HonorTimestamps = c.HonorTimestamps + dec.TrackTimestampsStaleness = c.TrackTimestampsStaleness dec.Params = c.Params dec.ScrapeClassicHistograms = c.ScrapeClassicHistograms dec.ScrapeInterval = model.Duration(c.ScrapeInterval) diff --git a/component/prometheus/scrape/scrape_test.go b/component/prometheus/scrape/scrape_test.go index 3a5ea459bcce..bb7cef641f41 100644 --- a/component/prometheus/scrape/scrape_test.go +++ b/component/prometheus/scrape/scrape_test.go @@ -29,6 +29,7 @@ func TestRiverConfig(t *testing.T) { forward_to = [] scrape_interval = "10s" job_name = "local" + track_timestamps_staleness = true bearer_token = "token" proxy_url = "http://0.0.0.0:11111" diff --git a/component/pyroscope/ebpf/args.go b/component/pyroscope/ebpf/args.go index facf9129d6ba..c4c444b917f2 100644 --- a/component/pyroscope/ebpf/args.go +++ b/component/pyroscope/ebpf/args.go @@ -10,8 +10,6 @@ import ( type Arguments struct { ForwardTo []pyroscope.Appendable `river:"forward_to,attr"` Targets []discovery.Target `river:"targets,attr,optional"` - DefaultTarget discovery.Target `river:"default_target,attr,optional"` // undocumented, keeping it until we have other sd - TargetsOnly bool `river:"targets_only,attr,optional"` // undocumented, keeping it until we have other sd CollectInterval time.Duration `river:"collect_interval,attr,optional"` SampleRate int `river:"sample_rate,attr,optional"` PidCacheSize int `river:"pid_cache_size,attr,optional"` diff --git a/component/pyroscope/ebpf/ebpf_linux.go b/component/pyroscope/ebpf/ebpf_linux.go index b8b1afbecf59..8d201ac488f1 100644 --- a/component/pyroscope/ebpf/ebpf_linux.go +++ b/component/pyroscope/ebpf/ebpf_linux.go @@ -82,7 +82,6 @@ func defaultArguments() Arguments { CacheRounds: 3, CollectUserProfile: true, CollectKernelProfile: true, - TargetsOnly: true, Demangle: "none", PythonEnabled: true, } @@ -226,8 +225,7 @@ func targetsOptionFromArgs(args Arguments) sd.TargetsOptions { } return sd.TargetsOptions{ Targets: targets, - DefaultTarget: sd.DiscoveryTarget(args.DefaultTarget), - TargetsOnly: args.TargetsOnly, + TargetsOnly: true, ContainerCacheSize: args.ContainerIDCacheSize, } } diff --git a/component/pyroscope/java/args.go b/component/pyroscope/java/args.go new file mode 100644 index 000000000000..e1eb781f0045 --- /dev/null +++ b/component/pyroscope/java/args.go @@ -0,0 +1,43 @@ +package java + +import ( + "time" + + "github.com/grafana/agent/component/discovery" + "github.com/grafana/agent/component/pyroscope" +) + +type Arguments struct { + Targets []discovery.Target `river:"targets,attr"` + ForwardTo []pyroscope.Appendable `river:"forward_to,attr"` + + TmpDir string `river:"tmp_dir,attr,optional"` + ProfilingConfig ProfilingConfig 
`river:"profiling_config,block,optional"` +} + +type ProfilingConfig struct { + Interval time.Duration `river:"interval,attr,optional"` + SampleRate int `river:"sample_rate,attr,optional"` + Alloc string `river:"alloc,attr,optional"` + Lock string `river:"lock,attr,optional"` + CPU bool `river:"cpu,attr,optional"` +} + +func (rc *Arguments) UnmarshalRiver(f func(interface{}) error) error { + *rc = defaultArguments() + type config Arguments + return f((*config)(rc)) +} + +func defaultArguments() Arguments { + return Arguments{ + TmpDir: "/tmp", + ProfilingConfig: ProfilingConfig{ + Interval: 60 * time.Second, + SampleRate: 100, + Alloc: "10ms", + Lock: "512k", + CPU: true, + }, + } +} diff --git a/component/pyroscope/java/asprof/asprof.go b/component/pyroscope/java/asprof/asprof.go new file mode 100644 index 000000000000..e75a8a3ed502 --- /dev/null +++ b/component/pyroscope/java/asprof/asprof.go @@ -0,0 +1,281 @@ +//go:build linux && (amd64 || arm64) + +package asprof + +import ( + "bytes" + "crypto/sha1" + _ "embed" + "encoding/hex" + "fmt" + "io/fs" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + + "github.com/prometheus/procfs" +) + +var fsMutex sync.Mutex + +// separte dirs for glibc & musl +type Distribution struct { + extractedDir string + version int +} + +func (d *Distribution) binaryLauncher() bool { + return d.version >= 210 +} + +func (d *Distribution) LibPath() string { + if d.binaryLauncher() { + return filepath.Join(d.extractedDir, "lib/libasyncProfiler.so") + } + return filepath.Join(d.extractedDir, "build/libasyncProfiler.so") +} + +func (d *Distribution) JattachPath() string { + if d.binaryLauncher() { + return "" + } + return filepath.Join(d.extractedDir, "build/jattach") +} + +func (d *Distribution) LauncherPath() string { + if d.binaryLauncher() { + return filepath.Join(d.extractedDir, "bin/asprof") + } + return filepath.Join(d.extractedDir, "profiler.sh") +} + +type Profiler struct { + tmpDir string + extractOnce sync.Once + glibcDist *Distribution + muslDist *Distribution + extractError error + tmpDirMarker any + archiveHash string + archive Archive +} + +type Archive struct { + data []byte + version int +} + +func NewProfiler(tmpDir string, archive Archive) *Profiler { + res := &Profiler{tmpDir: tmpDir, glibcDist: new(Distribution), muslDist: new(Distribution), tmpDirMarker: "grafana-agent-asprof"} + sum := sha1.Sum(archive.data) + hexSum := hex.EncodeToString(sum[:]) + res.archiveHash = hexSum + res.glibcDist.version = archive.version + res.muslDist.version = archive.version + res.archive = archive + return res +} + +func (p *Profiler) Execute(dist *Distribution, argv []string) (string, string, error) { + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + + exe := dist.LauncherPath() + cmd := exec.Command(exe, argv...) 
+ + cmd.Stdout = stdout + cmd.Stderr = stderr + err := cmd.Start() + if err != nil { + return stdout.String(), stderr.String(), fmt.Errorf("asprof failed to start %s: %w", exe, err) + } + err = cmd.Wait() + if err != nil { + return stdout.String(), stderr.String(), fmt.Errorf("asprof failed to run %s: %w", exe, err) + } + return stdout.String(), stderr.String(), nil +} + +func (p *Profiler) CopyLib(dist *Distribution, pid int) error { + fsMutex.Lock() + defer fsMutex.Unlock() + libData, err := os.ReadFile(dist.LibPath()) + if err != nil { + return err + } + launcherData, err := os.ReadFile(dist.LauncherPath()) + if err != nil { + return err + } + procRoot := ProcessPath("/", pid) + procRootFile, err := os.Open(procRoot) + if err != nil { + return fmt.Errorf("failed to open proc root %s: %w", procRoot, err) + } + dstLibPath := strings.TrimPrefix(dist.LibPath(), "/") + dstLauncherPath := strings.TrimPrefix(dist.LauncherPath(), "/") + if err = writeFile(procRootFile, dstLibPath, libData, false); err != nil { + return err + } + // this is to create the bin directory; we don't actually need to write anything there, and we don't execute the launcher there + if err = writeFile(procRootFile, dstLauncherPath, launcherData, false); err != nil { + return err + } + return nil +} + +func (p *Profiler) DistributionForProcess(pid int) (*Distribution, error) { + proc, err := procfs.NewProc(pid) + if err != nil { + return nil, fmt.Errorf("failed to select dist for pid %d: %w", pid, err) + } + maps, err := proc.ProcMaps() + if err != nil { + return nil, fmt.Errorf("failed to select dist for pid %d: %w", pid, err) + } + musl := false + glibc := false + for _, m := range maps { + if isMuslMapping(m) { + musl = true + } + if isGlibcMapping(m) { + glibc = true + } + } + if musl && glibc { + return nil, fmt.Errorf("failed to select dist for pid %d: both musl and glibc found", pid) + } + if musl { + return p.muslDist, nil + } + if glibc { + return p.glibcDist, nil + } + if _, err := os.Stat(ProcessPath("/lib/ld-musl-x86_64.so.1", pid)); err == nil { + return p.muslDist, nil + } + if _, err := os.Stat(ProcessPath("/lib/ld-musl-aarch64.so.1", pid)); err == nil { + return p.muslDist, nil + } + if _, err := os.Stat(ProcessPath("/lib64/ld-linux-x86-64.so.2", pid)); err == nil { + return p.glibcDist, nil + } + return nil, fmt.Errorf("failed to select dist for pid %d: neither musl nor glibc found", pid) +} + +func isMuslMapping(m *procfs.ProcMap) bool { + if strings.Contains(m.Pathname, "/lib/ld-musl-x86_64.so.1") { + return true + } + if strings.Contains(m.Pathname, "/lib/ld-musl-aarch64.so.1") { + return true + } + return false +} + +func isGlibcMapping(m *procfs.ProcMap) bool { + if strings.HasSuffix(m.Pathname, "/libc.so.6") { + return true + } + if strings.Contains(m.Pathname, "x86_64-linux-gnu/libc-") { + return true + } + return false +} + +func (p *Profiler) ExtractDistributions() error { + p.extractOnce.Do(func() { + p.extractError = p.extractDistributions() + }) + return p.extractError +} + +func (p *Profiler) extractDistributions() error { + fsMutex.Lock() + defer fsMutex.Unlock() + muslDistName, glibcDistName := p.getDistNames() + + var launcher, jattach, glibc, musl []byte + err := readTarGZ(p.archive.data, func(name string, fi fs.FileInfo, data []byte) error { + if name == "profiler.sh" || name == "asprof" { + launcher = data + } + if name == "jattach" { + jattach = data + } + if strings.Contains(name, "glibc/libasyncProfiler.so") { + glibc = data + } + if strings.Contains(name, "musl/libasyncProfiler.so") { + musl =
data + } + return nil + }) + if err != nil { + return err + } + if launcher == nil || glibc == nil || musl == nil { + return fmt.Errorf("failed to find libasyncProfiler in tar.gz") + } + if !p.glibcDist.binaryLauncher() { + if jattach == nil { + return fmt.Errorf("failed to find jattach in tar.gz") + } + } + + fileMap := map[string][]byte{} + fileMap[filepath.Join(glibcDistName, p.glibcDist.LauncherPath())] = launcher + fileMap[filepath.Join(glibcDistName, p.glibcDist.LibPath())] = glibc + fileMap[filepath.Join(muslDistName, p.muslDist.LauncherPath())] = launcher + fileMap[filepath.Join(muslDistName, p.muslDist.LibPath())] = musl + if !p.glibcDist.binaryLauncher() { + fileMap[filepath.Join(glibcDistName, p.glibcDist.JattachPath())] = jattach + fileMap[filepath.Join(muslDistName, p.muslDist.JattachPath())] = jattach + } + tmpDirFile, err := os.Open(p.tmpDir) + if err != nil { + return fmt.Errorf("failed to open tmp dir %s: %w", p.tmpDir, err) + } + defer tmpDirFile.Close() + + if err = checkTempDirPermissions(tmpDirFile); err != nil { + return err + } + + for path, data := range fileMap { + if err = writeFile(tmpDirFile, path, data, true); err != nil { + return err + } + } + p.glibcDist.extractedDir = filepath.Join(p.tmpDir, glibcDistName) + p.muslDist.extractedDir = filepath.Join(p.tmpDir, muslDistName) + return nil +} + +func (p *Profiler) getDistNames() (string, string) { + muslDistName := fmt.Sprintf("%s-%s-%s", p.tmpDirMarker, + "musl", + p.archiveHash) + glibcDistName := fmt.Sprintf("%s-%s-%s", p.tmpDirMarker, + "glibc", + p.archiveHash) + return muslDistName, glibcDistName +} + +func ProcessPath(path string, pid int) string { + f := ProcFile{path, pid} + return f.ProcRootPath() +} + +type ProcFile struct { + Path string + PID int +} + +func (f *ProcFile) ProcRootPath() string { + return filepath.Join("/proc", strconv.Itoa(f.PID), "root", f.Path) +} diff --git a/component/pyroscope/java/asprof/asprof_linux_amd64.go b/component/pyroscope/java/asprof/asprof_linux_amd64.go new file mode 100644 index 000000000000..7d405539cda6 --- /dev/null +++ b/component/pyroscope/java/asprof/asprof_linux_amd64.go @@ -0,0 +1,18 @@ +//go:build linux && amd64 + +package asprof + +import ( + _ "embed" +) + +//go:embed async-profiler-3.0-linux-x64.tar.gz +var embeddedArchiveData []byte + +// asprof +// glibc / libasyncProfiler.so +// musl / libasyncProfiler.so + +var embeddedArchiveVersion = 300 + +var EmbeddedArchive = Archive{data: embeddedArchiveData, version: embeddedArchiveVersion} diff --git a/component/pyroscope/java/asprof/asprof_linux_arm64.go b/component/pyroscope/java/asprof/asprof_linux_arm64.go new file mode 100644 index 000000000000..e6978f02b995 --- /dev/null +++ b/component/pyroscope/java/asprof/asprof_linux_arm64.go @@ -0,0 +1,18 @@ +//go:build linux && arm64 + +package asprof + +import ( + _ "embed" +) + +//go:embed async-profiler-3.0-linux-arm64.tar.gz +var embeddedArchiveData []byte + +// asprof +// glibc / libasyncProfiler.so +// musl / libasyncProfiler.so + +var embeddedArchiveVersion = 300 + +var EmbeddedArchive = Archive{data: embeddedArchiveData, version: embeddedArchiveVersion} diff --git a/component/pyroscope/java/asprof/asprof_test.go b/component/pyroscope/java/asprof/asprof_test.go new file mode 100644 index 000000000000..297665d024f5 --- /dev/null +++ b/component/pyroscope/java/asprof/asprof_test.go @@ -0,0 +1,86 @@ +//go:build linux + +package asprof + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +// 
extracting to /tmp +// /tmp dir should be sticky (and root-owned) or owned 0755 by the current user +// /tmp/dist-... dir should be owned 0755 by the current user and should not be a symlink +// the rest should use mkdirAt, openAt + +// test /tmp/dist-... is not symlink to /proc/containerpid/root/tmp/dist- +// test /tmp/dist-... is not symlink to /../../../foo + +// write skippable tests with uid=0 +func TestStickyDir(t *testing.T) { + dir := "/tmp" + p := NewProfiler(dir, EmbeddedArchive) + p.tmpDirMarker = fmt.Sprintf("grafana-agent-asprof-%s", uuid.NewString()) + t.Logf("tmpDirMarker: %s", p.tmpDirMarker) + err := p.ExtractDistributions() + assert.NoError(t, err) +} + +func TestOwnedDir(t *testing.T) { + dir := tempDir(t) + err := os.Chmod(dir, 0755) + assert.NoError(t, err) + p := NewProfiler(dir, EmbeddedArchive) + err = p.ExtractDistributions() + assert.NoError(t, err) +} + +func TestOwnedDirWrongPermission(t *testing.T) { + dir := tempDir(t) + err := os.Chmod(dir, 0777) + assert.NoError(t, err) + p := NewProfiler(dir, EmbeddedArchive) + err = p.ExtractDistributions() + assert.Error(t, err) +} + +func TestDistSymlink(t *testing.T) { + // check if /tmp/dist-... is a symlink + td := []bool{true, false} + for _, glibc := range td { + t.Run(fmt.Sprintf("glibc=%t", glibc), func(t *testing.T) { + root := tempDir(t) + err := os.Chmod(root, 0755) + assert.NoError(t, err) + manipulated := tempDir(t) + err = os.Chmod(manipulated, 0755) + assert.NoError(t, err) + p := NewProfiler(root, EmbeddedArchive) + muslDistName, glibcDistName := p.getDistNames() + + if glibc { + err = os.Symlink(manipulated, filepath.Join(root, glibcDistName)) + assert.NoError(t, err) + } else { + err = os.Symlink(manipulated, filepath.Join(root, muslDistName)) + assert.NoError(t, err) + } + + err = p.ExtractDistributions() + t.Logf("expected %s", err) + assert.Error(t, err) + }) + } +} + +func tempDir(t *testing.T) string { + t.Helper() + dir, err := os.MkdirTemp("", "asprof-test") + assert.NoError(t, err) + t.Logf("dir: %s", dir) + return dir +} diff --git a/component/pyroscope/java/asprof/async-profiler-3.0-linux-arm64.tar.gz b/component/pyroscope/java/asprof/async-profiler-3.0-linux-arm64.tar.gz new file mode 100644 index 000000000000..fcab1a963d7a Binary files /dev/null and b/component/pyroscope/java/asprof/async-profiler-3.0-linux-arm64.tar.gz differ diff --git a/component/pyroscope/java/asprof/async-profiler-3.0-linux-x64.tar.gz b/component/pyroscope/java/asprof/async-profiler-3.0-linux-x64.tar.gz new file mode 100644 index 000000000000..c4386b482792 Binary files /dev/null and b/component/pyroscope/java/asprof/async-profiler-3.0-linux-x64.tar.gz differ diff --git a/component/pyroscope/java/asprof/extract.go b/component/pyroscope/java/asprof/extract.go new file mode 100644 index 000000000000..6e912cff5169 --- /dev/null +++ b/component/pyroscope/java/asprof/extract.go @@ -0,0 +1,190 @@ +//go:build linux && (amd64 || arm64) + +package asprof + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/klauspost/compress/gzip" + "golang.org/x/sys/unix" +) + +const extractPerm = 0755 + +func readTarGZ(buf []byte, cb func(name string, fi fs.FileInfo, data []byte) error) error { + gzipReader, err := gzip.NewReader(bytes.NewReader(buf)) + if err != nil { + return err + } + defer gzipReader.Close() + + tarReader := tar.NewReader(gzipReader) + + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + 
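+ // Skip directory entries; non-directory entries are read fully into memory and handed to the callback.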
+ fileInfo := header.FileInfo() + if fileInfo.IsDir() { + continue + } + buffer, err := io.ReadAll(tarReader) + if err != nil { + return err + } + err = cb(header.Name, fileInfo, buffer) + if err != nil { + return err + } + } + + return nil +} + +func writeFile(dir *os.File, path string, data []byte, doOwnershipChecks bool) error { + pl := strings.Split(path, string(filepath.Separator)) + it := dir + dirPathParts := pl[:len(pl)-1] + fname := pl[len(pl)-1] + for _, part := range dirPathParts { + f, err := openAt(it, part, unix.O_RDONLY|unix.O_DIRECTORY|unix.O_NOFOLLOW, 0) + if err != nil { + err = unix.Mkdirat(int(it.Fd()), part, extractPerm) + if err != nil { + return fmt.Errorf("failed to create directory %s %s: %w", path, part, err) + } + f, err = openAt(it, part, unix.O_RDONLY|unix.O_DIRECTORY|unix.O_NOFOLLOW, 0) + if err != nil { + return fmt.Errorf("failed to open directory %s %s: %w", path, part, err) + } + } + defer f.Close() + if doOwnershipChecks { + if err = checkExtractFile(f, it); err != nil { + return err + } + } + it = f + } + f, err := openAt(it, fname, unix.O_RDONLY|unix.O_NOFOLLOW, 0) + if err != nil { + return writeFileData(it, fname, path, data, doOwnershipChecks) + } + defer f.Close() + if doOwnershipChecks { + if err = checkExtractFile(f, it); err != nil { + return err + } + } + return checkFileData(f, path, data) +} + +func checkFileData(f *os.File, path string, data []byte) error { + prevData, err := io.ReadAll(f) + if err != nil { + return fmt.Errorf("failed to read file %s: %w", path, err) + } + if !bytes.Equal(prevData, data) { + return fmt.Errorf("file %s already exists and is different", path) + } + return nil +} + +func writeFileData(it *os.File, fname string, path string, data []byte, doOwnershipChecks bool) error { + f, err := openAt(it, fname, unix.O_WRONLY|unix.O_CREAT|unix.O_EXCL|unix.O_NOFOLLOW, extractPerm) + if err != nil { + return fmt.Errorf("failed to create file %s %s: %w", path, fname, err) + } + defer f.Close() + if doOwnershipChecks { + if err = checkExtractFile(f, it); err != nil { + return err + } + } + if _, err = f.Write(data); err != nil { + return fmt.Errorf("failed to write file %s %s: %w", path, fname, err) + } + return nil +} + +func openAt(f *os.File, path string, flags int, mode uint32) (*os.File, error) { + fd, err := unix.Openat(int(f.Fd()), path, flags, mode) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), filepath.Join(f.Name(), path)), nil +} + +func checkTempDirPermissions(tmpDirFile *os.File) error { + tmpDirFileStat, err := tmpDirFile.Stat() + if err != nil { + return fmt.Errorf("failed to stat tmp dir %s: %w", tmpDirFile.Name(), err) + } + if !tmpDirFileStat.IsDir() { + return fmt.Errorf("tmp dir %s is not a directory", tmpDirFile.Name()) + } + sys := tmpDirFileStat.Sys().(*syscall.Stat_t) + ok := false + if sys.Uid == uint32(os.Getuid()) && tmpDirFileStat.Mode().Perm() == extractPerm { + ok = true + } else if sys.Uid == 0 && tmpDirFileStat.Mode()&os.ModeSticky != 0 { + ok = true + } + if !ok { + return fmt.Errorf("tmp dir %s has wrong permissions %+v", tmpDirFile.Name(), sys) + } + return nil +} + +func checkExtractFile(f *os.File, parent *os.File) error { + parentStat, err := parent.Stat() + if err != nil { + return fmt.Errorf("failed to stat %s: %w", parent.Name(), err) + } + stat, err := f.Stat() + if err != nil { + return fmt.Errorf("failed to stat %s: %w", f.Name(), err) + } + sys := stat.Sys().(*syscall.Stat_t) + parentSys := parentStat.Sys().(*syscall.Stat_t) + + ok := false + if sys.Uid == uint32(os.Getuid()) && stat.Mode().Perm() == extractPerm { + ok = true + } + if !ok { + return fmt.Errorf("%s has wrong permissions %+v", f.Name(), sys) + } + if sys.Dev != parentSys.Dev { + return fmt.Errorf("%s has wrong device %+v %+v", f.Name(), sys, parentSys) + } + + actualPath, err := readlinkFD(f) + if err != nil { + return fmt.Errorf("failed to readlink %s: %w", f.Name(), err) + } + expectedPath := f.Name() + if actualPath != expectedPath { + return fmt.Errorf("expected %s, but it is %s", expectedPath, actualPath) + } + return nil +} + +func readlinkFD(f *os.File) (string, error) { + return os.Readlink(fmt.Sprintf("/proc/self/fd/%d", f.Fd())) +} diff --git a/component/pyroscope/java/java.go b/component/pyroscope/java/java.go new file mode 100644 index 000000000000..efedd901fbd9 --- /dev/null +++ b/component/pyroscope/java/java.go @@ -0,0 +1,121 @@ +//go:build linux && (amd64 || arm64) + +package java + +import ( + "context" + "fmt" + "os" + "strconv" + "sync" + + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/pyroscope" + "github.com/grafana/agent/component/pyroscope/java/asprof" + "github.com/grafana/agent/pkg/flow/logging/level" +) + +const ( + labelProcessID = "__process_pid__" +) + +func init() { + component.Register(component.Registration{ + Name: "pyroscope.java", + Args: Arguments{}, + + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { + if os.Getuid() != 0 { + return nil, fmt.Errorf("java profiler: must be run as root") + } + a := args.(Arguments) + var profiler = asprof.NewProfiler(a.TmpDir, asprof.EmbeddedArchive) + err := profiler.ExtractDistributions() + if err != nil { + return nil, fmt.Errorf("extract async profiler: %w", err) + } + + forwardTo := pyroscope.NewFanout(a.ForwardTo, opts.ID, opts.Registerer) + c := &javaComponent{ + opts: opts, + args: a, + forwardTo: forwardTo, + profiler: profiler, + pid2process: make(map[int]*profilingLoop), + } + c.updateTargets(a) + return c, nil + }, + }) +} + +type javaComponent struct { + opts component.Options + args Arguments + forwardTo *pyroscope.Fanout + + mutex sync.Mutex + pid2process map[int]*profilingLoop + profiler *asprof.Profiler +} + +func (j *javaComponent) Run(ctx context.Context) error { + defer func() { + j.stop() + }() + <-ctx.Done() + return nil +} + +func (j *javaComponent) Update(args component.Arguments) error { + newArgs := args.(Arguments) + j.forwardTo.UpdateChildren(newArgs.ForwardTo) + j.updateTargets(newArgs) + return nil +} + +func (j *javaComponent) updateTargets(args Arguments) { + j.mutex.Lock() + defer j.mutex.Unlock() + j.args = args + + active := make(map[int]struct{}) + for _, target := range args.Targets { + pid, err := strconv.Atoi(target[labelProcessID]) + _ = level.Debug(j.opts.Logger).Log("msg", "active target", + "target", fmt.Sprintf("%+v", target), + "pid", pid) + if err != nil { + _ = level.Error(j.opts.Logger).Log("msg", "invalid target", "target", fmt.Sprintf("%v", target), "err", err) + continue + } + proc := j.pid2process[pid] + if proc == nil { + proc = newProfilingLoop(pid, target, j.opts.Logger, j.profiler, j.forwardTo, j.args.ProfilingConfig) + _ = level.Debug(j.opts.Logger).Log("msg", "new process", "target", fmt.Sprintf("%+v", target)) + j.pid2process[pid] = proc + } else { + proc.update(target, j.args.ProfilingConfig) + } + active[pid] = struct{}{} + } + for pid := range j.pid2process { + if _, ok := active[pid]; ok { + continue + } + _ = level.Debug(j.opts.Logger).Log("msg", "inactive target", "pid", 
pid) + _ = j.pid2process[pid].Close() + delete(j.pid2process, pid) + } +} + +func (j *javaComponent) stop() { + _ = level.Debug(j.opts.Logger).Log("msg", "stopping") + j.mutex.Lock() + defer j.mutex.Unlock() + for _, proc := range j.pid2process { + _ = proc.Close() + _ = level.Debug(j.opts.Logger).Log("msg", "stopped", "pid", proc.pid) + delete(j.pid2process, proc.pid) + } +} diff --git a/component/pyroscope/java/java_stub.go b/component/pyroscope/java/java_stub.go new file mode 100644 index 000000000000..26eaa30f7c0c --- /dev/null +++ b/component/pyroscope/java/java_stub.go @@ -0,0 +1,37 @@ +//go:build (linux && !(amd64 || arm64)) || !linux + +package java + +import ( + "context" + + "github.com/grafana/agent/component" + "github.com/grafana/agent/pkg/flow/logging/level" +) + +func init() { + component.Register(component.Registration{ + Name: "pyroscope.java", + Args: Arguments{}, + + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { + _ = level.Warn(opts.Logger).Log("msg", "the pyroscope.java component only works on linux for amd64 and arm64; enabling it otherwise will do nothing") + return &javaComponent{}, nil + }, + }) +} + +type javaComponent struct { +} + +func (j *javaComponent) Run(ctx context.Context) error { + <-ctx.Done() + return nil +} + +func (j *javaComponent) Update(args component.Arguments) error { + return nil +} diff --git a/component/pyroscope/java/loop.go b/component/pyroscope/java/loop.go new file mode 100644 index 000000000000..918e97751563 --- /dev/null +++ b/component/pyroscope/java/loop.go @@ -0,0 +1,274 @@ +//go:build linux && (amd64 || arm64) + +package java + +import ( + "context" + _ "embed" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/grafana/agent/component/discovery" + "github.com/grafana/agent/component/pyroscope" + "github.com/grafana/agent/component/pyroscope/java/asprof" + "github.com/grafana/agent/pkg/flow/logging/level" + jfrpprof "github.com/grafana/jfr-parser/pprof" + jfrpprofPyroscope "github.com/grafana/jfr-parser/pprof/pyroscope" + "github.com/prometheus/prometheus/model/labels" + gopsutil "github.com/shirou/gopsutil/v3/process" +) + +const spyName = "grafana-agent.java" + +type profilingLoop struct { + logger log.Logger + output *pyroscope.Fanout + cfg ProfilingConfig + wg sync.WaitGroup + mutex sync.Mutex + pid int + target discovery.Target + cancel context.CancelFunc + error error + dist *asprof.Distribution + jfrFile string + startTime time.Time + profiler *asprof.Profiler + sampleRate int +} + +func newProfilingLoop(pid int, target discovery.Target, logger log.Logger, profiler *asprof.Profiler, output *pyroscope.Fanout, cfg ProfilingConfig) *profilingLoop { + ctx, cancel := context.WithCancel(context.Background()) + dist, err := profiler.DistributionForProcess(pid) + p := &profilingLoop{ + logger: log.With(logger, "pid", pid), + output: output, + pid: pid, + target: target, + cancel: cancel, + dist: dist, + jfrFile: fmt.Sprintf("/tmp/asprof-%d-%d.jfr", os.Getpid(), pid), + cfg: cfg, + profiler: profiler, + } + _ = level.Debug(p.logger).Log("msg", "new process", "target", fmt.Sprintf("%+v", target)) + + if err != nil { + p.onError(fmt.Errorf("failed to select dist for pid %d: %w", pid, err)) + return p + } + + p.wg.Add(1) + go func() { + defer p.wg.Done() + p.loop(ctx) + }() + return p +} + +func (p *profilingLoop) loop(ctx context.Context) { + if err := p.profiler.CopyLib(p.dist, p.pid); err != nil { 
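+ // The library is copied into the target's filesystem via /proc/<pid>/root so the JVM can load it; without it profiling cannot start.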
p.onError(fmt.Errorf("failed to copy libasyncProfiler.so: %w", err)) + return + } + defer func() { + _ = p.stop() + }() + sleep := func() { + timer := time.NewTimer(p.interval()) + defer timer.Stop() + select { + case <-timer.C: + return + case <-ctx.Done(): + return + } + } + for { + err := p.start() + if err != nil { + // could happen when agent restarted - [ERROR] Profiler already started\n + alive := p.onError(fmt.Errorf("failed to start: %w", err)) + if !alive { + return + } + } + sleep() + if ctx.Err() != nil { + return + } + err = p.reset() + if err != nil { + alive := p.onError(fmt.Errorf("failed to reset: %w", err)) + if !alive { + return + } + } + } +} + +func (p *profilingLoop) reset() error { + jfrFile := asprof.ProcessPath(p.jfrFile, p.pid) + startTime := p.startTime + endTime := time.Now() + sampleRate := p.sampleRate + p.startTime = endTime + defer func() { + os.Remove(jfrFile) + }() + + err := p.stop() + if err != nil { + return fmt.Errorf("failed to stop : %w", err) + } + jfrBytes, err := os.ReadFile(jfrFile) + if err != nil { + return fmt.Errorf("failed to read jfr file: %w", err) + } + _ = level.Debug(p.logger).Log("msg", "jfr file read", "len", len(jfrBytes)) + + return p.push(jfrBytes, startTime, endTime, int64(sampleRate)) +} +func (p *profilingLoop) push(jfrBytes []byte, startTime time.Time, endTime time.Time, sampleRate int64) error { + profiles, err := jfrpprof.ParseJFR(jfrBytes, &jfrpprof.ParseInput{ + StartTime: startTime, + EndTime: endTime, + SampleRate: sampleRate, + }, new(jfrpprof.LabelsSnapshot)) + if err != nil { + return fmt.Errorf("failed to parse jfr: %w", err) + } + target := p.getTarget() + for _, req := range profiles.Profiles { + metric := req.Metric + sz := req.Profile.SizeVT() + l := log.With(p.logger, "metric", metric, "sz", sz) + ls := labels.NewBuilder(nil) + for _, l := range jfrpprofPyroscope.Labels(target, profiles.JFREvent, req.Metric, "", spyName) { + ls.Set(l.Name, l.Value) + } + if ls.Get(labelServiceName) == "" { + ls.Set(labelServiceName, inferServiceName(target)) + } + + profile, err := req.Profile.MarshalVT() + if err != nil { + _ = l.Log("msg", "failed to marshal profile", "err", err) + continue + } + samples := []*pyroscope.RawSample{{RawProfile: profile}} + err = p.output.Appender().Append(context.Background(), ls.Labels(), samples) + if err != nil { + _ = l.Log("msg", "failed to push jfr", "err", err) + continue + } + _ = l.Log("msg", "pushed jfr-pprof") + } + return nil +} + +func (p *profilingLoop) start() error { + cfg := p.getConfig() + p.startTime = time.Now() + p.sampleRate = cfg.SampleRate + argv := make([]string, 0, 14) + // asprof cli reference: https://github.com/async-profiler/async-profiler?tab=readme-ov-file#profiler-options + argv = append(argv, + "-f", p.jfrFile, + "-o", "jfr", + ) + if cfg.CPU { + argv = append(argv, "-e", "itimer") + profilingInterval := time.Second.Nanoseconds() / int64(cfg.SampleRate) + argv = append(argv, "-i", strconv.FormatInt(profilingInterval, 10)) + } + if cfg.Alloc != "" { + argv = append(argv, "--alloc", cfg.Alloc) + } + if cfg.Lock != "" { + argv = append(argv, "--lock", cfg.Lock) + } + argv = append(argv, + "start", + "--timeout", strconv.Itoa(int(p.interval().Seconds())), + strconv.Itoa(p.pid), + ) + + _ = level.Debug(p.logger).Log("cmd", fmt.Sprintf("%s %s", p.dist.LauncherPath(), strings.Join(argv, " "))) + stdout, stderr, err := p.profiler.Execute(p.dist, argv) + if err != nil { + return fmt.Errorf("asprof failed to run: %w %s %s", err, stdout, stderr) + } + return nil +} + +func 
(p *profilingLoop) getConfig() ProfilingConfig { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.cfg +} + +func (p *profilingLoop) stop() error { + argv := []string{ + "stop", + "-o", "jfr", + strconv.Itoa(p.pid), + } + _ = level.Debug(p.logger).Log("msg", "asprof", "cmd", fmt.Sprintf("%s %s", p.dist.LauncherPath(), strings.Join(argv, " "))) + stdout, stderr, err := p.profiler.Execute(p.dist, argv) + if err != nil { + return fmt.Errorf("asprof failed to run: %w %s %s", err, stdout, stderr) + } + _ = level.Debug(p.logger).Log("msg", "asprof stopped", "stdout", stdout, "stderr", stderr) + return nil +} + +func (p *profilingLoop) update(target discovery.Target, config ProfilingConfig) { + p.mutex.Lock() + defer p.mutex.Unlock() + p.target = target + p.cfg = config +} + +// Close stops profiling this profilingLoop +func (p *profilingLoop) Close() error { + p.cancel() + p.wg.Wait() + return nil +} + +func (p *profilingLoop) onError(err error) bool { + alive := p.alive() + if alive { + _ = level.Error(p.logger).Log("err", err) + } else { + _ = level.Debug(p.logger).Log("err", err) + } + p.mutex.Lock() + defer p.mutex.Unlock() + p.error = err + return alive +} + +func (p *profilingLoop) interval() time.Duration { + return p.getConfig().Interval +} + +func (p *profilingLoop) getTarget() discovery.Target { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.target +} + +func (p *profilingLoop) alive() bool { + exists, err := gopsutil.PidExists(int32(p.pid)) + if err != nil { + _ = level.Error(p.logger).Log("msg", "failed to check if process is alive", "err", err) + } + return err == nil && exists +} diff --git a/component/pyroscope/java/target.go b/component/pyroscope/java/target.go new file mode 100644 index 000000000000..25a1defebd54 --- /dev/null +++ b/component/pyroscope/java/target.go @@ -0,0 +1,35 @@ +package java + +import ( + "fmt" + + "github.com/grafana/agent/component/discovery" +) + +const ( + labelServiceName = "service_name" + labelServiceNameK8s = "__meta_kubernetes_pod_annotation_pyroscope_io_service_name" +) + +func inferServiceName(target discovery.Target) string { + k8sServiceName := target[labelServiceNameK8s] + if k8sServiceName != "" { + return k8sServiceName + } + k8sNamespace := target["__meta_kubernetes_namespace"] + k8sContainer := target["__meta_kubernetes_pod_container_name"] + if k8sNamespace != "" && k8sContainer != "" { + return fmt.Sprintf("java/%s/%s", k8sNamespace, k8sContainer) + } + dockerContainer := target["__meta_docker_container_name"] + if dockerContainer != "" { + return dockerContainer + } + if swarmService := target["__meta_dockerswarm_container_label_service_name"]; swarmService != "" { + return swarmService + } + if swarmService := target["__meta_dockerswarm_service_name"]; swarmService != "" { + return swarmService + } + return "unspecified" +} diff --git a/component/pyroscope/scrape/target.go b/component/pyroscope/scrape/target.go index 703a93dd63be..736d75b43f78 100644 --- a/component/pyroscope/scrape/target.go +++ b/component/pyroscope/scrape/target.go @@ -430,5 +430,11 @@ func inferServiceName(lset labels.Labels) string { if dockerContainer != "" { return dockerContainer } + if swarmService := lset.Get("__meta_dockerswarm_container_label_service_name"); swarmService != "" { + return swarmService + } + if swarmService := lset.Get("__meta_dockerswarm_service_name"); swarmService != "" { + return swarmService + } return "unspecified" } diff --git a/converter/internal/prometheusconvert/component/scrape.go 
b/converter/internal/prometheusconvert/component/scrape.go index a2005cf85fbb..f69f3efa96f9 100644 --- a/converter/internal/prometheusconvert/component/scrape.go +++ b/converter/internal/prometheusconvert/component/scrape.go @@ -52,6 +52,7 @@ func toScrapeArguments(scrapeConfig *prom_config.ScrapeConfig, forwardTo []stora JobName: scrapeConfig.JobName, HonorLabels: scrapeConfig.HonorLabels, HonorTimestamps: scrapeConfig.HonorTimestamps, + TrackTimestampsStaleness: scrapeConfig.TrackTimestampsStaleness, Params: scrapeConfig.Params, ScrapeClassicHistograms: scrapeConfig.ScrapeClassicHistograms, ScrapeInterval: time.Duration(scrapeConfig.ScrapeInterval), diff --git a/converter/internal/prometheusconvert/testdata/scrape.river b/converter/internal/prometheusconvert/testdata/scrape.river index 0ca26d333fb9..002a89c6315c 100644 --- a/converter/internal/prometheusconvert/testdata/scrape.river +++ b/converter/internal/prometheusconvert/testdata/scrape.river @@ -9,11 +9,12 @@ prometheus.scrape "prometheus_1" { app = "foo", }], ) - forward_to = [prometheus.remote_write.default.receiver] - job_name = "prometheus-1" - honor_timestamps = false - scrape_interval = "10s" - scrape_timeout = "5s" + forward_to = [prometheus.remote_write.default.receiver] + job_name = "prometheus-1" + honor_timestamps = false + track_timestamps_staleness = true + scrape_interval = "10s" + scrape_timeout = "5s" basic_auth { username = "user" diff --git a/converter/internal/staticconvert/internal/build/builder.go b/converter/internal/staticconvert/internal/build/builder.go index 58fedf6225c2..dadc4ae3fd96 100644 --- a/converter/internal/staticconvert/internal/build/builder.go +++ b/converter/internal/staticconvert/internal/build/builder.go @@ -204,6 +204,10 @@ func (b *IntegrationsConfigBuilder) appendExporter(commonConfig *int_config.Comm RemoteWriteConfigs: b.cfg.Integrations.ConfigV1.PrometheusRemoteWrite, } + if len(b.cfg.Integrations.ConfigV1.PrometheusRemoteWrite) == 0 { + b.diags.Add(diag.SeverityLevelError, "The converter does not support handling integrations which are not connected to a remote_write.") + } + jobNameToCompLabelsFunc := func(jobName string) string { return b.jobNameToCompLabel(jobName) } diff --git a/converter/internal/staticconvert/testdata/integrations_no_rw.diags b/converter/internal/staticconvert/testdata/integrations_no_rw.diags new file mode 100644 index 000000000000..1f0d463ede34 --- /dev/null +++ b/converter/internal/staticconvert/testdata/integrations_no_rw.diags @@ -0,0 +1,2 @@ +(Error) The converter does not support handling integrations which are not connected to a remote_write. +(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. 
\ No newline at end of file diff --git a/converter/internal/staticconvert/testdata/integrations_no_rw.yaml b/converter/internal/staticconvert/testdata/integrations_no_rw.yaml new file mode 100644 index 000000000000..76e4848e56b5 --- /dev/null +++ b/converter/internal/staticconvert/testdata/integrations_no_rw.yaml @@ -0,0 +1,4 @@ +integrations: + node_exporter: + scrape_integration: true + enabled: true \ No newline at end of file diff --git a/docs/generator/links_to_types.go b/docs/generator/links_to_types.go index 867654e1648d..8de89bfd1321 100644 --- a/docs/generator/links_to_types.go +++ b/docs/generator/links_to_types.go @@ -38,12 +38,10 @@ func (l *LinksToTypesGenerator) Generate() (string, error) { } note := ` -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} ` return heading + acceptingSection + outputSection + note, nil diff --git a/docs/make-docs b/docs/make-docs index 25176a37f051..d5d861ca83b4 100755 --- a/docs/make-docs +++ b/docs/make-docs @@ -6,7 +6,13 @@ # [Semantic versioning](https://semver.org/) is used to help the reader identify the significance of changes. # Changes are relevant to this script and the support docs.mk GNU Make interface. # - +# ## 5.2.0 (2024-01-18) +# +# ### Changed +# +# - Updated `make vale` to use latest Vale style and configuration. +# - Updated `make vale` to use platform-appropriate image. 
+# # ## 5.1.2 (2023-11-08) # # ### Added @@ -704,14 +710,14 @@ case "${image}" in "${PODMAN}" run \ --init \ --interactive \ - --platform linux/amd64 \ --rm \ + --workdir /etc/vale \ --tty \ ${volumes} \ "${DOCS_IMAGE}" \ "--minAlertLevel=${VALE_MINALERTLEVEL}" \ - --config=/etc/vale/.vale.ini \ - --output=line \ + '--glob=*.md' \ + --output=/etc/vale/rdjsonl.tmpl \ /hugo/content/docs | sed "s#$(proj_dst "${proj}")#sources#" ;; *) diff --git a/docs/rfcs/0000-template.md b/docs/rfcs/0000-template.md index bbc01019c3ab..c565ea04e584 100644 --- a/docs/rfcs/0000-template.md +++ b/docs/rfcs/0000-template.md @@ -3,4 +3,3 @@ * Date: YYYY-MM-DD * Author: Full Name (@github_username) * PR: [grafana/agent#XXXX](https://github.com/grafana/agent/pull/XXXX) -* Status: Draft diff --git a/docs/rfcs/0001-designing-in-the-open.md b/docs/rfcs/0001-designing-in-the-open.md index 8f73f5d7a8d2..7419b060a375 100644 --- a/docs/rfcs/0001-designing-in-the-open.md +++ b/docs/rfcs/0001-designing-in-the-open.md @@ -3,7 +3,6 @@ * Date: 2021-11-02 * Author: Robert Fratto (@rfratto) * PR: [grafana/agent#1055](https://github.com/grafana/agent/pull/1055) -* Status: Implemented ## Summary diff --git a/docs/rfcs/0002-integrations-in-operator.md b/docs/rfcs/0002-integrations-in-operator.md index 8003606d3d46..ed54d40de6bd 100644 --- a/docs/rfcs/0002-integrations-in-operator.md +++ b/docs/rfcs/0002-integrations-in-operator.md @@ -3,7 +3,6 @@ * Date: 2022-01-04 * Author: Robert Fratto (@rfratto) * PR: [grafana/agent#1224](https://github.com/grafana/agent/pull/1224) -* Status: Draft ## Background diff --git a/docs/rfcs/0003-new-metrics-subsystem.md b/docs/rfcs/0003-new-metrics-subsystem.md index 961cd983e14a..336c0e4cc475 100644 --- a/docs/rfcs/0003-new-metrics-subsystem.md +++ b/docs/rfcs/0003-new-metrics-subsystem.md @@ -3,7 +3,7 @@ * Date: 2021-11-29 * Author: Robert Fratto (@rfratto) * PR: [grafana/agent#1140](https://github.com/grafana/agent/pull/1140) -* Status: Draft +* Status: Abandoned ## Background diff --git a/docs/rfcs/0004-agent-flow.md b/docs/rfcs/0004-agent-flow.md index db061af7a16b..3c1052e926ad 100644 --- a/docs/rfcs/0004-agent-flow.md +++ b/docs/rfcs/0004-agent-flow.md @@ -1,14 +1,13 @@ -# This provided the basis for Agent Flow, and though not all the concepts/ideas will make it into flow, it is good to have the historical context for why we started down this path. +# This provided the basis for Agent Flow, and though not all the concepts/ideas will make it into flow, it is good to have the historical context for why we started down this path. -# Agent Flow - Agent Utilizing Components +# Agent Flow - Agent Utilizing Components * Date: 2022-03-30 * Author: Matt Durham (@mattdurham) -* PRs: - * [grafana/agent#1538](https://github.com/grafana/agent/pull/1538) - Problem Statement +* PRs: + * [grafana/agent#1538](https://github.com/grafana/agent/pull/1538) - Problem Statement * [grafana/agent#1546](https://github.com/grafana/agent/pull/1546) - Messages and Expressions -* Status: Draft ## Overarching Problem Statement @@ -17,7 +16,7 @@ The Agents configuration and onboarding is difficult to use. Viewing the effect ## Description -Agent Flow is intended to solve real world needs that the Grafana Agent team have identified in conversations with users and developers. +Agent Flow is intended to solve real world needs that the Grafana Agent team have identified in conversations with users and developers. 
These broadly include: @@ -32,13 +31,13 @@ These broadly include: - Lack of understanding how telemetry data moves through agent - Other systems use pipeline/extensions to allow users to understand how data moves through the system -# 1. Introduction and Goals +# 1. Introduction and Goals -This design document outlines Agent Flow, a system for describing a programmable pipeline for telemetry data. +This design document outlines Agent Flow, a system for describing a programmable pipeline for telemetry data. Agent Flow refers to both the execution, configuration and visual configurator of data flow. -### Goals +### Goals * Allow users to more easily understand the impact of their configuration * Allow users to collect integration metrics across a set of agents @@ -55,43 +54,43 @@ Agent Flow refers to both the execution, configuration and visual configurator o At a high level, Agent Flow: -* Breaks apart the existing hierarchical configuration file into reusable components +* Breaks apart the existing hierarchical configuration file into reusable components * Allows components to be connected, resulting in a programmable pipeline of telemetry data -This document considers three potential approaches to allow users to connect components together: +This document considers three potential approaches to allow users to connect components together: -1. Message passing (i.e., an actor model) +1. Message passing (i.e., an actor model) 2. Expressions (i.e., directly referencing the output of another component) -3. A hybrid of both messages and expressions +3. A hybrid of both messages and expressions -The Flow Should in general resemble a flowchart or node graph. The data flow diagram would conceptually look like the below, with each node being composable and connecting with other nodes. +The Flow Should in general resemble a flowchart or node graph. The data flow diagram would conceptually look like the below, with each node being composable and connecting with other nodes. 
``` -┌─────────────────────────┐ ┌──────────────────┐ ┌─────────────────────┐ ┌───────────────────┐ -│ │ ┌─────▶│ Target Filter │─────────▶│ Redis Integration │──────▶│ Metric Filter │──┐ -│ │ │ └──────────────────┘ └─────────────────────┘ └───────────────────┘ │ -│ Service Discovery │──────┤ │ -│ │ │ │ -│ │ │ │ -└─────────────────────────┘ │ ┌─────────────────┐ ┌──────────────────────┐ ┌────────┘ - ├─────▶│ Target Filter │──────────▶│ MySQL Integrations │───────────┐ │ - │ └─────────────────┘ └──────────────────────┘ │ │ - │ │ │ - │ ┌─────────────────┐ ┌─────────────┐ │ │ +┌─────────────────────────┐ ┌──────────────────┐ ┌─────────────────────┐ ┌───────────────────┐ +│ │ ┌─────▶│ Target Filter │─────────▶│ Redis Integration │──────▶│ Metric Filter │──┐ +│ │ │ └──────────────────┘ └─────────────────────┘ └───────────────────┘ │ +│ Service Discovery │──────┤ │ +│ │ │ │ +│ │ │ │ +└─────────────────────────┘ │ ┌─────────────────┐ ┌──────────────────────┐ ┌────────┘ + ├─────▶│ Target Filter │──────────▶│ MySQL Integrations │───────────┐ │ + │ └─────────────────┘ └──────────────────────┘ │ │ + │ │ │ + │ ┌─────────────────┐ ┌─────────────┐ │ │ └──────▶│ Target Filter │─────────────▶│ Scraper │─────────────┐ │ │ ┌────────────────┐ └─────────────────┘ └─────────────┘ └──┴┬───────┴─▶│ Remote Write │ │ └────────────────┘ - │ - │ -┌──────────────────────────┐ │ -│ Remote Write Receiver │─────┐ ┌───────────────────────┐ │ -└──────────────────────────┘ │ ┌────▶│ Metric Transformer │─────────┘ - │ │ └───────────────────────┘ - │ │ -┌─────────────────────────┐ │ ┌────────────────────┐ │ -│ HTTP Receiver │──────┴─────▶│ Metric Filter │────┘ ┌──────────────────────────────────┐ -└─────────────────────────┘ └────────────────────┘ │ Global and Server Settings │ - └──────────────────────────────────┘ + │ + │ +┌──────────────────────────┐ │ +│ Remote Write Receiver │─────┐ ┌───────────────────────┐ │ +└──────────────────────────┘ │ ┌────▶│ Metric Transformer │─────────┘ + │ │ └───────────────────────┘ + │ │ +┌─────────────────────────┐ │ ┌────────────────────┐ │ +│ HTTP Receiver │──────┴─────▶│ Metric Filter │────┘ ┌──────────────────────────────────┐ +└─────────────────────────┘ └────────────────────┘ │ Global and Server Settings │ + └──────────────────────────────────┘ ``` **Note: Consider all examples pseudoconfig** @@ -107,14 +106,14 @@ Expression based is writing expressions that allow referencing other components **Cons** * Harder for users to wire things together - * References to components are more complex, which may be harder to understand + * References to components are more complex, which may be harder to understand * Harder to build a GUI for * Every field of a component is potentially dynamic, making it harder to represent visually ## 2.2 Message Based -Message based is where components have no knowledge of other components and information is passed strictly via input and output streams. +Message based is where components have no knowledge of other components and information is passed strictly via input and output streams. 
**Pros** @@ -122,7 +121,7 @@ Message based is where components have no knowledge of other components and info * Easier to build a GUI for * Inputs and Outputs are well defined and less granular * Connections are made by connecting two components directly, compared to expressions which connect subsets of a component's output -* References between components are no more than strings, making the text-based representation language agnostic (e.g., it could be YAML, JSON, or any language) +* References between components are no more than strings, making the text-based representation language agnostic (e.g., it could be YAML, JSON, or any language) **Cons** @@ -130,16 +129,16 @@ Message based is where components have no knowledge of other components and info * Larger type system needed * More structured to keep the amount of types down -Messages require a more rigid type structure to minimize the number of total components. +Messages require a more rigid type structure to minimize the number of total components. For example, it would be preferable to have a single `Credential` type that can be emitted by an s3, Vault, or Consul component. These components would then need to set a field that marks their output as a specific kind of Credential (such as Basic Auth or Bearer Auth). If, instead, you had multiple Credential types, like `MySQLCredentials` and `RedisCredentials`, you would have the following components: -* Vault component for MySQL credentials -* Vault component for Redis credentials -* S3 component for MySQL credentials -* S3 component for Redis credentials +* Vault component for MySQL credentials +* Vault component for Redis credentials +* S3 component for MySQL credentials +* S3 component for Redis credentials * (and so on) ## 2.3 Hybrid @@ -157,10 +156,10 @@ discovery "mysql_pods" { integration "mysql" { - # Create one mysql integration for every element in the array here + # Create one mysql integration for every element in the array here for_each = discovery.mysql_pods.targets - # Each spawned mysql integration has its data_source_name derived from + # Each spawned mysql integration has its data_source_name derived from # the address label of the input target. data_source_name = "root@(${each.labels["__address__"]})" } diff --git a/docs/rfcs/0005-river.md b/docs/rfcs/0005-river.md index 8f4e3e12299b..3fa82a5f7eb2 100644 --- a/docs/rfcs/0005-river.md +++ b/docs/rfcs/0005-river.md @@ -3,7 +3,6 @@ * Date: 2022-06-27 * Author: Robert Fratto (@rfratto), Matt Durham (@mattdurham) * PR: [grafana/agent#1839](https://github.com/grafana/agent/pull/1839) -* Status: Draft ## Summary diff --git a/docs/rfcs/0006-clustering.md b/docs/rfcs/0006-clustering.md index b6c08b2bc210..b29070410e47 100644 --- a/docs/rfcs/0006-clustering.md +++ b/docs/rfcs/0006-clustering.md @@ -3,7 +3,6 @@ * Date: 2023-03-02 * Author: Paschalis Tsilias (@tpaschalis) * PR: [grafana/agent#3151](https://github.com/grafana/agent/pull/3151) -* Status: Draft ## Summary - Background We routinely run agents with 1-10 million active series; we regularly see @@ -98,7 +97,7 @@ presented in the next section. ## Use cases In the first iteration of agent clustering, we would like to start with the following use-cases. These two are distinct in the way that they make use of -scheduling. +scheduling. The first one makes sure that we have a way of notifying components of cluster changes and calling their Update method and continuously re-evaluate ownership @@ -112,9 +111,9 @@ it is scraping/reading logs from. 
Components that use the Flow concept of a “target” as their Arguments should be able to distribute the target load between themselves. To do that we can introduce a layer of abstraction over the Targets definition that can interact with the Sharder provided by the -clusterer and provide a simple API, for example: +clusterer and provide a simple API, for example: ```go -type Targets interface { +type Targets interface { Get() []Target } ``` @@ -136,9 +135,9 @@ I propose that we start with the following set of components that make use of this functionality: prometheus.scrape, loki.source.file, loki.source.kubernetes, and pyroscope.scrape. -Here’s how the configuration for a component could look like: +Here’s how the configuration for a component could look like: ```river -prometheus.scrape "pods" { +prometheus.scrape "pods" { clustering { node_updates = true } @@ -200,7 +199,7 @@ information. On a more practical note, we’ll have to choose how components might use to opt-in to the component scheduling. -For example, we could implement either: +For example, we could implement either: * Implicitly adding a new Argument block that is implicitly present by default on _all_ components: ``` diff --git a/docs/rfcs/0006-future-of-agent-operator.md b/docs/rfcs/0006-future-of-agent-operator.md index e0ed4bef9304..3a5c3d2e5611 100644 --- a/docs/rfcs/0006-future-of-agent-operator.md +++ b/docs/rfcs/0006-future-of-agent-operator.md @@ -3,7 +3,6 @@ * Date: 2022-08-17 * Author: Craig Peterson (@captncraig) * PR: [grafana/agent#2046](https://github.com/grafana/agent/pull/2046) -* Status: Draft ## Summary @@ -31,6 +30,6 @@ The operator is a fairly complex piece of code, and has been slower than some ot ## Beta status -The Grafana Agent Operator is still considered beta software. It has received a better reception than anticipated, and is now an important part of the Agent project. We are committed to supporting the Operator into the future, but are going to leave the beta designation in place while making larger refactorings as described above. We make efforts to avoid breaking changes, and hope that custom resource definitions will remain compatible, but it is possible some changes will be necessary. We will make every effort to justify and communicate such scenarios as they arise. +The Grafana Agent Operator is still considered beta software. It has received a better reception than anticipated, and is now an important part of the Agent project. We are committed to supporting the Operator into the future, but are going to leave the beta designation in place while making larger refactorings as described above. We make efforts to avoid breaking changes, and hope that custom resource definitions will remain compatible, but it is possible some changes will be necessary. We will make every effort to justify and communicate such scenarios as they arise. Once we are confident we have an Operator we are happy with and that the resource definitions are stable, we will revisit the beta status as soon as we can. 
diff --git a/docs/rfcs/0007-flow-modules.md b/docs/rfcs/0007-flow-modules.md index f08fb74c0f3b..5058663dd3ba 100644 --- a/docs/rfcs/0007-flow-modules.md +++ b/docs/rfcs/0007-flow-modules.md @@ -3,7 +3,6 @@ * Date: 2023-01-27 * Author: Matt Durham @mattdurham * PR: [grafana/agent#2898](https://github.com/grafana/agent/pull/2898) -* Status: Draft [Formatted Link for ease of user](https://github.com/grafana/agent/blob/rfc_modules/docs/rfcs/0007-flow-modules.md) @@ -30,7 +29,7 @@ During this time the Agent team saw a lot of potential in the idea of "modules." ### Enable re-use of common patterns -Common functionality can be wrapped in a set of common components that form a module. These shared modules can then be used instead of reinventing use cases. +Common functionality can be wrapped in a set of common components that form a module. These shared modules can then be used instead of reinventing use cases. ### Allow loading a module from a string @@ -42,11 +41,11 @@ Modules will be able to load other modules, with reasonable safe guards. There w ### Modules should be sandboxed except via arguments and exports -Modules cannot directly access children or parent modules except through predefined arguments and exports. +Modules cannot directly access children or parent modules except through predefined arguments and exports. ## Non Goals -Non goals represent capabilities that are not going to be done in the initial release of modules but may come in later versions. +Non goals represent capabilities that are not going to be done in the initial release of modules but may come in later versions. * Add additional capabilities to load strings * Any type of versioning @@ -66,7 +65,7 @@ Modules will not contain any sort of versioning nor will check for compatibility ### Any user interface work beyond ensuring it works as the UI currently does -Users will not be able to drill into modules, they will be represented as any other normal component. +Users will not be able to drill into modules, they will be represented as any other normal component. ## Example @@ -122,7 +121,7 @@ prometheus.scrape "scraper" { * A module cannot directly or indirectly load itself, this will not be enforced by the system * Singleton components are not supported at this time. Example [node_exporter](https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.integration.node_exporter/). * Modules will not prevent competing resources, such as starting a server on the same port -* [Configuration blocks](https://grafana.com/docs/agent/latest/flow/reference/config-blocks/#configuration-blocks) will not be supported. +* [Configuration blocks](https://grafana.com/docs/agent/latest/flow/reference/config-blocks/#configuration-blocks) will not be supported. * Names of arguments and exports within a module must be unique across that module. ## Proposal diff --git a/docs/rfcs/0008-backwards-compatibility.md b/docs/rfcs/0008-backwards-compatibility.md index 147490f41e40..56d4bac647a3 100644 --- a/docs/rfcs/0008-backwards-compatibility.md +++ b/docs/rfcs/0008-backwards-compatibility.md @@ -3,7 +3,6 @@ * Date: 2023-05-25 * Author: Robert Fratto (@rfratto) * PR: [grafana/agent#3981](https://github.com/grafana/agent/pull/3981) -* Status: Draft Grafana Agent has been following [semantic versioning](https://semver.org/) since its inception. After three years of development and 33 minor releases, the project is on trajectory to have a 1.0 release.  
diff --git a/docs/sources/_index.md b/docs/sources/_index.md
index 780a3800da31..a902be317bab 100644
--- a/docs/sources/_index.md
+++ b/docs/sources/_index.md
@@ -9,7 +9,7 @@ title: Grafana Agent
 description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector
 weight: 350
 cascade:
-  AGENT_RELEASE: v0.39.0
+  AGENT_RELEASE: v0.39.2
   OTEL_VERSION: v0.87.0
 ---
 
@@ -24,11 +24,11 @@ Grafana Agent is based around **components**. Components are wired together to
 form programmable observability **pipelines** for telemetry collection,
 processing, and delivery.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 This page focuses mainly on [Flow mode](https://grafana.com/docs/agent//flow/), the Terraform-inspired variant of Grafana Agent.
 
 For information on other variants of Grafana Agent, refer to [Introduction to Grafana Agent]({{< relref "./about.md" >}}).
-{{% /admonition %}}
+{{< /admonition >}}
 
 Grafana Agent can collect, transform, and send data to:
 
diff --git a/docs/sources/_index.md.t b/docs/sources/_index.md.t
index 549ba33ef8db..daf939a62ac3 100644
--- a/docs/sources/_index.md.t
+++ b/docs/sources/_index.md.t
@@ -24,11 +24,11 @@ Grafana Agent is based around **components**. Components are wired together to
 form programmable observability **pipelines** for telemetry collection,
 processing, and delivery.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 This page focuses mainly on [Flow mode](https://grafana.com/docs/agent//flow/), the Terraform-inspired variant of Grafana Agent.
 
 For information on other variants of Grafana Agent, refer to [Introduction to Grafana Agent]({{< relref "./about.md" >}}).
-{{% /admonition %}}
+{{< /admonition >}}
 
 Grafana Agent can collect, transform, and send data to:
 
diff --git a/docs/sources/about.md b/docs/sources/about.md
index 57468c7f3e24..eca262408d7d 100644
--- a/docs/sources/about.md
+++ b/docs/sources/about.md
@@ -105,10 +105,10 @@ You should run Static mode when:
 
 ### Static mode Kubernetes operator
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 Grafana Agent version 0.37 and newer provides Prometheus Operator compatibility in Flow mode.
 You should use Grafana Agent Flow mode for all new Grafana Agent deployments.
-{{% /admonition %}}
+{{< /admonition >}}
 
 The [Static mode Kubernetes operator][] is a variant of Grafana Agent introduced on June 17, 2021. It's currently in beta.
 
diff --git a/docs/sources/flow/_index.md b/docs/sources/flow/_index.md
index 1b95fbe29ae8..cc800508f222 100644
--- a/docs/sources/flow/_index.md
+++ b/docs/sources/flow/_index.md
@@ -68,13 +68,13 @@ prometheus.remote_write "default" {
 }
 ```
 
-## {{< param "PRODUCT_NAME" >}} configuration generator
+## {{% param "PRODUCT_NAME" %}} configuration generator
 
 The {{< param "PRODUCT_NAME" >}} [configuration generator](https://grafana.github.io/agent-configurator/) will help you get a head start on creating flow code.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 This feature is experimental, and it doesn't support all River components.
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Next steps
 
diff --git a/docs/sources/flow/concepts/config-language/expressions/types_and_values.md b/docs/sources/flow/concepts/config-language/expressions/types_and_values.md
index 1f27c0b5ecac..70afaf790472 100644
--- a/docs/sources/flow/concepts/config-language/expressions/types_and_values.md
+++ b/docs/sources/flow/concepts/config-language/expressions/types_and_values.md
@@ -167,7 +167,7 @@ If the key isn't a valid identifier, you must wrap it in double quotes like a st
 }
 ```
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 Don't confuse objects with blocks.
 
 * An _object_ is a value assigned to an [Attribute][]. You **must** use commas between key-value pairs on separate lines.
@@ -175,7 +175,7 @@ Don't confuse objects with blocks.
 
 [Attribute]: {{< relref "../syntax.md#Attributes" >}}
 [Block]: {{< relref "../syntax.md#Blocks" >}}
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Functions
 
diff --git a/docs/sources/flow/concepts/modules.md b/docs/sources/flow/concepts/modules.md
index 940357f30127..28ebbfb499cd 100644
--- a/docs/sources/flow/concepts/modules.md
+++ b/docs/sources/flow/concepts/modules.md
@@ -39,9 +39,9 @@ Module loader components are responsible for the following functions:
 
 Module loaders are typically called `module.LOADER_NAME`.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 Some module loaders may not support running modules with arguments or exports.
-{{% /admonition %}}
+{{< /admonition >}}
 
 Refer to [Components][] for more information about the module loader components.
 
diff --git a/docs/sources/flow/get-started/_index.md b/docs/sources/flow/get-started/_index.md
index 80b48bfdaece..444b64f5afc5 100644
--- a/docs/sources/flow/get-started/_index.md
+++ b/docs/sources/flow/get-started/_index.md
@@ -20,6 +20,6 @@ weight: 50
 # Get started with {{% param "PRODUCT_NAME" %}}
 
 This section covers topics that help you get started with {{< param "PRODUCT_NAME" >}},
-including installation, running the agent, overview of deployment topologies, and more.
+including installation, running {{< param "PRODUCT_NAME" >}}, overview of deployment topologies, and more.
 
 {{< section >}}
diff --git a/docs/sources/flow/get-started/install/_index.md b/docs/sources/flow/get-started/install/_index.md
index dabb07857d74..25b9a5b2f101 100644
--- a/docs/sources/flow/get-started/install/_index.md
+++ b/docs/sources/flow/get-started/install/_index.md
@@ -29,9 +29,9 @@ The following architectures are supported:
 - macOS: AMD64 (Intel), ARM64 (Apple Silicon)
 - FreeBSD: AMD64
 
-{{% admonition type="note" %}}
-Installing {{< param "PRODUCT_NAME" >}} on other operating systems is possible, but is not recommended or supported.
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Installing {{< param "PRODUCT_NAME" >}} on other operating systems is possible, but isn't recommended or supported.
+{{< /admonition >}}
 
 {{< section >}}
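The objects-versus-blocks note in the types_and_values.md hunk above is the kind of distinction an example settles quickly. A sketch, with incidental component names:

```river
// An _object_ is a value assigned to an attribute with `=`, and its
// key-value pairs are separated by commas:
prometheus.scrape "example" {
  targets = [
    {"__address__" = "127.0.0.1:9100", "env" = "dev"},
  ]
  forward_to = [prometheus.remote_write.default.receiver]
}

prometheus.remote_write "default" {
  // A _block_ has no `=` sign and no commas between its attributes:
  endpoint {
    url = "https://prometheus.example.com/api/v1/write"
  }
}
```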
diff --git a/docs/sources/flow/get-started/install/chef.md b/docs/sources/flow/get-started/install/chef.md
new file mode 100644
index 000000000000..ef348384a5ed
--- /dev/null
+++ b/docs/sources/flow/get-started/install/chef.md
@@ -0,0 +1,104 @@
+---
+aliases:
+- /docs/grafana-cloud/agent/flow/get-started/install/chef/
+- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/chef/
+- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/chef/
+- /docs/grafana-cloud/send-data/agent/flow/get-started/install/chef/
+canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/chef/
+description: Learn how to install Grafana Agent Flow with Chef
+menuTitle: Chef
+title: Install Grafana Agent Flow with Chef
+weight: 550
+---
+
+# Install {{% param "PRODUCT_NAME" %}} with Chef
+
+You can use Chef to install and manage {{< param "PRODUCT_NAME" >}}.
+
+## Before you begin
+
+- These steps assume you already have a working [Chef][] setup.
+- You can add the following resources to any new or existing recipe.
+- These tasks install {{< param "PRODUCT_NAME" >}} from the package repositories. The tasks target Linux systems from the following families:
+  - Debian (including Ubuntu)
+  - RedHat Enterprise Linux
+  - Amazon Linux
+  - Fedora
+
+## Steps
+
+To add {{< param "PRODUCT_NAME" >}} to a host:
+
+1. Add the following resources to your [Chef][] recipe to add the Grafana package repositories to your system:
+
+   ```ruby
+   if platform_family?('debian', 'rhel', 'amazon', 'fedora')
+     if platform_family?('debian')
+       remote_file '/etc/apt/keyrings/grafana.gpg' do
+         source 'https://apt.grafana.com/gpg.key'
+         mode '0644'
+         action :create
+       end
+
+       file '/etc/apt/sources.list.d/grafana.list' do
+         content "deb [signed-by=/etc/apt/keyrings/grafana.gpg] https://apt.grafana.com/ stable main"
+         mode '0644'
+         notifies :update, 'apt_update[update apt cache]', :immediately
+       end
+
+       apt_update 'update apt cache' do
+         action :nothing
+       end
+     elsif platform_family?('rhel', 'amazon', 'fedora')
+       yum_repository 'grafana' do
+         description 'grafana'
+         baseurl 'https://rpm.grafana.com/oss/rpm'
+         gpgcheck true
+         gpgkey 'https://rpm.grafana.com/gpg.key'
+         enabled true
+         action :create
+         notifies :run, 'execute[add-rhel-key]', :immediately
+       end
+
+       execute 'add-rhel-key' do
+         command "rpm --import https://rpm.grafana.com/gpg.key"
+         action :nothing
+       end
+     end
+   else
+     fail "The #{node['platform_family']} platform is not supported."
+   end
+   ```
+
+1. Add the following resources to install and enable the `grafana-agent-flow` service:
+
+   ```ruby
+   package 'grafana-agent-flow' do
+     action :install
+     flush_cache [ :before ] if platform_family?('amazon', 'rhel', 'fedora')
+     notifies :restart, 'service[grafana-agent-flow]', :delayed
+   end
+
+   service 'grafana-agent-flow' do
+     service_name 'grafana-agent-flow'
+     action [:enable, :start]
+   end
+   ```
+
+## Configuration
+
+The `grafana-agent-flow` package installs a default configuration file that doesn't send telemetry anywhere.
+
+The default configuration file location is `/etc/grafana-agent-flow.river`. You can replace this file with your own configuration or create a new configuration file for the service to use.
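The configuration paragraph above leaves open what a replacement `/etc/grafana-agent-flow.river` might contain. A minimal sketch, with a placeholder remote-write URL:

```river
// Minimal replacement for the default configuration: scrape the agent's own
// metrics endpoint and ship the samples via remote_write.
prometheus.scrape "self" {
  targets    = [{"__address__" = "127.0.0.1:12345"}]
  forward_to = [prometheus.remote_write.default.receiver]
}

prometheus.remote_write "default" {
  endpoint {
    url = "https://prometheus.example.com/api/v1/write"
  }
}
```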
+
+## Next steps
+
+- [Configure {{< param "PRODUCT_NAME" >}}][Configure]
+
+[Chef]: https://www.chef.io/products/chef-infrastructure-management/
+
+{{% docs/reference %}}
+[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md"
+[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md"
+{{% /docs/reference %}}
diff --git a/docs/sources/flow/get-started/install/docker.md b/docs/sources/flow/get-started/install/docker.md
index c7884a6dc21b..c7e07b1b3b7a 100644
--- a/docs/sources/flow/get-started/install/docker.md
+++ b/docs/sources/flow/get-started/install/docker.md
@@ -57,10 +57,10 @@ Replace the following:
 
 You can modify the last line to change the arguments passed to the {{< param "PRODUCT_NAME" >}} binary.
 Refer to the documentation for [run][] for more information about the options available to the `run` command.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 Make sure you pass `--server.http.listen-addr=0.0.0.0:12345` as an argument as shown in the example above.
 If you don't pass this argument, the [debugging UI][UI] won't be available outside of the Docker container.
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Run a Windows Docker container
 
@@ -82,10 +82,10 @@ Replace the following:
 
 You can modify the last line to change the arguments passed to the {{< param "PRODUCT_NAME" >}} binary.
 Refer to the documentation for [run][] for more information about the options available to the `run` command.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 Make sure you pass `--server.http.listen-addr=0.0.0.0:12345` as an argument as shown in the example above.
 If you don't pass this argument, the [debugging UI][UI] won't be available outside of the Docker container.
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Verify
 
diff --git a/docs/sources/flow/get-started/install/kubernetes.md b/docs/sources/flow/get-started/install/kubernetes.md
index 9326fce4bf03..d045c7b5ce13 100644
--- a/docs/sources/flow/get-started/install/kubernetes.md
+++ b/docs/sources/flow/get-started/install/kubernetes.md
@@ -30,10 +30,10 @@ weight: 200
 
 ## Deploy
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 These instructions show you how to install the generic [Helm chart](https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent) for {{< param "PRODUCT_NAME" >}}.
 You can deploy {{< param "PRODUCT_ROOT_NAME" >}} either in static mode or flow mode. The Helm chart deploys {{< param "PRODUCT_NAME" >}} by default.
-{{% /admonition %}}
+{{< /admonition >}}
 
 To deploy {{< param "PRODUCT_ROOT_NAME" >}} on Kubernetes using Helm, run the following commands in a terminal window:
 
diff --git a/docs/sources/flow/get-started/install/macos.md b/docs/sources/flow/get-started/install/macos.md
index 9903e13ff632..c16f70e6d941 100644
--- a/docs/sources/flow/get-started/install/macos.md
+++ b/docs/sources/flow/get-started/install/macos.md
@@ -22,9 +22,9 @@ weight: 400
 
 You can install {{< param "PRODUCT_NAME" >}} on macOS with Homebrew .
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 The default prefix for Homebrew on Intel is `/usr/local`. The default prefix for Homebrew on Apple Silicon is `/opt/Homebrew`. To verify the default prefix for Homebrew on your computer, open a terminal window and type `brew --prefix`.
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Before you begin
 
diff --git a/docs/sources/flow/get-started/install/puppet.md b/docs/sources/flow/get-started/install/puppet.md
new file mode 100644
index 000000000000..db3fb2b4886d
--- /dev/null
+++ b/docs/sources/flow/get-started/install/puppet.md
@@ -0,0 +1,113 @@
+---
+aliases:
+- /docs/grafana-cloud/agent/flow/get-started/install/puppet/
+- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/puppet/
+- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/puppet/
+- /docs/grafana-cloud/send-data/agent/flow/get-started/install/puppet/
+canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/puppet/
+description: Learn how to install Grafana Agent Flow with Puppet
+menuTitle: Puppet
+title: Install Grafana Agent Flow with Puppet
+weight: 560
+---
+
+# Install {{% param "PRODUCT_NAME" %}} with Puppet
+
+You can use Puppet to install and manage {{< param "PRODUCT_NAME" >}}.
+
+## Before you begin
+
+- These steps assume you already have a working [Puppet][] setup.
+- You can add the following manifest to any new or existing module.
+- The manifest installs {{< param "PRODUCT_NAME" >}} from the package repositories. It targets Linux systems from the following families:
+  - Debian (including Ubuntu)
+  - RedHat Enterprise Linux (including Fedora)
+
+## Steps
+
+To add {{< param "PRODUCT_NAME" >}} to a host:
+
+1. Ensure that the following module dependencies are declared and installed:
+
+   ```json
+   {
+     "name": "puppetlabs/apt",
+     "version_requirement": ">= 4.1.0 <= 7.0.0"
+   },
+   {
+     "name": "puppetlabs/yumrepo_core",
+     "version_requirement": "<= 2.0.0"
+   }
+   ```
+
+1. Create a new [Puppet][] manifest with the following class to add the Grafana package repositories, install the `grafana-agent-flow` package, and run the service:
+
+   ```ruby
+   class grafana_agent::grafana_agent_flow () {
+     case $::os['family'] {
+       'debian': {
+         apt::source { 'grafana':
+           location => 'https://apt.grafana.com/',
+           release  => '',
+           repos    => 'stable main',
+           key      => {
+             id     => 'B53AE77BADB630A683046005963FA27710458545',
+             source => 'https://apt.grafana.com/gpg.key',
+           },
+         } -> package { 'grafana-agent-flow':
+           require => Exec['apt_update'],
+         } -> service { 'grafana-agent-flow':
+           ensure    => running,
+           name      => 'grafana-agent-flow',
+           enable    => true,
+           subscribe => Package['grafana-agent-flow'],
+         }
+       }
+       'redhat': {
+         yumrepo { 'grafana':
+           ensure   => 'present',
+           name     => 'grafana',
+           descr    => 'grafana',
+           baseurl  => 'https://packages.grafana.com/oss/rpm',
+           gpgkey   => 'https://packages.grafana.com/gpg.key',
+           enabled  => '1',
+           gpgcheck => '1',
+           target   => '/etc/yum.repos.d/grafana.repo',
+         } -> package { 'grafana-agent-flow':
+         } -> service { 'grafana-agent-flow':
+           ensure    => running,
+           name      => 'grafana-agent-flow',
+           enable    => true,
+           subscribe => Package['grafana-agent-flow'],
+         }
+       }
+       default: {
+         fail("Unsupported OS family: (${$::os['family']})")
+       }
+     }
+   }
+   ```
+
+1. To use this class in a module, add the following line to the module's `init.pp` file:
+
+   ```ruby
+   include grafana_agent::grafana_agent_flow
+   ```
+
+## Configuration
+
+The `grafana-agent-flow` package installs a default configuration file that doesn't send telemetry anywhere.
+
+The default configuration file location is `/etc/grafana-agent-flow.river`. You can replace this file with your own configuration, or create a new configuration file for the service to use.
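As with the Chef page, the Puppet page ends by pointing at `/etc/grafana-agent-flow.river` without showing one. For a first smoke test of the managed service, the configuration can be as small as the `logging` block; everything else can come later:

```river
// Smallest useful configuration for verifying the service starts:
// set the agent's own log verbosity and format, and nothing else.
logging {
  level  = "debug"
  format = "logfmt"
}
```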
+
+## Next steps
+
+- [Configure {{< param "PRODUCT_NAME" >}}][Configure]
+
+[Puppet]: https://www.puppet.com/
+
+{{% docs/reference %}}
+[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md"
+[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md"
+{{% /docs/reference %}}
diff --git a/docs/sources/flow/get-started/install/windows.md b/docs/sources/flow/get-started/install/windows.md
index 2be2fabc6019..a20ed3449792 100644
--- a/docs/sources/flow/get-started/install/windows.md
+++ b/docs/sources/flow/get-started/install/windows.md
@@ -52,7 +52,7 @@ To do a silent install of {{< param "PRODUCT_NAME" >}} on Windows, perform the f
 
 1. Run the following command in PowerShell or Command Prompt:
 
-   ```shell
+   ```cmd
    /S
    ```
 
diff --git a/docs/sources/flow/get-started/run/binary.md b/docs/sources/flow/get-started/run/binary.md
index 7f9fda22ff77..0b9ac5b7d74a 100644
--- a/docs/sources/flow/get-started/run/binary.md
+++ b/docs/sources/flow/get-started/run/binary.md
@@ -46,9 +46,9 @@ Replace the following:
 
 You can set up and manage the standalone binary for {{< param "PRODUCT_NAME" >}} as a Linux systemd service.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 These steps assume you have a default systemd and {{< param "PRODUCT_NAME" >}} configuration.
-{{% /admonition %}}
+{{< /admonition >}}
 
 1. To create a new user called `grafana-agent-flow` run the following command in a terminal window:
 
diff --git a/docs/sources/flow/reference/cli/convert.md b/docs/sources/flow/reference/cli/convert.md
index a9a3810ec3ee..3b44d662e87a 100644
--- a/docs/sources/flow/reference/cli/convert.md
+++ b/docs/sources/flow/reference/cli/convert.md
@@ -21,13 +21,13 @@ The `convert` command converts a supported configuration format to {{< param "PR
 
 Usage:
 
-* `AGENT_MODE=flow grafana-agent convert [FLAG ...] FILE_NAME`
-* `grafana-agent-flow convert [FLAG ...] FILE_NAME`
+* `AGENT_MODE=flow grafana-agent convert [<FLAG> ...] <FILE_NAME>`
+* `grafana-agent-flow convert [<FLAG> ...] <FILE_NAME>`
 
 Replace the following:
 
-   * `FLAG`: One or more flags that define the input and output of the command.
-   * `FILE_NAME`: The {{< param "PRODUCT_ROOT_NAME" >}} configuration file.
+   * _`<FLAG>`_: One or more flags that define the input and output of the command.
+   * _`<FILE_NAME>`_: The {{< param "PRODUCT_ROOT_NAME" >}} configuration file.
 
 If the `FILE_NAME` argument isn't provided or if the `FILE_NAME` argument is
 equal to `-`, `convert` converts the contents of standard input. Otherwise,
@@ -70,7 +70,7 @@ where an output can still be generated. These can be bypassed using the
 
 ### Prometheus
 
-Using the `--source-format=prometheus` will convert the source config from
+Using the `--source-format=prometheus` will convert the source configuration from
 [Prometheus v2.45](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/)
 to {{< param "PRODUCT_NAME" >}} configuration.
 
diff --git a/docs/sources/flow/reference/cli/tools.md b/docs/sources/flow/reference/cli/tools.md
index b45e7f215a23..b9fb73a761bd 100644
--- a/docs/sources/flow/reference/cli/tools.md
+++ b/docs/sources/flow/reference/cli/tools.md
@@ -15,10 +15,10 @@ weight: 400
 
 The `tools` command contains command line tooling grouped by Flow component.
 
-{{% admonition type="caution" %}}
+{{< admonition type="caution" >}}
 Utilities in this command have no backward compatibility guarantees and may change or be removed between releases.
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Subcommands
 
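To ground the `convert` usage shown in the convert.md hunk above: for a one-job Prometheus configuration, the command and the rough shape of its River output would be something like the following. The generated component names and exact arguments are assumptions; the converter's real output may differ.

```river
// Approximate output of:
//   grafana-agent-flow convert --source-format=prometheus prometheus.yml
// for a source config with one static scrape job and one remote_write URL.
prometheus.scrape "node" {
  targets    = [{"__address__" = "localhost:9100"}]
  forward_to = [prometheus.remote_write.default.receiver]
}

prometheus.remote_write "default" {
  endpoint {
    url = "https://prometheus.example.com/api/v1/write"
  }
}
```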
diff --git a/docs/sources/flow/reference/compatibility/_index.md b/docs/sources/flow/reference/compatibility/_index.md
index 88121838f571..cdd9426cfb27 100644
--- a/docs/sources/flow/reference/compatibility/_index.md
+++ b/docs/sources/flow/reference/compatibility/_index.md
@@ -4,7 +4,7 @@ aliases:
 - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/compatible-components/
 - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/compatible-components/
 - /docs/grafana-cloud/send-data/agent/flow/reference/compatible-components/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/compatible-components/
+canonical: https://grafana.com/docs/agent/latest/flow/reference/compatibility/
 description: Learn about which components are compatible with each other in Grafana Agent Flow
 title: Compatible components
 weight: 400
@@ -12,23 +12,19 @@ weight: 400
 
 # Compatible components
 
-This section provides an overview of _some_ of the possible connections between
-compatible components in Grafana Agent Flow.
+This section provides an overview of _some_ of the possible connections between compatible components in {{< param "PRODUCT_NAME" >}}.
 
-For each common data type, we provide a list of compatible components
-that can export or consume it.
-
-{{% admonition type="note" %}}
+For each common data type, we provide a list of compatible components that can export or consume it.
+{{< admonition type="note" >}}
 The type of export may not be the only requirement for chaining components together.
 The value of an attribute may matter as well as its type.
-Please refer to each component's documentation for more details on what values are acceptable.
+Refer to each component's documentation for more details on what values are acceptable.
 
 For example:
 * A Prometheus component may always expect an `"__address__"` label inside a list of targets.
 * A `string` argument may only accept certain values like "traceID" or "spanID".
-
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Targets
 
@@ -69,6 +65,7 @@ The following components, grouped by namespace, _export_ Targets.
 - [discovery.nomad]({{< relref "../components/discovery.nomad.md" >}})
 - [discovery.openstack]({{< relref "../components/discovery.openstack.md" >}})
 - [discovery.ovhcloud]({{< relref "../components/discovery.ovhcloud.md" >}})
+- [discovery.process]({{< relref "../components/discovery.process.md" >}})
 - [discovery.puppetdb]({{< relref "../components/discovery.puppetdb.md" >}})
 - [discovery.relabel]({{< relref "../components/discovery.relabel.md" >}})
 - [discovery.scaleway]({{< relref "../components/discovery.scaleway.md" >}})
@@ -121,6 +118,7 @@ The following components, grouped by namespace, _consume_ Targets.
 
 {{< collapse title="discovery" >}}
 
+- [discovery.process]({{< relref "../components/discovery.process.md" >}})
 - [discovery.relabel]({{< relref "../components/discovery.relabel.md" >}})
 
 {{< /collapse >}}
@@ -144,6 +142,7 @@ The following components, grouped by namespace, _consume_ Targets.
 
 {{< collapse title="pyroscope" >}}
 
 - [pyroscope.ebpf]({{< relref "../components/pyroscope.ebpf.md" >}})
+- [pyroscope.java]({{< relref "../components/pyroscope.java.md" >}})
 - [pyroscope.scrape]({{< relref "../components/pyroscope.scrape.md" >}})
 
 {{< /collapse >}}
 
@@ -152,7 +151,7 @@ The following components, grouped by namespace, _consume_ Targets.
 
 ## Prometheus `MetricsReceiver`
 
-The Prometheus metrics are sent between components using `MetricsReceiver`s. 
+The Prometheus metrics are sent between components using `MetricsReceiver`s.
 
 `MetricsReceiver`s are [capsules]({{< relref "../../concepts/config-language/expressions/types_and_values/#capsules" >}})
 that are exported by components that can receive Prometheus metrics. Components that
 can consume Prometheus metrics can be passed the `MetricsReceiver` as an argument. Use the
@@ -178,7 +177,6 @@ The following components, grouped by namespace, _export_ Prometheus `MetricsRece
 
 ### Prometheus `MetricsReceiver` Consumers
 
 The following components, grouped by namespace, _consume_ Prometheus `MetricsReceiver`.
-
 
@@ -197,8 +195,6 @@ The following components, grouped by namespace, _consume_ Prometheus `MetricsRec
 
 
-
-
 ## Loki `LogsReceiver`
 
 `LogsReceiver` is a [capsule]({{< relref "../../concepts/config-language/expressions/types_and_values/#capsules" >}})
@@ -262,7 +258,6 @@ The following components, grouped by namespace, _consume_ Loki `LogsReceiver`.
 
-
 ## OpenTelemetry `otelcol.Consumer`
 
 The OpenTelemetry data is sent between components using `otelcol.Consumer`s.
@@ -295,6 +290,7 @@ The following components, grouped by namespace, _export_ OpenTelemetry `otelcol.
 - [otelcol.processor.k8sattributes]({{< relref "../components/otelcol.processor.k8sattributes.md" >}})
 - [otelcol.processor.memory_limiter]({{< relref "../components/otelcol.processor.memory_limiter.md" >}})
 - [otelcol.processor.probabilistic_sampler]({{< relref "../components/otelcol.processor.probabilistic_sampler.md" >}})
+- [otelcol.processor.resourcedetection]({{< relref "../components/otelcol.processor.resourcedetection.md" >}})
 - [otelcol.processor.span]({{< relref "../components/otelcol.processor.span.md" >}})
 - [otelcol.processor.tail_sampling]({{< relref "../components/otelcol.processor.tail_sampling.md" >}})
 - [otelcol.processor.transform]({{< relref "../components/otelcol.processor.transform.md" >}})
@@ -323,6 +319,7 @@ The following components, grouped by namespace, _consume_ OpenTelemetry `otelcol
 - [otelcol.processor.k8sattributes]({{< relref "../components/otelcol.processor.k8sattributes.md" >}})
 - [otelcol.processor.memory_limiter]({{< relref "../components/otelcol.processor.memory_limiter.md" >}})
 - [otelcol.processor.probabilistic_sampler]({{< relref "../components/otelcol.processor.probabilistic_sampler.md" >}})
+- [otelcol.processor.resourcedetection]({{< relref "../components/otelcol.processor.resourcedetection.md" >}})
 - [otelcol.processor.span]({{< relref "../components/otelcol.processor.span.md" >}})
 - [otelcol.processor.tail_sampling]({{< relref "../components/otelcol.processor.tail_sampling.md" >}})
 - [otelcol.processor.transform]({{< relref "../components/otelcol.processor.transform.md" >}})
@@ -338,8 +335,6 @@ The following components, grouped by namespace, _consume_ OpenTelemetry `otelcol
 
 
-
-
 ## Pyroscope `ProfilesReceiver`
 
 The Pyroscope profiles are sent between components using `ProfilesReceiver`s.
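The `MetricsReceiver` description above is abstract; in configuration it reduces to one pattern: a consumer's `forward_to` list holds the capsules that receiving components export. A sketch, with a placeholder endpoint:

```river
// prometheus.relabel consumes a MetricsReceiver (via forward_to) and also
// exports one of its own, so it can sit in the middle of a metrics pipeline.
prometheus.relabel "drop_debug" {
  forward_to = [prometheus.remote_write.default.receiver]

  rule {
    action        = "drop"
    source_labels = ["__name__"]
    regex         = "debug_.*"
  }
}

prometheus.remote_write "default" {
  endpoint {
    url = "https://prometheus.example.com/api/v1/write"
  }
}
```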
@@ -368,8 +363,8 @@ The following components, grouped by namespace, _consume_ Pyroscope `ProfilesRec
 
 {{< collapse title="pyroscope" >}}
 
 - [pyroscope.ebpf]({{< relref "../components/pyroscope.ebpf.md" >}})
+- [pyroscope.java]({{< relref "../components/pyroscope.java.md" >}})
 - [pyroscope.scrape]({{< relref "../components/pyroscope.scrape.md" >}})
 
 {{< /collapse >}}
-
diff --git a/docs/sources/flow/reference/components/discovery.azure.md b/docs/sources/flow/reference/components/discovery.azure.md
index 83eceabdf7a6..94b38bbec2a0 100644
--- a/docs/sources/flow/reference/components/discovery.azure.md
+++ b/docs/sources/flow/reference/components/discovery.azure.md
@@ -158,11 +158,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.consul.md b/docs/sources/flow/reference/components/discovery.consul.md
index c63f94b8017c..e45e6a3ec040 100644
--- a/docs/sources/flow/reference/components/discovery.consul.md
+++ b/docs/sources/flow/reference/components/discovery.consul.md
@@ -177,11 +177,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.consulagent.md b/docs/sources/flow/reference/components/discovery.consulagent.md
index df923fed4496..5557410188b1 100644
--- a/docs/sources/flow/reference/components/discovery.consulagent.md
+++ b/docs/sources/flow/reference/components/discovery.consulagent.md
@@ -138,11 +138,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.digitalocean.md b/docs/sources/flow/reference/components/discovery.digitalocean.md
index 2a64ba7f6bec..a24eabaa0803 100644
--- a/docs/sources/flow/reference/components/discovery.digitalocean.md
+++ b/docs/sources/flow/reference/components/discovery.digitalocean.md
@@ -128,11 +128,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.dns.md b/docs/sources/flow/reference/components/discovery.dns.md
index d2f0217b1d73..70fb3a64b9e1 100644
--- a/docs/sources/flow/reference/components/discovery.dns.md
+++ b/docs/sources/flow/reference/components/discovery.dns.md
@@ -103,11 +103,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.docker.md b/docs/sources/flow/reference/components/discovery.docker.md
index 4d6ce94d557f..5a8518f22873 100644
--- a/docs/sources/flow/reference/components/discovery.docker.md
+++ b/docs/sources/flow/reference/components/discovery.docker.md
@@ -225,11 +225,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.dockerswarm.md b/docs/sources/flow/reference/components/discovery.dockerswarm.md
index 58c065fb06eb..c1a7f8616cee 100644
--- a/docs/sources/flow/reference/components/discovery.dockerswarm.md
+++ b/docs/sources/flow/reference/components/discovery.dockerswarm.md
@@ -248,11 +248,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.ec2.md b/docs/sources/flow/reference/components/discovery.ec2.md
index 7f01ae48c6e0..cc7f49259594 100644
--- a/docs/sources/flow/reference/components/discovery.ec2.md
+++ b/docs/sources/flow/reference/components/discovery.ec2.md
@@ -175,11 +175,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.eureka.md b/docs/sources/flow/reference/components/discovery.eureka.md
index 70ab3f8f666d..93c76d9d09f8 100644
--- a/docs/sources/flow/reference/components/discovery.eureka.md
+++ b/docs/sources/flow/reference/components/discovery.eureka.md
@@ -162,11 +162,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.file.md b/docs/sources/flow/reference/components/discovery.file.md
index c8493e01e62a..a78c39feabf7 100644
--- a/docs/sources/flow/reference/components/discovery.file.md
+++ b/docs/sources/flow/reference/components/discovery.file.md
@@ -182,11 +182,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.gce.md b/docs/sources/flow/reference/components/discovery.gce.md
index 5752a4ce51b1..1a662bec2911 100644
--- a/docs/sources/flow/reference/components/discovery.gce.md
+++ b/docs/sources/flow/reference/components/discovery.gce.md
@@ -122,11 +122,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.hetzner.md b/docs/sources/flow/reference/components/discovery.hetzner.md
index c6922e685f66..f917f8417a84 100644
--- a/docs/sources/flow/reference/components/discovery.hetzner.md
+++ b/docs/sources/flow/reference/components/discovery.hetzner.md
@@ -186,11 +186,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.http.md b/docs/sources/flow/reference/components/discovery.http.md
index 50ecf42dcc06..80639fe7077f 100644
--- a/docs/sources/flow/reference/components/discovery.http.md
+++ b/docs/sources/flow/reference/components/discovery.http.md
@@ -192,11 +192,9 @@ discovery.http "dynamic_targets" {
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.ionos.md b/docs/sources/flow/reference/components/discovery.ionos.md
index 1c619a1641ac..378556ad7886 100644
--- a/docs/sources/flow/reference/components/discovery.ionos.md
+++ b/docs/sources/flow/reference/components/discovery.ionos.md
@@ -161,11 +161,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.kubelet.md b/docs/sources/flow/reference/components/discovery.kubelet.md
index 7ef29244a01e..bf0e1085dcde 100644
--- a/docs/sources/flow/reference/components/discovery.kubelet.md
+++ b/docs/sources/flow/reference/components/discovery.kubelet.md
@@ -206,11 +206,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.kubernetes.md b/docs/sources/flow/reference/components/discovery.kubernetes.md
index 1d4b2f9210c5..8745ff5ddd7a 100644
--- a/docs/sources/flow/reference/components/discovery.kubernetes.md
+++ b/docs/sources/flow/reference/components/discovery.kubernetes.md
@@ -466,10 +466,10 @@ Replace the following:
 
 This example limits the search to pods on the same node as this {{< param "PRODUCT_ROOT_NAME" >}}. This configuration could be useful if you are running {{< param "PRODUCT_ROOT_NAME" >}} as a DaemonSet.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 This example assumes you have used Helm chart to deploy {{< param "PRODUCT_NAME" >}} in Kubernetes and sets `HOSTNAME` to the Kubernetes host name.
 If you have a custom Kubernetes deployment, you must adapt this example to your configuration.
-{{% /admonition %}}
+{{< /admonition >}}
 
 ```river
 discovery.kubernetes "k8s_pods" {
@@ -510,11 +510,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.kuma.md b/docs/sources/flow/reference/components/discovery.kuma.md
index c498753f58ab..8763bc2eb357 100644
--- a/docs/sources/flow/reference/components/discovery.kuma.md
+++ b/docs/sources/flow/reference/components/discovery.kuma.md
@@ -145,11 +145,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.lightsail.md b/docs/sources/flow/reference/components/discovery.lightsail.md
index 81bdb0c706b9..22868c58faeb 100644
--- a/docs/sources/flow/reference/components/discovery.lightsail.md
+++ b/docs/sources/flow/reference/components/discovery.lightsail.md
@@ -108,11 +108,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.linode.md b/docs/sources/flow/reference/components/discovery.linode.md
index 77d01dbdf4e2..6a5733c9e6cc 100644
--- a/docs/sources/flow/reference/components/discovery.linode.md
+++ b/docs/sources/flow/reference/components/discovery.linode.md
@@ -20,9 +20,9 @@ discovery.linode "LABEL" {
 }
 ```
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 The linode APIv4 Token must be created with the scopes: `linodes:read_only`, `ips:read_only`, and `events:read_only`.
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Arguments
 
@@ -185,11 +185,9 @@ prometheus.remote_write "demo" {
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.marathon.md b/docs/sources/flow/reference/components/discovery.marathon.md
index b19ddb321c2c..43c50ab468de 100644
--- a/docs/sources/flow/reference/components/discovery.marathon.md
+++ b/docs/sources/flow/reference/components/discovery.marathon.md
@@ -155,11 +155,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.nerve.md b/docs/sources/flow/reference/components/discovery.nerve.md
index 1334f6dea8e8..d8c7fc24bbe2 100644
--- a/docs/sources/flow/reference/components/discovery.nerve.md
+++ b/docs/sources/flow/reference/components/discovery.nerve.md
@@ -106,11 +106,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.nomad.md b/docs/sources/flow/reference/components/discovery.nomad.md
index aebd128bb320..14c51bb6c72f 100644
--- a/docs/sources/flow/reference/components/discovery.nomad.md
+++ b/docs/sources/flow/reference/components/discovery.nomad.md
@@ -156,11 +156,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.openstack.md b/docs/sources/flow/reference/components/discovery.openstack.md
index 83df98d8c41c..13b6f4924232 100644
--- a/docs/sources/flow/reference/components/discovery.openstack.md
+++ b/docs/sources/flow/reference/components/discovery.openstack.md
@@ -166,11 +166,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.ovhcloud.md b/docs/sources/flow/reference/components/discovery.ovhcloud.md
index 453fcb3c1cfc..a433f4544a9a 100644
--- a/docs/sources/flow/reference/components/discovery.ovhcloud.md
+++ b/docs/sources/flow/reference/components/discovery.ovhcloud.md
@@ -155,11 +155,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.process.md b/docs/sources/flow/reference/components/discovery.process.md
new file mode 100644
index 000000000000..839948d3d65b
--- /dev/null
+++ b/docs/sources/flow/reference/components/discovery.process.md
@@ -0,0 +1,215 @@
+---
+aliases:
+  - /docs/grafana-cloud/agent/flow/reference/components/discovery.process/
+  - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.process/
+  - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.process/
+  - /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.process/
+canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.process/
+description: Learn about discovery.process
+title: discovery.process
+---
+
+# discovery.process
+
+{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="<AGENT_VERSION>" >}}
+
+`discovery.process` discovers processes running on the local Linux OS.
+
+{{< admonition type="note" >}}
+To use the `discovery.process` component you must run {{< param "PRODUCT_NAME" >}} as root and inside the host PID namespace.
+{{< /admonition >}}
+
+## Usage
+
+```river
+discovery.process "LABEL" {
+
+}
+```
+
+## Arguments
+
+The following arguments are supported:
+
+| Name               | Type                | Description                                                                                 | Default | Required |
+|--------------------|---------------------|---------------------------------------------------------------------------------------------|---------|----------|
+| `join`             | `list(map(string))` | Join external targets to discovered process targets based on the `__container_id__` label. |         | no       |
+| `refresh_interval` | `duration`          | How often to sync targets.                                                                  | "60s"   | no       |
+
+### Targets joining
+
+If `join` is specified, `discovery.process` joins the discovered process targets with the supplied external targets, based on the `__container_id__` label.
+
+For example, if `join` is specified as follows:
+
+```json
+[
+  {
+    "pod": "pod-1",
+    "__container_id__": "container-1"
+  },
+  {
+    "pod": "pod-2",
+    "__container_id__": "container-2"
+  }
+]
+```
+
+And the discovered processes are:
+
+```json
+[
+  {
+    "__process_pid__": "1",
+    "__container_id__": "container-1"
+  },
+  {
+    "__process_pid__": "2"
+  }
+]
+```
+
+The resulting targets are:
+
+```json
+[
+  {
+    "__container_id__": "container-1",
+    "__process_pid__": "1",
+    "pod": "pod-1"
+  },
+  {
+    "__process_pid__": "2"
+  },
+  {
+    "__container_id__": "container-1",
+    "pod": "pod-1"
+  },
+  {
+    "__container_id__": "container-2",
+    "pod": "pod-2"
+  }
+]
+```
+
+## Blocks
+
+The following blocks are supported inside the definition of
+`discovery.process`:
+
+| Hierarchy       | Block               | Description                                    | Required |
+|-----------------|---------------------|------------------------------------------------|----------|
+| discover_config | [discover_config][] | Configures which process metadata to discover. | no       |
+
+[discover_config]: #discover_config-block
+
+### discover_config block
+
+The `discover_config` block describes which process metadata to discover.
+
+The following arguments are supported:
+
+| Name           | Type   | Description                                                           | Default | Required |
+|----------------|--------|-----------------------------------------------------------------------|---------|----------|
+| `exe`          | `bool` | A flag to enable discovering the `__meta_process_exe` label.          | true    | no       |
+| `cwd`          | `bool` | A flag to enable discovering the `__meta_process_cwd` label.          | true    | no       |
+| `commandline`  | `bool` | A flag to enable discovering the `__meta_process_commandline` label.  | true    | no       |
+| `uid`          | `bool` | A flag to enable discovering the `__meta_process_uid` label.          | true    | no       |
+| `username`     | `bool` | A flag to enable discovering the `__meta_process_username` label.     | true    | no       |
+| `container_id` | `bool` | A flag to enable discovering the `__container_id__` label.            | true    | no       |
+
+## Exported fields
+
+The following fields are exported and can be referenced by other components:
+
+| Name      | Type                | Description                                            |
+|-----------|---------------------|--------------------------------------------------------|
+| `targets` | `list(map(string))` | The set of processes discovered on the local Linux OS. |
+
+Each target includes the following labels:
+
+* `__process_pid__`: The process PID.
+* `__meta_process_exe`: The process executable path. Taken from `/proc/<pid>/exe`.
+* `__meta_process_cwd`: The process current working directory. Taken from `/proc/<pid>/cwd`.
+* `__meta_process_commandline`: The process command line. Taken from `/proc/<pid>/cmdline`.
+* `__meta_process_uid`: The process UID. Taken from `/proc/<pid>/status`.
+* `__meta_process_username`: The process username. Taken from `__meta_process_uid` and `os/user/LookupID`.
+* `__container_id__`: The container ID. Taken from `/proc/<pid>/cgroup`. If the process is not running in a container,
+  this label is not set.
+
+## Component health
+
+`discovery.process` is only reported as unhealthy when given an invalid
+configuration. In those cases, exported fields retain their last healthy
+values.
+
+## Debug information
+
+`discovery.process` does not expose any component-specific debug information.
+
+## Debug metrics
+
+`discovery.process` does not expose any component-specific debug metrics.
+
+## Examples
+
+### Example discovering processes on the local host
+
+```river
+discovery.process "all" {
+  refresh_interval = "60s"
+  discover_config {
+    cwd = true
+    exe = true
+    commandline = true
+    username = true
+    uid = true
+    container_id = true
+  }
+}
+```
+
+### Example discovering processes on the local host and joining with `discovery.kubernetes`
+
+```river
+discovery.kubernetes "pyroscope_kubernetes" {
+  selectors {
+    field = "spec.nodeName=" + env("HOSTNAME")
+    role  = "pod"
+  }
+  role = "pod"
+}
+
+discovery.process "all" {
+  join = discovery.kubernetes.pyroscope_kubernetes.targets
+  refresh_interval = "60s"
+  discover_config {
+    cwd = true
+    exe = true
+    commandline = true
+    username = true
+    uid = true
+    container_id = true
+  }
+}
+```
+
+## Compatible components
+
+`discovery.process` can accept arguments from the following components:
+
+- Components that export [Targets]({{< relref "../compatibility/#targets-exporters" >}})
+
+`discovery.process` has exports that can be consumed by the following components:
+
+- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
+
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
+
\ No newline at end of file
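One thing the new discovery.process page doesn't show is filtering the discovered targets before use. Since the component exports ordinary Targets, the usual `discovery.relabel` pattern applies; a sketch, where the executable regex is an assumption:

```river
discovery.process "all" {
  refresh_interval = "60s"
}

// Keep only processes whose executable path ends in /nginx before handing
// the targets to a downstream consumer.
discovery.relabel "only_nginx" {
  targets = discovery.process.all.targets

  rule {
    action        = "keep"
    source_labels = ["__meta_process_exe"]
    regex         = ".*/nginx"
  }
}
```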
diff --git a/docs/sources/flow/reference/components/discovery.puppetdb.md b/docs/sources/flow/reference/components/discovery.puppetdb.md
index a83d8454723c..c4e984bcd440 100644
--- a/docs/sources/flow/reference/components/discovery.puppetdb.md
+++ b/docs/sources/flow/reference/components/discovery.puppetdb.md
@@ -166,11 +166,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.relabel.md b/docs/sources/flow/reference/components/discovery.relabel.md
index fb0928359273..5269f662f13f 100644
--- a/docs/sources/flow/reference/components/discovery.relabel.md
+++ b/docs/sources/flow/reference/components/discovery.relabel.md
@@ -135,11 +135,9 @@ discovery.relabel "keep_backend_only" {
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.scaleway.md b/docs/sources/flow/reference/components/discovery.scaleway.md
index fc3ec8867212..f65aa6941346 100644
--- a/docs/sources/flow/reference/components/discovery.scaleway.md
+++ b/docs/sources/flow/reference/components/discovery.scaleway.md
@@ -183,11 +183,9 @@ Replace the following:
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
diff --git a/docs/sources/flow/reference/components/discovery.serverset.md b/docs/sources/flow/reference/components/discovery.serverset.md
index 7eb43b5ee11d..a986c2966c18 100644
--- a/docs/sources/flow/reference/components/discovery.serverset.md
+++ b/docs/sources/flow/reference/components/discovery.serverset.md
@@ -104,11 +104,9 @@ prometheus.remote_write "default" {
 
 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
- -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.triton.md b/docs/sources/flow/reference/components/discovery.triton.md index f48ae7f65b17..9a0c48d260cd 100644 --- a/docs/sources/flow/reference/components/discovery.triton.md +++ b/docs/sources/flow/reference/components/discovery.triton.md @@ -138,11 +138,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.uyuni.md b/docs/sources/flow/reference/components/discovery.uyuni.md index 42b77e8952b6..25909d8d5217 100644 --- a/docs/sources/flow/reference/components/discovery.uyuni.md +++ b/docs/sources/flow/reference/components/discovery.uyuni.md @@ -132,11 +132,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/faro.receiver.md b/docs/sources/flow/reference/components/faro.receiver.md index 3c15253f126a..72c95bfc195e 100644 --- a/docs/sources/flow/reference/components/faro.receiver.md +++ b/docs/sources/flow/reference/components/faro.receiver.md @@ -278,11 +278,9 @@ Replace the following: - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/local.file_match.md b/docs/sources/flow/reference/components/local.file_match.md index 8c3ff3a43062..c9a083198718 100644 --- a/docs/sources/flow/reference/components/local.file_match.md +++ b/docs/sources/flow/reference/components/local.file_match.md @@ -158,11 +158,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.echo.md b/docs/sources/flow/reference/components/loki.echo.md index 756ffa00ee18..8109de1b96d0 100644 --- a/docs/sources/flow/reference/components/loki.echo.md +++ b/docs/sources/flow/reference/components/loki.echo.md @@ -76,11 +76,9 @@ loki.echo "example" { } - Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.process.md b/docs/sources/flow/reference/components/loki.process.md index d1c8ef723bd9..91d25d7fdeee 100644 --- a/docs/sources/flow/reference/components/loki.process.md +++ b/docs/sources/flow/reference/components/loki.process.md @@ -382,7 +382,7 @@ following key-value pair to the set of extracted data. username: agent ``` -{{% admonition type="note" %}} +{{< admonition type="note" >}} Due to a limitation of the upstream jmespath library, you must wrap any string that contains a hyphen `-` in quotes so that it's not considered a numerical expression. @@ -394,7 +394,7 @@ You can use one of two options to circumvent this issue: 1. An escaped double quote. For example: `http_user_agent = "\"request_User-Agent\""` 1. A backtick quote. For example: ``http_user_agent = `"request_User-Agent"` `` -{{% /admonition %}} +{{< /admonition >}} ### stage.label_drop block @@ -581,9 +581,9 @@ The following arguments are supported: | `action` | `string` | The action to take when the selector matches the log line. Supported values are `"keep"` and `"drop"` | `"keep"` | no | | `drop_counter_reason` | `string` | A custom reason to report for dropped lines. | `"match_stage"` | no | -{{% admonition type="note" %}} +{{< admonition type="note" >}} The filters do not include label filter expressions such as `| label == "foobar"`. -{{% /admonition %}} +{{< /admonition >}} The `stage.match` block supports a number of `stage.*` inner blocks, like the top-level block. 
These are used to construct the nested set of stages to run if the @@ -1758,11 +1758,9 @@ loki.process "local" { - Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.relabel.md b/docs/sources/flow/reference/components/loki.relabel.md index 4344af151b22..f60f5b2d40b4 100644 --- a/docs/sources/flow/reference/components/loki.relabel.md +++ b/docs/sources/flow/reference/components/loki.relabel.md @@ -124,11 +124,9 @@ loki.relabel "keep_error_only" { - Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.api.md b/docs/sources/flow/reference/components/loki.source.api.md index afc2f3dad112..4c3f1fce87f3 100644 --- a/docs/sources/flow/reference/components/loki.source.api.md +++ b/docs/sources/flow/reference/components/loki.source.api.md @@ -126,11 +126,9 @@ loki.source.api "loki_push_api" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.awsfirehose.md b/docs/sources/flow/reference/components/loki.source.awsfirehose.md index 9b1d2c6d75c5..e621b750357d 100644 --- a/docs/sources/flow/reference/components/loki.source.awsfirehose.md +++ b/docs/sources/flow/reference/components/loki.source.awsfirehose.md @@ -120,9 +120,9 @@ The following blocks are supported inside the definition of `loki.source.awsfire ## Debug metrics The following are some of the metrics that are exposed when this component is used. -{{% admonition type="note" %}} +{{< admonition type="note" >}} The metrics include labels such as `status_code` where relevant, which you can use to measure request success rates. -{{%/admonition %}} +{{< /admonition >}} - `loki_source_awsfirehose_request_errors` (counter): Count of errors while receiving a request. 
- `loki_source_awsfirehose_record_errors` (counter): Count of errors while decoding an individual record. @@ -206,11 +206,9 @@ loki.relabel "logging_origin" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md b/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md index fcbe22aa4880..f8aad7676b23 100644 --- a/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md +++ b/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md @@ -143,11 +143,9 @@ loki.write "example" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.cloudflare.md b/docs/sources/flow/reference/components/loki.source.cloudflare.md index cee51de6a541..0ce0312fcdeb 100644 --- a/docs/sources/flow/reference/components/loki.source.cloudflare.md +++ b/docs/sources/flow/reference/components/loki.source.cloudflare.md @@ -218,11 +218,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.docker.md b/docs/sources/flow/reference/components/loki.source.docker.md index 02bf03175b20..79a1204199e1 100644 --- a/docs/sources/flow/reference/components/loki.source.docker.md +++ b/docs/sources/flow/reference/components/loki.source.docker.md @@ -172,11 +172,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. 
- -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.file.md b/docs/sources/flow/reference/components/loki.source.file.md index edb407593c1f..a581ac0da043 100644 --- a/docs/sources/flow/reference/components/loki.source.file.md +++ b/docs/sources/flow/reference/components/loki.source.file.md @@ -17,9 +17,9 @@ title: loki.source.file Multiple `loki.source.file` components can be specified by giving them different labels. -{{% admonition type="note" %}} +{{< admonition type="note" >}} `loki.source.file` does not handle file discovery. You can use `local.file_match` for file discovery. Refer to the [File Globbing](#file-globbing) example for more information. -{{% /admonition %}} +{{< /admonition >}} ## Usage @@ -245,11 +245,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.gcplog.md b/docs/sources/flow/reference/components/loki.source.gcplog.md index 2ce88f73f398..4cb7cb59cbc0 100644 --- a/docs/sources/flow/reference/components/loki.source.gcplog.md +++ b/docs/sources/flow/reference/components/loki.source.gcplog.md @@ -202,11 +202,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.gelf.md b/docs/sources/flow/reference/components/loki.source.gelf.md index ac5796051be5..ccb0f7b37968 100644 --- a/docs/sources/flow/reference/components/loki.source.gelf.md +++ b/docs/sources/flow/reference/components/loki.source.gelf.md @@ -98,11 +98,9 @@ loki.write "endpoint" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.heroku.md b/docs/sources/flow/reference/components/loki.source.heroku.md index 8f2c01cea68c..df0df9a7bfc2 100644 --- a/docs/sources/flow/reference/components/loki.source.heroku.md +++ b/docs/sources/flow/reference/components/loki.source.heroku.md @@ -153,11 +153,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.journal.md b/docs/sources/flow/reference/components/loki.source.journal.md index 0448bd572d74..f80294e331ff 100644 --- a/docs/sources/flow/reference/components/loki.source.journal.md +++ b/docs/sources/flow/reference/components/loki.source.journal.md @@ -110,11 +110,9 @@ loki.write "endpoint" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.kafka.md b/docs/sources/flow/reference/components/loki.source.kafka.md index eb5e04217298..7f62ac3ff75e 100644 --- a/docs/sources/flow/reference/components/loki.source.kafka.md +++ b/docs/sources/flow/reference/components/loki.source.kafka.md @@ -183,11 +183,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.kubernetes.md b/docs/sources/flow/reference/components/loki.source.kubernetes.md index e9d19237aef6..a14e305d6d39 100644 --- a/docs/sources/flow/reference/components/loki.source.kubernetes.md +++ b/docs/sources/flow/reference/components/loki.source.kubernetes.md @@ -210,11 +210,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. 
Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.kubernetes_events.md b/docs/sources/flow/reference/components/loki.source.kubernetes_events.md index 4447a915cfae..49a9f8b7d824 100644 --- a/docs/sources/flow/reference/components/loki.source.kubernetes_events.md +++ b/docs/sources/flow/reference/components/loki.source.kubernetes_events.md @@ -180,11 +180,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.podlogs.md b/docs/sources/flow/reference/components/loki.source.podlogs.md index 5e957c6ead09..2559fd95e055 100644 --- a/docs/sources/flow/reference/components/loki.source.podlogs.md +++ b/docs/sources/flow/reference/components/loki.source.podlogs.md @@ -299,11 +299,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.syslog.md b/docs/sources/flow/reference/components/loki.source.syslog.md index 017cc43ee0c5..c1c0900d4835 100644 --- a/docs/sources/flow/reference/components/loki.source.syslog.md +++ b/docs/sources/flow/reference/components/loki.source.syslog.md @@ -162,11 +162,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.windowsevent.md b/docs/sources/flow/reference/components/loki.source.windowsevent.md index bb41a62cc3eb..ae706b17c5d2 100644 --- a/docs/sources/flow/reference/components/loki.source.windowsevent.md +++ b/docs/sources/flow/reference/components/loki.source.windowsevent.md @@ -84,11 +84,9 @@ loki.write "endpoint" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.write.md b/docs/sources/flow/reference/components/loki.write.md index 75aad04f3f2a..946f72c5d324 100644 --- a/docs/sources/flow/reference/components/loki.write.md +++ b/docs/sources/flow/reference/components/loki.write.md @@ -243,11 +243,9 @@ Any labels that start with `__` will be removed before sending to the endpoint. - Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/mimir.rules.kubernetes.md b/docs/sources/flow/reference/components/mimir.rules.kubernetes.md index d5ba0e340255..fd5639a03bf4 100644 --- a/docs/sources/flow/reference/components/mimir.rules.kubernetes.md +++ b/docs/sources/flow/reference/components/mimir.rules.kubernetes.md @@ -47,18 +47,19 @@ mimir.rules.kubernetes "LABEL" { `mimir.rules.kubernetes` supports the following arguments: -Name | Type | Description | Default | Required --------------------------|------------|---------------------------------------------------------------------------------|---------|--------- -`address` | `string` | URL of the Mimir ruler. | | yes -`tenant_id` | `string` | Mimir tenant ID. | | no -`use_legacy_routes` | `bool` | Whether to use deprecated ruler API endpoints. | false | no -`sync_interval` | `duration` | Amount of time between reconciliations with Mimir. | "30s" | no -`mimir_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_NAME" >}} deployments. | "agent" | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. 
| `true` | no
+| Name                     | Type       | Description                                                                      | Default       | Required |
+| ------------------------ | ---------- | -------------------------------------------------------------------------------- | ------------- | -------- |
+| `address`                | `string`   | URL of the Mimir ruler.                                                          |               | yes      |
+| `tenant_id`              | `string`   | Mimir tenant ID.                                                                 |               | no       |
+| `use_legacy_routes`      | `bool`     | Whether to use [deprecated][gem-2_2] ruler API endpoints.                        | `false`       | no       |
+| `prometheus_http_prefix` | `string`   | Path prefix for [Mimir's Prometheus endpoint][gem-path-prefix].                  | `/prometheus` | no       |
+| `sync_interval`          | `duration` | Amount of time between reconciliations with Mimir.                               | `"30s"`       | no       |
+| `mimir_namespace_prefix` | `string`   | Prefix used to differentiate multiple {{< param "PRODUCT_NAME" >}} deployments. | `"agent"`     | no       |
+| `bearer_token`           | `secret`   | Bearer token to authenticate with.                                               |               | no       |
+| `bearer_token_file`      | `string`   | File containing a bearer token to authenticate with.                             |               | no       |
+| `proxy_url`              | `string`   | HTTP proxy to proxy requests through.                                            |               | no       |
+| `follow_redirects`       | `bool`     | Whether redirects returned by the server should be followed.                     | `true`        | no       |
+| `enable_http2`           | `bool`     | Whether HTTP2 is supported for requests.                                         | `true`        | no       |
 
 At most one of the following can be provided:
 - [`bearer_token` argument](#arguments).
@@ -81,6 +82,13 @@ The `mimir_namespace_prefix` argument can be used to separate the rules managed
 by multiple {{< param "PRODUCT_NAME" >}} deployments across your infrastructure. It should be set to a
 unique value for each deployment.
 
+If `use_legacy_routes` is set to `true`, `mimir.rules.kubernetes` contacts Mimir on a `/api/v1/rules` endpoint.
+
+If `prometheus_http_prefix` is set to `/mimir`, `mimir.rules.kubernetes` contacts Mimir on a `/mimir/config/v1/rules` endpoint.
+This is useful if you configure Mimir to use a different [prefix][gem-path-prefix] for its Prometheus endpoints than the default one.
+
+`prometheus_http_prefix` is ignored if `use_legacy_routes` is set to `true`.
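+
+For example, a minimal sketch along these lines points the component at a Mimir instance that serves its Prometheus API under a `/mimir` prefix. The address and tenant ID are illustrative placeholders:
+
+```river
+mimir.rules.kubernetes "example" {
+  address   = "http://mimir-ruler.example.com"
+  tenant_id = "team-a"
+
+  // Rules are then managed through the /mimir/config/v1/rules endpoint.
+  prometheus_http_prefix = "/mimir"
+}
+```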
+
 ## Blocks
 
 The following blocks are supported inside the definition of
diff --git a/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md b/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md
index ab3e55b5521f..8feb3dbff49e 100644
--- a/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md
+++ b/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md
@@ -231,11 +231,9 @@ traces_service_graph_request_failed_total{client="shop-backend",client_http_meth
 
 - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md b/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md
index 5811b64b7733..1c49cd59554d 100644
--- a/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md
+++ b/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md
@@ -291,11 +291,9 @@ For an input trace like this...
 
 - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
\ No newline at end of file
diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md b/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md
index 23c2eaa0a24d..80004502676a 100644
--- a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md
+++ b/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md
@@ -751,13 +751,13 @@ The example below uses the [merge_maps][] OTTL function.
 
 If the resource attributes are not treated in either of the ways described above, an error such as this one could be logged by `prometheus.remote_write`:
 `the sample has been rejected because another sample with the same timestamp, but a different value, has already been ingested (err-mimir-sample-duplicate-timestamp)`.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 In order for a Prometheus `target_info` metric to be generated, the incoming spans resource scope
 attributes must contain `service.name` and `service.instance.id` attributes.
 
 The `target_info` metric will be generated for each resource scope, while OpenTelemetry
 metric names and attributes will be normalized to be compliant with Prometheus naming rules.
-{{% /admonition %}}
+{{< /admonition >}}
 
 [merge_maps]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/ottlfuncs/README.md#merge_maps
 [prom-data-model]: https://prometheus.io/docs/concepts/data_model/
@@ -774,11 +774,9 @@ metric names and attributes will be normalized to be compliant with Prometheus n
 
 - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md b/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md index 4552adce44ce..59283441f97c 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md @@ -961,11 +961,9 @@ k3d cluster delete grafana-agent-lb-test - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/otelcol.exporter.logging.md b/docs/sources/flow/reference/components/otelcol.exporter.logging.md index c1e4c8413948..739f717426ea 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.logging.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.logging.md @@ -115,11 +115,9 @@ otelcol.exporter.logging "default" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.exporter.loki.md b/docs/sources/flow/reference/components/otelcol.exporter.loki.md index 9a314c3b5aae..ae14eba57f74 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.loki.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.loki.md @@ -171,11 +171,9 @@ loki.write "local" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}}
diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlp.md b/docs/sources/flow/reference/components/otelcol.exporter.otlp.md
index 6236f784d705..69f2700659aa 100644
--- a/docs/sources/flow/reference/components/otelcol.exporter.otlp.md
+++ b/docs/sources/flow/reference/components/otelcol.exporter.otlp.md
@@ -205,7 +205,7 @@ You can create an `otlp` exporter that sends your data to a managed service, for
 ```river
 otelcol.exporter.otlp "grafana_cloud_tempo" {
   client {
-    endpoint = "https://tempo-xxx.grafana.net/tempo"
+    endpoint = "tempo-xxx.grafana.net:443"
     auth     = otelcol.auth.basic.grafana_cloud_tempo.handler
   }
 }
@@ -222,11 +222,9 @@ otelcol.auth.basic "grafana_cloud_tempo" {
 
 - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}})
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
-
- 
\ No newline at end of file
+ 
diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md b/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md
index 14d0c5112fad..a6cb0e4c5832 100644
--- a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md
+++ b/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md
@@ -163,11 +163,9 @@ otelcol.exporter.otlphttp "tempo" {
 
 - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}})
 
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
\ No newline at end of file
diff --git a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md b/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md
index 4285f34cc799..bed0cdd6e48c 100644
--- a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md
+++ b/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md
@@ -58,7 +58,7 @@ When `include_scope_labels` is `true` the `otel_scope_name` and
 When `include_target_info` is true, OpenTelemetry Collector resources
 are converted into `target_info` metrics.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 OTLP metrics can have a lot of resource attributes.
 Setting `resource_to_telemetry_conversion` to `true` would convert all of them to Prometheus labels, which may not be what you want.
@@ -68,7 +68,7 @@ See [Creating Prometheus labels from OTLP resource attributes][] for an example.
[Creating Prometheus labels from OTLP resource attributes]: #creating-prometheus-labels-from-otlp-resource-attributes -{{% /admonition %}} +{{< /admonition >}} ## Exported fields @@ -183,11 +183,9 @@ prometheus.remote_write "mimir" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.attributes.md b/docs/sources/flow/reference/components/otelcol.processor.attributes.md index ae1b1eafe555..febcbb934664 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.attributes.md +++ b/docs/sources/flow/reference/components/otelcol.processor.attributes.md @@ -166,11 +166,11 @@ For example, adding a `span_names` filter could cause the component to error if The `exclude` block provides an option to exclude data from being fed into the [action] blocks based on the properties of a span, log, or metric records. -{{% admonition type="note" %}} +{{< admonition type="note" >}} Signals excluded by the `exclude` block will still be propagated to downstream components as-is. If you would like to not propagate certain signals to downstream components, consider a processor such as [otelcol.processor.tail_sampling]({{< relref "./otelcol.processor.tail_sampling.md" >}}). -{{% /admonition %}} +{{< /admonition >}} {{< docs/shared lookup="flow/reference/components/match-properties-block.md" source="agent" version="" >}} @@ -646,11 +646,9 @@ otelcol.processor.attributes "default" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.batch.md b/docs/sources/flow/reference/components/otelcol.processor.batch.md index 7a8eff522ff5..cf8b42d534f8 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.batch.md +++ b/docs/sources/flow/reference/components/otelcol.processor.batch.md @@ -239,11 +239,9 @@ otelcol.exporter.otlp "production" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. 
- -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.discovery.md b/docs/sources/flow/reference/components/otelcol.processor.discovery.md index 9d9b7c05e3a3..258f821a789d 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.discovery.md +++ b/docs/sources/flow/reference/components/otelcol.processor.discovery.md @@ -17,15 +17,15 @@ of labels for each discovered target. `otelcol.processor.discovery` adds resource attributes to spans which have a hostname matching the one in the `__address__` label provided by the `discovery.*` component. -{{% admonition type="note" %}} +{{< admonition type="note" >}} `otelcol.processor.discovery` is a custom component unrelated to any processors from the OpenTelemetry Collector. -{{% /admonition %}} +{{< /admonition >}} Multiple `otelcol.processor.discovery` components can be specified by giving them different labels. -{{% admonition type="note" %}} +{{< admonition type="note" >}} It can be difficult to follow [OpenTelemetry semantic conventions][OTEL sem conv] when adding resource attributes via `otelcol.processor.discovery`: * `discovery.relabel` and most `discovery.*` processes such as `discovery.kubernetes` @@ -47,7 +47,7 @@ from Static mode's `prom_sd_operation_type`/`prom_sd_pod_associations` [configur [Prometheus data model]: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels [OTEL sem conv]: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/README.md [Traces]: {{< relref "../../../static/configuration/traces-config.md" >}} -{{% /admonition %}} +{{< /admonition >}} ## Usage @@ -205,11 +205,9 @@ otelcol.processor.discovery "default" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/otelcol.processor.filter.md b/docs/sources/flow/reference/components/otelcol.processor.filter.md index 49a11028a80c..7fe282407be5 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.filter.md +++ b/docs/sources/flow/reference/components/otelcol.processor.filter.md @@ -39,22 +39,22 @@ the following metrics-only functions are used exclusively by the processor: * `end_time_unix_nano - start_time_unix_nano` * `sum([1, 2, 3, 4]) + (10 / 1) - 1` -{{% admonition type="note" %}} +{{< admonition type="note" >}} Raw River strings can be used to write OTTL statements. For example, the OTTL statement `attributes["grpc"] == true` is written in River as \`attributes["grpc"] == true\` -{{% /admonition %}} +{{< /admonition >}} -{{% admonition type="note" %}} +{{< admonition type="note" >}} `otelcol.processor.filter` is a wrapper over the upstream OpenTelemetry Collector `filter` processor. 
If necessary, bug reports or feature requests will be redirected to the upstream repository. -{{% /admonition %}} +{{< /admonition >}} You can specify multiple `otelcol.processor.filter` components by giving them different labels. -{{% admonition type="warning" %}} +{{< admonition type="warning" >}} Exercise caution when using `otelcol.processor.filter`: - Make sure you understand schema/format of the incoming data and test the configuration thoroughly. @@ -64,7 +64,7 @@ Exercise caution when using `otelcol.processor.filter`: if the log references the dropped span. [Orphaned Telemetry]: https://github.com/open-telemetry/opentelemetry-collector/blob/v0.85.0/docs/standard-warnings.md#orphaned-telemetry -{{% /admonition %}} +{{< /admonition >}} ## Usage @@ -316,11 +316,9 @@ Some values in the River strings are [escaped][river-strings]: - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md b/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md index 6e16dcebcd48..8d3d9601065c 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md +++ b/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md @@ -14,11 +14,11 @@ title: otelcol.processor.k8sattributes `otelcol.processor.k8sattributes` accepts telemetry data from other `otelcol` components and adds Kubernetes metadata to the resource attributes of spans, logs, or metrics. -{{% admonition type="note" %}} +{{< admonition type="note" >}} `otelcol.processor.k8sattributes` is a wrapper over the upstream OpenTelemetry Collector `k8sattributes` processor. If necessary, bug reports or feature requests will be redirected to the upstream repository. -{{% /admonition %}} +{{< /admonition >}} You can specify multiple `otelcol.processor.k8sattributes` components by giving them different labels. @@ -422,11 +422,9 @@ prometheus.remote_write "mimir" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md b/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md index 9d1528adf70d..30a9f1614149 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md +++ b/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md @@ -121,11 +121,9 @@ information. - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md b/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md index a76c85b2a21b..de866428c515 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md +++ b/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md @@ -15,11 +15,11 @@ title: otelcol.processor.probabilistic_sampler `otelcol.processor.probabilistic_sampler` accepts logs and traces data from other otelcol components and applies probabilistic sampling based on configuration options. -{{% admonition type="note" %}} +{{< admonition type="note" >}} `otelcol.processor.probabilistic_sampler` is a wrapper over the upstream OpenTelemetry Collector Contrib `probabilistic_sampler` processor. If necessary, bug reports or feature requests will be redirected to the upstream repository. -{{% /admonition %}} +{{< /admonition >}} You can specify multiple `otelcol.processor.probabilistic_sampler` components by giving them different labels. @@ -157,11 +157,9 @@ otelcol.processor.probabilistic_sampler "default" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}}
\ No newline at end of file
diff --git a/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md b/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md
new file mode 100644
index 000000000000..d6d476c481aa
--- /dev/null
+++ b/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md
@@ -0,0 +1,931 @@
+---
+aliases:
+- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.resourcedetection/
+- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.resourcedetection/
+- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.resourcedetection/
+- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.resourcedetection/
+canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.resourcedetection/
+labels:
+  stage: beta
+title: otelcol.processor.resourcedetection
+description: Learn about otelcol.processor.resourcedetection
+---
+
+# otelcol.processor.resourcedetection
+
+{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="<AGENT_VERSION>" >}}
+
+`otelcol.processor.resourcedetection` detects resource information from the host
+in a format that conforms to the [OpenTelemetry resource semantic conventions](https://github.com/open-telemetry/opentelemetry-specification/tree/main/specification/resource/semantic_conventions/), and appends or
+overrides the resource values in the telemetry data with this information.
+
+{{< admonition type="note" >}}
+`otelcol.processor.resourcedetection` is a wrapper over the upstream
+OpenTelemetry Collector Contrib `resourcedetection` processor. If necessary,
+bug reports or feature requests are redirected to the upstream repository.
+{{< /admonition >}}
+
+You can specify multiple `otelcol.processor.resourcedetection` components by giving them
+different labels.
+
+## Usage
+
+```river
+otelcol.processor.resourcedetection "LABEL" {
+  output {
+    logs    = [...]
+    metrics = [...]
+    traces  = [...]
+  }
+}
+```
+
+## Arguments
+
+`otelcol.processor.resourcedetection` supports the following arguments:
+
+Name | Type | Description | Default | Required
+----------- | -------------- | ----------------------------------------------------------------------------------- |---------- | --------
+`detectors` | `list(string)` | An ordered list of named detectors used to detect resource information. | `["env"]` | no
+`override` | `bool` | Configures whether existing resource attributes should be overridden or preserved. | `true` | no
+`timeout` | `duration` | Timeout by which all specified detectors must complete. | `"5s"` | no
+
+`detectors` can contain the following values:
+* `env`
+* `ec2`
+* `ecs`
+* `eks`
+* `elasticbeanstalk`
+* `lambda`
+* `azure`
+* `aks`
+* `consul`
+* `docker`
+* `gcp`
+* `heroku`
+* `system`
+* `openshift`
+* `kubernetes_node`
+
+`env` is the only detector that is not configured through a River block.
+The `env` detector reads resource information from the `OTEL_RESOURCE_ATTRIBUTES` environment variable.
+This variable must be in the format `<key1>=<value1>,<key2>=<value2>,...`,
+the details of which are currently pending confirmation in the OpenTelemetry specification.
+
+If a detector other than `env` is needed, you can customize it with the relevant River block.
+For example, you can customize the `ec2` detector with the [ec2][] block.
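+
+The following sketch enables the `env` and `ec2` detectors and customizes `ec2` with such a block.
+The tag expressions and the `otelcol.exporter.otlp.default.input` reference are illustrative placeholders:
+
+```river
+otelcol.processor.resourcedetection "default" {
+  // Detectors run in order; when two detectors set the same attribute,
+  // the first one in the list wins.
+  detectors = ["env", "ec2"]
+
+  ec2 {
+    // Add EC2 instance tags whose keys match these regular expressions
+    // as resource attributes.
+    tags = ["^team$", "^environment$"]
+  }
+
+  output {
+    traces = [otelcol.exporter.otlp.default.input]
+  }
+}
+```
+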
+If you omit the [ec2][] block, the defaults specified in the [ec2][] block documentation are used.
+
+If multiple detectors insert the same attribute name, the first detector in the list wins.
+For example, if you had `detectors = ["eks", "ec2"]` then `cloud.platform` will be `aws_eks` instead of `aws_ec2`.
+
+The following order is recommended for AWS:
+ 1. [lambda][]
+ 1. [elasticbeanstalk][]
+ 1. [eks][]
+ 1. [ecs][]
+ 1. [ec2][]
+
+## Blocks
+
+The following blocks are supported inside the definition of `otelcol.processor.resourcedetection`:
+
+Hierarchy | Block | Description | Required
+----------------- | --------------------- | ------------------------------------------------- | --------
+output | [output][] | Configures where to send received telemetry data. | yes
+ec2 | [ec2][] | | no
+ecs | [ecs][] | | no
+eks | [eks][] | | no
+elasticbeanstalk | [elasticbeanstalk][] | | no
+lambda | [lambda][] | | no
+azure | [azure][] | | no
+aks | [aks][] | | no
+consul | [consul][] | | no
+docker | [docker][] | | no
+gcp | [gcp][] | | no
+heroku | [heroku][] | | no
+system | [system][] | | no
+openshift | [openshift][] | | no
+kubernetes_node | [kubernetes_node][] | | no
+
+[output]: #output
+[ec2]: #ec2
+[ecs]: #ecs
+[eks]: #eks
+[elasticbeanstalk]: #elasticbeanstalk
+[lambda]: #lambda
+[azure]: #azure
+[aks]: #aks
+[consul]: #consul
+[docker]: #docker
+[gcp]: #gcp
+[heroku]: #heroku
+[system]: #system
+[openshift]: #openshift
+[kubernetes_node]: #kubernetes_node
+
+[res-attr-cfg]: #resource-attribute-config
+
+### output
+
+{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="<AGENT_VERSION>" >}}
+
+### ec2
+
+The `ec2` block reads resource information from the [EC2 instance metadata API][] using the [AWS SDK for Go][].
+
+The `ec2` block supports the following attributes:
+
+Attribute | Type | Description | Default | Required
+----------- |----------------| --------------------------------------------------------------------------- |-------------| --------
+`tags` | `list(string)` | A list of regular expressions to match against tag keys of an EC2 instance. | `[]` | no
+
+If you are using a proxy server on your EC2 instance, it's important that you exempt requests for instance metadata as described in the [AWS cli user guide][].
+Failing to do so can result in proxied or missing instance data.
+
+If the instance is part of AWS ParallelCluster and the detector is failing to connect to the metadata server,
+check the iptables rules and make sure the chain `PARALLELCLUSTER_IMDS` contains a rule that allows the {{< param "PRODUCT_ROOT_NAME" >}} user to access `169.254.169.254/32`.
+
+[AWS SDK for Go]: https://docs.aws.amazon.com/sdk-for-go/api/aws/ec2metadata/
+[EC2 instance metadata API]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+[AWS cli user guide]: https://github.com/awsdocs/aws-cli-user-guide/blob/a2393582590b64bd2a1d9978af15b350e1f9eb8e/doc_source/cli-configure-proxy.md#using-a-proxy-on-amazon-ec2-instances
+
+`tags` can be used to gather tags for the EC2 instance that {{< param "PRODUCT_ROOT_NAME" >}} is running on.
+To fetch EC2 tags, the IAM role assigned to the EC2 instance must have a policy that includes the `ec2:DescribeTags` permission.
+
+The `ec2` block supports the following blocks:
+
+Block | Description | Required
+---------------------------------------------- | ------------------------------------------------- | --------
+[resource_attributes](#ec2--resource_attributes) | Configures which resource attributes to add. | no
+
+#### ec2 > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+--------------------------------------- | ----------------------------------------------------------------------------------------------------- | --------
+[cloud.account.id][res-attr-cfg] | Toggles the `cloud.account.id` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.availability_zone][res-attr-cfg] | Toggles the `cloud.availability_zone` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no +[host.id][res-attr-cfg] | Toggles the `host.id` resource attribute.
Sets `enabled` to `true` by default. | no +[host.image.id][res-attr-cfg] | Toggles the `host.image.id` resource attribute.
Sets `enabled` to `true` by default. | no +[host.name][res-attr-cfg] | Toggles the `host.name` resource attribute.
Sets `enabled` to `true` by default. | no +[host.type][res-attr-cfg] | Toggles the `host.type` resource attribute.
Sets `enabled` to `true` by default. | no + +### ecs + +The `ecs` block queries the Task Metadata Endpoint (TMDE) to record information about the current ECS Task. Only TMDE V4 and V3 are supported. + +[Task Metadata Endpoint]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint.html + +The `ecs` block supports the following blocks: + +Block | Description | Required +-------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#ecs--resource_attributes) | Configures which resource attributes to add. | no + +#### ecs > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +--------------------------------------- | --------------------------------------------------------------------------------------------------- | -------- +[aws.ecs.cluster.arn][res-attr-cfg] | Toggles the `aws.ecs.cluster.arn` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.ecs.launchtype][res-attr-cfg] | Toggles the `aws.ecs.launchtype` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.ecs.task.arn][res-attr-cfg] | Toggles the `aws.ecs.task.arn` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.ecs.task.family][res-attr-cfg] | Toggles the `aws.ecs.task.family` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.ecs.task.revision][res-attr-cfg] | Toggles the `aws.ecs.task.revision` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.log.group.arns][res-attr-cfg] | Toggles the `aws.log.group.arns` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.log.group.names][res-attr-cfg] | Toggles the `aws.log.group.names` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.log.stream.arns][res-attr-cfg] | Toggles the `aws.log.stream.arns` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.log.stream.names][res-attr-cfg] | Toggles the `aws.log.stream.names` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.account.id][res-attr-cfg] | Toggles the `cloud.account.id` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.availability_zone][res-attr-cfg] | Toggles the `cloud.availability_zone` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no + +### eks + +The `eks` block adds resource attributes for Amazon EKS. + +The `eks` block supports the following blocks: + +Block | Description | Required +-------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#eks--resource_attributes) | Configures which resource attributes to add. | no + +#### eks > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +------------------------------- | ------------------------------------------------------------------------------------------- | -------- +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no + +Example values: +* `cloud.provider`: `"aws"` +* `cloud.platform`: `"aws_eks"` + +### elasticbeanstalk + +The `elasticbeanstalk` block reads the AWS X-Ray configuration file available on all Beanstalk instances with [X-Ray Enabled][]. + +[X-Ray Enabled]: https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-configuration-debugging.html + +The `elasticbeanstalk` block supports the following blocks: + +Block | Description | Required +--------------------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#elasticbeanstalk--resource_attributes) | Configures which resource attributes to add. | no + +#### elasticbeanstalk > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +--------------------------------- | --------------------------------------------------------------------------------------------- | -------- +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no
+[deployment.environment][res-attr-cfg] | Toggles the `deployment.environment` resource attribute.
Sets `enabled` to `true` by default. | no
+[service.instance.id][res-attr-cfg] | Toggles the `service.instance.id` resource attribute.
Sets `enabled` to `true` by default. | no +[service.version][res-attr-cfg] | Toggles the `service.version` resource attribute.
Sets `enabled` to `true` by default. | no + +Example values: +* `cloud.provider`: `"aws"` +* `cloud.platform`: `"aws_elastic_beanstalk"` + +### lambda + +The `lambda` block uses the AWS Lambda [runtime environment variables][lambda-env-vars] to retrieve various resource attributes. + +[lambda-env-vars]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime + +The `lambda` block supports the following blocks: + +Block | Description | Required +----------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#lambda--resource_attributes) | Configures which resource attributes to add. | no + +#### lambda > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +------------------------------------- | --------------------------------------------------------------------------------------------------- | -------- +[aws.log.group.names][res-attr-cfg] | Toggles the `aws.log.group.names` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.log.stream.names][res-attr-cfg] | Toggles the `aws.log.stream.names` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.instance][res-attr-cfg] | Toggles the `faas.instance` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.max_memory][res-attr-cfg] | Toggles the `faas.max_memory` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.name][res-attr-cfg] | Toggles the `faas.name` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.version][res-attr-cfg] | Toggles the `faas.version` resource attribute.
Sets `enabled` to `true` by default. | no + +[Cloud semantic conventions][]: +* `cloud.provider`: `"aws"` +* `cloud.platform`: `"aws_lambda"` +* `cloud.region`: `$AWS_REGION` + +[Function as a Service semantic conventions][] and [AWS Lambda semantic conventions][]: +* `faas.name`: `$AWS_LAMBDA_FUNCTION_NAME` +* `faas.version`: `$AWS_LAMBDA_FUNCTION_VERSION` +* `faas.instance`: `$AWS_LAMBDA_LOG_STREAM_NAME` +* `faas.max_memory`: `$AWS_LAMBDA_FUNCTION_MEMORY_SIZE` + +[AWS Logs semantic conventions][]: +* `aws.log.group.names`: `$AWS_LAMBDA_LOG_GROUP_NAME` +* `aws.log.stream.names`: `$AWS_LAMBDA_LOG_STREAM_NAME` + +[Cloud semantic conventions]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/cloud.md +[Function as a Service semantic conventions]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/faas.md +[AWS Lambda semantic conventions]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/instrumentation/aws-lambda.md#resource-detector +[AWS Logs semantic conventions]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/cloud_provider/aws/logs.md + +### azure + +The `azure` block queries the [Azure Instance Metadata Service][] to retrieve various resource attributes. + +[Azure Instance Metadata Service]: https://aka.ms/azureimds + +The `azure` block supports the following blocks: + +Block | Description | Required +---------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#azure--resource_attributes) | Configures which resource attributes to add. | no + +#### azure > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +-----------------------------------------|------------------------------------------------------------------------------------------------------|--------- +[azure.resourcegroup.name][res-attr-cfg] | Toggles the `azure.resourcegroup.name` resource attribute.
Sets `enabled` to `true` by default. | no +[azure.vm.name][res-attr-cfg] | Toggles the `azure.vm.name` resource attribute.
Sets `enabled` to `true` by default. | no +[azure.vm.scaleset.name][res-attr-cfg] | Toggles the `azure.vm.scaleset.name` resource attribute.
Sets `enabled` to `true` by default. | no +[azure.vm.size][res-attr-cfg] | Toggles the `azure.vm.size` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.account.id][res-attr-cfg] | Toggles the `cloud.account.id` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no +[host.id][res-attr-cfg] | Toggles the `host.id` resource attribute.
Sets `enabled` to `true` by default. | no +[host.name][res-attr-cfg] | Toggles the `host.name` resource attribute.
Sets `enabled` to `true` by default. | no + +Example values: +* `cloud.provider`: `"azure"` +* `cloud.platform`: `"azure_vm"` + +### aks + +The `aks` block adds resource attributes related to Azure AKS. + +The `aks` block supports the following blocks: + +Block | Description | Required +-------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#aks--resource_attributes) | Configures which resource attributes to add. | no + +#### aks > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +------------------------------- | ------------------------------------------------------------------------------------------- | -------- +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no
+
+Example values:
+* `cloud.provider`: `"azure"`
+* `cloud.platform`: `"azure_aks"`
+
+### consul
+
+The `consul` block queries a Consul agent and reads its configuration endpoint to retrieve values for resource attributes.
+
+The `consul` block supports the following attributes:
+
+Attribute | Type | Description | Default | Required
+-------------|----------------|-----------------------------------------------------------------------------------|---------|---------
+`address` | `string` | The address of the Consul server. | `""` | no
+`datacenter` | `string` | Datacenter to use. If not provided, the default agent datacenter is used. | `""` | no
+`token` | `secret` | A per-request ACL token which overrides the Consul agent's default (empty) token. | `""` | no
+`namespace` | `string` | The name of the namespace to send along for the request. | `""` | no
+`meta` | `list(string)` | Allowlist of [Consul Metadata][] keys to use as resource attributes. | `[]` | no
+
+`token` is only required if [Consul's ACL System][] is enabled.
+
+[Consul Metadata]: https://www.consul.io/docs/agent/options#node_meta
+[Consul's ACL System]: https://www.consul.io/docs/security/acl/acl-system
+
+The `consul` block supports the following blocks:
+
+Block | Description | Required
+----------------------------------------------------|----------------------------------------------|---------
+[resource_attributes](#consul--resource_attributes) | Configures which resource attributes to add. | no
+
+#### consul > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+-----------------------------|------------------------------------------------------------------------------------------|---------
+[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no +[host.id][res-attr-cfg] | Toggles the `host.id` resource attribute.
Sets `enabled` to `true` by default. | no +[host.name][res-attr-cfg] | Toggles the `host.name` resource attribute.
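Sets `enabled` to `true` by default. | no
+
+For example, the following sketch configures a `consul` detector; the `address`, `datacenter`, and `meta` values are hypothetical placeholders for your Consul deployment:
+
+```river
+otelcol.processor.resourcedetection "default" {
+  detectors = ["consul"]
+
+  consul {
+    // Hypothetical example values:
+    address    = "localhost:8500"
+    datacenter = "dc1"
+    // Allowlist of Consul node metadata keys to turn into resource attributes.
+    meta       = ["rack"]
+  }
+
+  output {
+    logs    = [otelcol.exporter.otlp.default.input]
+    metrics = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```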
+
+### docker
+
+The `docker` block queries the Docker daemon to retrieve various resource attributes from the host machine.
+
+You need to mount the Docker socket (`/var/run/docker.sock` on Linux) to contact the Docker daemon.
+Docker detection doesn't work on macOS.
+
+The `docker` block supports the following blocks:
+
+Block | Description | Required
+----------------------------------------------------|----------------------------------------------|---------
+[resource_attributes](#docker--resource_attributes) | Configures which resource attributes to add. | no
+
+#### docker > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+--------------------------|---------------------------------------------------------------------------------------|---------
+[host.name][res-attr-cfg] | Toggles the `host.name` resource attribute.
Sets `enabled` to `true` by default. | no +[os.type][res-attr-cfg] | Toggles the `os.type` resource attribute.
Sets `enabled` to `true` by default. | no
+
+### gcp
+
+The `gcp` block detects resource attributes using the [Google Cloud Client Libraries for Go][], which read resource information from the [GCP metadata server][].
+The detector also uses environment variables to identify which GCP platform the application is running on, and assigns appropriate resource attributes for that platform.
+
+Use the `gcp` detector regardless of the GCP platform {{< param "PRODUCT_ROOT_NAME" >}} is running on.
+
+[Google Cloud Client Libraries for Go]: https://github.com/googleapis/google-cloud-go
+[GCP metadata server]: https://cloud.google.com/compute/docs/storing-retrieving-metadata
+
+The `gcp` block supports the following blocks:
+
+Block | Description | Required
+-------------------------------------------------|----------------------------------------------|---------
+[resource_attributes](#gcp--resource_attributes) | Configures which resource attributes to add. | no
+
+#### gcp > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+---------------------------------------------|----------------------------------------------------------------------------------------------------------|---------
+[cloud.account.id][res-attr-cfg] | Toggles the `cloud.account.id` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.availability_zone][res-attr-cfg] | Toggles the `cloud.availability_zone` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.id][res-attr-cfg] | Toggles the `faas.id` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.instance][res-attr-cfg] | Toggles the `faas.instance` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.name][res-attr-cfg] | Toggles the `faas.name` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.version][res-attr-cfg] | Toggles the `faas.version` resource attribute.
Sets `enabled` to `true` by default. | no +[gcp.cloud_run.job.execution][res-attr-cfg] | Toggles the `gcp.cloud_run.job.execution` resource attribute.
Sets `enabled` to `true` by default. | no +[gcp.cloud_run.job.task_index][res-attr-cfg] | Toggles the `gcp.cloud_run.job.task_index` resource attribute.
Sets `enabled` to `true` by default. | no +[gcp.gce.instance.hostname][res-attr-cfg] | Toggles the `gcp.gce.instance.hostname` resource attribute.
Sets `enabled` to `false` by default. | no +[gcp.gce.instance.name][res-attr-cfg] | Toggles the `gcp.gce.instance.name` resource attribute.
Sets `enabled` to `false` by default. | no +[host.id][res-attr-cfg] | Toggles the `host.id` resource attribute.
Sets `enabled` to `true` by default. | no +[host.name][res-attr-cfg] | Toggles the `host.name` resource attribute.
Sets `enabled` to `true` by default. | no +[host.type][res-attr-cfg] | Toggles the `host.type` resource attribute.
Sets `enabled` to `true` by default. | no +[k8s.cluster.name][res-attr-cfg] | Toggles the `k8s.cluster.name` resource attribute.
Sets `enabled` to `true` by default. | no + +#### Google Compute Engine (GCE) metadata + +* `cloud.provider`: `"gcp"` +* `cloud.platform`: `"gcp_compute_engine"` +* `cloud.account.id`: project id +* `cloud.region`: e.g. `"us-central1"` +* `cloud.availability_zone`: e.g. `"us-central1-c"` +* `host.id`: instance id +* `host.name`: instance name +* `host.type`: machine type +* (optional) `gcp.gce.instance.hostname` +* (optional) `gcp.gce.instance.name` + +#### Google Kubernetes Engine (GKE) metadata + +* `cloud.provider`: `"gcp"` +* `cloud.platform`: `"gcp_kubernetes_engine"` +* `cloud.account.id`: project id +* `cloud.region`: only for regional GKE clusters; e.g. `"us-central1"` +* `cloud.availability_zone`: only for zonal GKE clusters; e.g. `"us-central1-c"` +* `k8s.cluster.name` +* `host.id`: instance id +* `host.name`: instance name; only when workload identity is disabled + +One known issue happens when GKE workload identity is enabled. The GCE metadata endpoints won't be available, +and the GKE resource detector won't be able to determine `host.name`. +If this happens, you can set `host.name` from one of the following resources: +- Get the `node.name` through the [downward API][] with the `env` detector. +- Get the Kubernetes node name from the Kubernetes API (with `k8s.io/client-go`). + +[downward API]: https://kubernetes.io/docs/concepts/workloads/pods/downward-api/ + +#### Google Cloud Run Services metadata + +* `cloud.provider`: `"gcp"` +* `cloud.platform`: `"gcp_cloud_run"` +* `cloud.account.id`: project id +* `cloud.region`: e.g. `"us-central1"` +* `faas.id`: instance id +* `faas.name`: service name +* `faas.version`: service revision + +#### Cloud Run Jobs metadata + +* `cloud.provider`: `"gcp"` +* `cloud.platform`: `"gcp_cloud_run"` +* `cloud.account.id`: project id +* `cloud.region`: e.g. `"us-central1"` +* `faas.id`: instance id +* `faas.name`: service name +* `gcp.cloud_run.job.execution`: e.g. `"my-service-ajg89"` +* `gcp.cloud_run.job.task_index`: e.g. `"0"` + +#### Google Cloud Functions metadata + +* `cloud.provider`: `"gcp"` +* `cloud.platform`: `"gcp_cloud_functions"` +* `cloud.account.id`: project id +* `cloud.region`: e.g. `"us-central1"` +* `faas.id`: instance id +* `faas.name`: function name +* `faas.version`: function version + +#### Google App Engine metadata + +* `cloud.provider`: `"gcp"` +* `cloud.platform`: `"gcp_app_engine"` +* `cloud.account.id`: project id +* `cloud.region`: e.g. `"us-central1"` +* `cloud.availability_zone`: e.g. `"us-central1-c"` +* `faas.id`: instance id +* `faas.name`: service name +* `faas.version`: service version + +### heroku + +The `heroku` block adds resource attributes derived from [Heroku dyno metadata][]. + +The `heroku` block supports the following blocks: + +Block | Description | Required +----------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#heroku--resource_attributes) | Configures which resource attributes to add. | no + +#### heroku > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +--------------------------------------------------|---------------------------------------------------------------------------------------------------------------|--------- +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[heroku.app.id][res-attr-cfg] | Toggles the `heroku.app.id` resource attribute.
Sets `enabled` to `true` by default. | no +[heroku.dyno.id][res-attr-cfg] | Toggles the `heroku.dyno.id` resource attribute.
Sets `enabled` to `true` by default. | no +[heroku.release.commit][res-attr-cfg] | Toggles the `heroku.release.commit` resource attribute.
Sets `enabled` to `true` by default. | no +[heroku.release.creation_timestamp][res-attr-cfg] | Toggles the `heroku.release.creation_timestamp` resource attribute.
Sets `enabled` to `true` by default. | no +[service.instance.id][res-attr-cfg] | Toggles the `service.instance.id` resource attribute.
Sets `enabled` to `true` by default. | no +[service.name][res-attr-cfg] | Toggles the `service.name` resource attribute.
Sets `enabled` to `true` by default. | no +[service.version][res-attr-cfg] | Toggles the `service.version` resource attribute.
Sets `enabled` to `true` by default. | no
+
+When [Heroku dyno metadata][] is active, Heroku applications publish information through environment variables.
+We map these environment variables to resource attributes as follows:
+
+| Dyno metadata environment variable | Resource attribute |
+|------------------------------------|-------------------------------------|
+| `HEROKU_APP_ID` | `heroku.app.id` |
+| `HEROKU_APP_NAME` | `service.name` |
+| `HEROKU_DYNO_ID` | `service.instance.id` |
+| `HEROKU_RELEASE_CREATED_AT` | `heroku.release.creation_timestamp` |
+| `HEROKU_RELEASE_VERSION` | `service.version` |
+| `HEROKU_SLUG_COMMIT` | `heroku.release.commit` |
+
+For more information, see the [Heroku cloud provider documentation][] under the [OpenTelemetry specification semantic conventions][].
+
+[Heroku dyno metadata]: https://devcenter.heroku.com/articles/dyno-metadata
+[Heroku cloud provider documentation]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/cloud_provider/heroku.md
+[OpenTelemetry specification semantic conventions]: https://github.com/open-telemetry/opentelemetry-specification
+
+### system
+
+The `system` block queries the host machine to retrieve various resource attributes.
+
+{{< admonition type="note" >}}
+Use the [Docker](#docker) detector if running {{< param "PRODUCT_ROOT_NAME" >}} as a Docker container.
+{{< /admonition >}}
+
+The `system` block supports the following attributes:
+
+Attribute | Type | Description | Default | Required
+------------------ | --------------- | --------------------------------------------------------------------------- |---------------- | --------
+`hostname_sources` | `list(string)` | A priority-ordered list of sources from which the hostname is fetched. | `["dns", "os"]` | no
+
+The valid options for `hostname_sources` are:
+* `"dns"`: Uses multiple sources to get the fully qualified domain name.
+First, it looks up the hostname in the local machine's `hosts` file. If that fails, it looks up the CNAME.
+Finally, if that fails, it does a reverse DNS query. This hostname source may produce unreliable results on Windows;
+to produce a FQDN, Windows hosts might get better results from the `"lookup"` hostname source described below.
+* `"os"`: Provides the hostname reported by the local machine's kernel.
+* `"cname"`: Provides the canonical name, as provided by `net.LookupCNAME` in the Go standard library.
+This hostname source may produce unreliable results on Windows.
+* `"lookup"`: Does a reverse DNS lookup of the current host's IP address.
+
+If a source returns an error, the next source in the `hostname_sources` list is tried.
+
+The `system` block supports the following blocks:
+
+Block | Description | Required
+----------------------------------------------------|----------------------------------------------|---------
+[resource_attributes](#system--resource_attributes) | Configures which resource attributes to add. | no
+
+#### system > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+---------------------------------------|-----------------------------------------------------------------------------------------------------|---------
+[host.arch][res-attr-cfg] | Toggles the `host.arch` resource attribute.
Sets `enabled` to `false` by default. | no +[host.cpu.cache.l2.size][res-attr-cfg] | Toggles the `host.cpu.cache.l2.size` resource attribute.
Sets `enabled` to `false` by default. | no +[host.cpu.family][res-attr-cfg] | Toggles the `host.cpu.family` resource attribute.
Sets `enabled` to `false` by default. | no +[host.cpu.model.id][res-attr-cfg] | Toggles the `host.cpu.model.id` resource attribute.
Sets `enabled` to `false` by default. | no +[host.cpu.model.name][res-attr-cfg] | Toggles the `host.cpu.model.name` resource attribute.
Sets `enabled` to `false` by default. | no +[host.cpu.stepping][res-attr-cfg] | Toggles the `host.cpu.stepping` resource attribute.
Sets `enabled` to `false` by default. | no +[host.cpu.vendor.id][res-attr-cfg] | Toggles the `host.cpu.vendor.id` resource attribute.
Sets `enabled` to `false` by default. | no +[host.id][res-attr-cfg] | Toggles the `host.id` resource attribute.
Sets `enabled` to `false` by default. | no +[host.name][res-attr-cfg] | Toggles the `host.name` resource attribute.
Sets `enabled` to `true` by default. | no +[os.description][res-attr-cfg] | Toggles the `os.description` resource attribute.
Sets `enabled` to `false` by default. | no +[os.type][res-attr-cfg] | Toggles the `os.type` resource attribute.
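Sets `enabled` to `true` by default. | no
+
+For example, the following sketch uses only the kernel-provided hostname and enables two resource attributes that are disabled by default:
+
+```river
+otelcol.processor.resourcedetection "default" {
+  detectors = ["system"]
+
+  system {
+    // Try the kernel-provided hostname only.
+    hostname_sources = ["os"]
+
+    resource_attributes {
+      // host.arch and host.id default to enabled = false.
+      host.arch { enabled = true }
+      host.id   { enabled = true }
+    }
+  }
+
+  output {
+    logs    = [otelcol.exporter.otlp.default.input]
+    metrics = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```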
+
+### openshift
+
+The `openshift` block queries the OpenShift and Kubernetes APIs to retrieve various resource attributes.
+
+The `openshift` block supports the following attributes:
+
+Attribute | Type | Description | Default | Required
+---------- |---------- | ------------------------------------------------------------- |-------------| --------
+`address` | `string` | Address of the OpenShift API server. | _See below_ | no
+`token` | `string` | Token used to authenticate against the OpenShift API server. | `""` | no
+
+The "get", "watch", and "list" permissions are required:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: grafana-agent
+rules:
+- apiGroups: ["config.openshift.io"]
+  resources: ["infrastructures", "infrastructures/status"]
+  verbs: ["get", "watch", "list"]
+```
+
+By default, the API address is determined from the environment variables `KUBERNETES_SERVICE_HOST`
+and `KUBERNETES_SERVICE_PORT`, and the service token is read from `/var/run/secrets/kubernetes.io/serviceaccount/token`.
+If TLS is not explicitly disabled and no `ca_file` is configured, `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` is used.
+The determination of the API address, `ca_file`, and the service token is skipped if they are set in the configuration.
+
+The `openshift` block supports the following blocks:
+
+Block | Description | Required
+---------------------------------------------- | ---------------------------------------------------- | --------
+[resource_attributes](#openshift--resource_attributes) | Configures which resource attributes to add. | no
+[tls](#openshift--tls) | TLS settings for the connection with the OpenShift API. | yes
+
+#### openshift > tls
+
+The `tls` block configures TLS settings used for the connection to the OpenShift API server.
+
+{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}}
+
+#### openshift > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+--------------------------------- | --------------------------------------------------------------------------------------------- | --------
+[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no +[k8s.cluster.name][res-attr-cfg] | Toggles the `k8s.cluster.name` resource attribute.
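Sets `enabled` to `true` by default. | no
+
+For example, the following sketch relies on the in-cluster defaults for the API address and service account token; the `ca_file` path shown is an assumption based on the default service account mount:
+
+```river
+otelcol.processor.resourcedetection "default" {
+  detectors = ["openshift"]
+
+  openshift {
+    tls {
+      // Assumed path: trust the service account CA bundle mounted into the pod.
+      ca_file = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+    }
+  }
+
+  output {
+    logs    = [otelcol.exporter.otlp.default.input]
+    metrics = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```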
+
+### kubernetes_node
+
+The `kubernetes_node` block queries the Kubernetes API server to retrieve various node resource attributes.
+
+The `kubernetes_node` block supports the following attributes:
+
+Attribute | Type | Description | Default | Required
+------------------- |--------- | ------------------------------------------------------------------------- |------------------ | --------
+`auth_type` | `string` | Configures how to authenticate to the K8s API server. | `"none"` | no
+`context` | `string` | Override the current context when `auth_type` is set to `"kubeConfig"`. | `""` | no
+`node_from_env_var` | `string` | The name of an environment variable from which to retrieve the node name. | `"K8S_NODE_NAME"` | no
+
+The "get" and "list" permissions are required:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: grafana-agent
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list"]
+```
+
+`auth_type` can be set to one of the following:
+* `none`: No authentication.
+* `serviceAccount`: Use the standard service account token provided to the {{< param "PRODUCT_ROOT_NAME" >}} pod.
+* `kubeConfig`: Use credentials from `~/.kube/config`.
+
+The `kubernetes_node` block supports the following blocks:
+
+Block | Description | Required
+---------------------------------------------- | ------------------------------------------------- | --------
+[resource_attributes](#kubernetes_node--resource_attributes) | Configures which resource attributes to add. | no
+
+#### kubernetes_node > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+------------------------------ | ------------------------------------------------------------------------------------------ | --------
+[k8s.node.name][res-attr-cfg] | Toggles the `k8s.node.name` resource attribute.
Sets `enabled` to `true` by default. | no +[k8s.node.uid][res-attr-cfg] | Toggles the `k8s.node.uid` resource attribute.
Sets `enabled` to `true` by default. | no
+
+## Common configuration
+
+### Resource attribute config
+
+This section describes how to configure resource attribute blocks such as `k8s.node.name` and `azure.vm.name`.
+Every block is configured using the same set of attributes.
+Only the default values for those attributes might differ across resource attributes.
+For example, some resource attributes have `enabled` set to `true` by default, whereas others don't.
+
+The following attributes are supported:
+
+Attribute | Type | Description | Default | Required
+--------- | ------- | ----------------------------------------------------------------------------------- |------------- | --------
+`enabled` | `bool` | Toggles whether to add the resource attribute to the span, log, or metric resource. | _See below_ | no
+
+To see the default value for `enabled`, refer to the tables in the sections above that list the resource attribute blocks.
+The "Description" column states either...
+
+> Sets `enabled` to `true` by default.
+
+... or:
+
+> Sets `enabled` to `false` by default.
+
+## Exported fields
+
+The following fields are exported and can be referenced by other components:
+
+Name | Type | Description
+---- | ---- | -----------
+`input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to.
+
+`input` accepts OTLP-formatted data for any telemetry signal of these types:
+* logs
+* metrics
+* traces
+
+## Component health
+
+`otelcol.processor.resourcedetection` is only reported as unhealthy if given an invalid
+configuration.
+
+## Debug information
+
+`otelcol.processor.resourcedetection` doesn't expose any component-specific debug
+information.
+
+## Examples
+
+### env detector
+
+If you set up an `OTEL_RESOURCE_ATTRIBUTES` environment variable with a value of `TestKey=TestValue`,
+then all logs, metrics, and traces have a resource attribute with a key of `TestKey` and a value of `TestValue`.
+
+```river
+otelcol.processor.resourcedetection "default" {
+  detectors = ["env"]
+
+  output {
+    logs    = [otelcol.exporter.otlp.default.input]
+    metrics = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```
+
+### env and ec2
+
+There is no need to put in an `ec2 {}` River block.
+The `ec2` defaults are applied automatically, as specified in [ec2][].
+
+```river
+otelcol.processor.resourcedetection "default" {
+  detectors = ["env", "ec2"]
+
+  output {
+    logs    = [otelcol.exporter.otlp.default.input]
+    metrics = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```
+
+### ec2 with default resource attributes
+
+There is no need to put in an `ec2 {}` River block.
+The `ec2` defaults are applied automatically, as specified in [ec2][].
+ +```river +otelcol.processor.resourcedetection "default" { + detectors = ["ec2"] + + output { + logs = [otelcol.exporter.otlp.default.input] + metrics = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} +``` + +### ec2 with explicit resource attributes + +```river +otelcol.processor.resourcedetection "default" { + detectors = ["ec2"] + ec2 { + tags = ["^tag1$", "^tag2$", "^label.*$"] + resource_attributes { + cloud.account.id { enabled = true } + cloud.availability_zone { enabled = true } + cloud.platform { enabled = true } + cloud.provider { enabled = true } + cloud.region { enabled = true } + host.id { enabled = true } + host.image.id { enabled = false } + host.name { enabled = false } + host.type { enabled = false } + } + } + + output { + logs = [otelcol.exporter.otlp.default.input] + metrics = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} +``` + +### kubernetes_node + +This example uses the default `node_from_env_var` option of `K8S_NODE_NAME`. + +There is no need to put in a `kubernetes_node {}` River block. +The `kubernetes_node` defaults are applied automatically, as specified in [kubernetes_node][]. + +```river +otelcol.processor.resourcedetection "default" { + detectors = ["kubernetes_node"] + + output { + logs = [otelcol.exporter.otlp.default.input] + metrics = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} +``` + +You need to add this to your workload: + +```yaml + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName +``` + +### kubernetes_node with a custom environment variable + +This example uses a custom `node_from_env_var` set to `my_custom_var`. + +```river +otelcol.processor.resourcedetection "default" { + detectors = ["kubernetes_node"] + kubernetes_node { + node_from_env_var = "my_custom_var" + } + + output { + logs = [otelcol.exporter.otlp.default.input] + metrics = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} +``` + +You need to add this to your workload: + +```yaml + env: + - name: my_custom_var + valueFrom: + fieldRef: + fieldPath: spec.nodeName +``` + + +## Compatible components + +`otelcol.processor.resourcedetection` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.processor.resourcedetection` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.span.md b/docs/sources/flow/reference/components/otelcol.processor.span.md index fe6985881007..ac909575cb1a 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.span.md +++ b/docs/sources/flow/reference/components/otelcol.processor.span.md @@ -400,11 +400,9 @@ otelcol.processor.span "default" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md b/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md index b6c6ccfdc0f7..cb651d67e4f0 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md +++ b/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md @@ -565,11 +565,9 @@ otelcol.exporter.otlp "production" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.transform.md b/docs/sources/flow/reference/components/otelcol.processor.transform.md index 81967bb11c24..9a70c07e9509 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.transform.md +++ b/docs/sources/flow/reference/components/otelcol.processor.transform.md @@ -42,7 +42,7 @@ there is also a set of metrics-only functions: * `end_time_unix_nano - start_time_unix_nano` * `sum([1, 2, 3, 4]) + (10 / 1) - 1` -{{% admonition type="note" %}} +{{< admonition type="note" >}} There are two ways of inputting strings in River configuration files: * Using quotation marks ([normal River strings][river-strings]). Characters such as `\` and `"` must be escaped by preceding them with a `\` character. @@ -57,17 +57,17 @@ Raw strings are generally more convenient for writing OTTL statements. [river-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#strings" >}} [river-raw-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#raw-strings" >}} -{{% /admonition %}} +{{< /admonition >}} -{{% admonition type="note" %}} +{{< admonition type="note" >}} `otelcol.processor.transform` is a wrapper over the upstream OpenTelemetry Collector `transform` processor. 
If necessary, bug reports or feature requests will be redirected to the upstream repository. -{{% /admonition %}} +{{< /admonition >}} You can specify multiple `otelcol.processor.transform` components by giving them different labels. -{{% admonition type="warning" %}} +{{< admonition type="warning" >}} `otelcol.processor.transform` allows you to modify all aspects of your telemetry. Some specific risks are given below, but this is not an exhaustive list. It is important to understand your data before using this processor. @@ -88,7 +88,7 @@ to a new metric data type or can be used to create new metrics. [Orphaned Telemetry]: https://github.com/open-telemetry/opentelemetry-collector/blob/{{< param "OTEL_VERSION" >}}/docs/standard-warnings.md#orphaned-telemetry [no-op]: https://en.wikipedia.org/wiki/NOP_(code) [metrics data model]: https://github.com/open-telemetry/opentelemetry-specification/blob/main//specification/metrics/data-model.md -{{% /admonition %}} +{{< /admonition >}} ## Usage @@ -602,11 +602,9 @@ each `"` with a `\"`, and each `\` with a `\\` inside a [normal][river-strings] - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md b/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md index c19bb03dba77..4f584319fb6c 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md @@ -287,11 +287,9 @@ otelcol.exporter.otlp "default" { - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.kafka.md b/docs/sources/flow/reference/components/otelcol.receiver.kafka.md index 28588420609d..abb89ef82fb3 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.kafka.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.kafka.md @@ -339,11 +339,9 @@ otelcol.exporter.otlp "default" { - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. 
- -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.loki.md b/docs/sources/flow/reference/components/otelcol.receiver.loki.md index 31d9877da882..c06b82cbe3dc 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.loki.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.loki.md @@ -112,11 +112,9 @@ otelcol.exporter.otlp "default" { - Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md b/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md index a6d7a5bb3ae3..ac694d890712 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md @@ -219,11 +219,9 @@ otelcol.exporter.otlp "default" { - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.otlp.md b/docs/sources/flow/reference/components/otelcol.receiver.otlp.md index 134098ed2de4..862562508afd 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.otlp.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.otlp.md @@ -257,11 +257,9 @@ otelcol.exporter.otlp "default" { - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md b/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md index d0723aad80c4..7611b0955a4b 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md @@ -111,11 +111,9 @@ otelcol.exporter.otlp "default" { - Components that consume [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md b/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md index 11e6a0485e09..54891a882da4 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md @@ -230,11 +230,9 @@ otelcol.exporter.otlp "default" { - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md b/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md index 2dd3d8a9ccfb..5d6c903036d1 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md @@ -152,11 +152,9 @@ otelcol.exporter.otlp "default" { - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.exporter.agent.md b/docs/sources/flow/reference/components/prometheus.exporter.agent.md index cb2dd5cda361..a4575bb08c1b 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.agent.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.agent.md @@ -8,7 +8,8 @@ title: prometheus.exporter.agent --- # prometheus.exporter.agent -The `prometheus.exporter.agent` component collects and exposes metrics about the agent itself. + +The `prometheus.exporter.agent` component collects and exposes metrics about {{< param "PRODUCT_NAME" >}} itself. ## Usage @@ -18,6 +19,7 @@ prometheus.exporter.agent "agent" { ``` ## Arguments + `prometheus.exporter.agent` accepts no arguments. ## Exported fields @@ -31,12 +33,12 @@ an invalid configuration. ## Debug information -`prometheus.exporter.agent` does not expose any component-specific +`prometheus.exporter.agent` doesn't expose any component-specific debug information. ## Debug metrics -`prometheus.exporter.agent` does not expose any component-specific +`prometheus.exporter.agent` doesn't expose any component-specific debug metrics. ## Example @@ -80,11 +82,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.apache.md b/docs/sources/flow/reference/components/prometheus.exporter.apache.md index 08f19fa2d1d9..d3f786083b37 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.apache.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.apache.md @@ -96,11 +96,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.azure.md b/docs/sources/flow/reference/components/prometheus.exporter.azure.md index ea8fa08cd912..1835e5e24745 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.azure.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.azure.md @@ -180,11 +180,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. 
- -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md b/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md index 24fe248d5e23..fb2a2653e983 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md @@ -116,7 +116,7 @@ prometheus.exporter.blackbox "example" { address = "http://grafana.com" module = "http_2xx" labels = { - "env": "dev", + "env" = "dev", } } } @@ -204,11 +204,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md b/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md index 02c923ebe898..b6cdf1f98e21 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md @@ -135,11 +135,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md b/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md index 2c1682a5fccc..0aad4bd0d8e7 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md @@ -147,9 +147,9 @@ You can use the following blocks in`prometheus.exporter.cloudwatch` to configure | static > metric | [metric][] | Configures the list of metrics the job should scrape. Multiple metrics can be defined inside one job. | yes | | decoupled_scraping | [decoupled_scraping][] | Configures the decoupled scraping feature to retrieve metrics on a schedule and return the cached metrics. | no | -{{% admonition type="note" %}} +{{< admonition type="note" >}} The `static` and `discovery` blocks are marked as not required, but you must configure at least one static or discovery job. 
-{{% /admonition %}} +{{< /admonition >}} [discovery]: #discovery-block [static]: #static-block @@ -463,11 +463,9 @@ discovery job, the `type` field of each `discovery_job` must match either the de - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.consul.md b/docs/sources/flow/reference/components/prometheus.exporter.consul.md index 81185047459e..6a38931ad0d0 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.consul.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.consul.md @@ -106,11 +106,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md b/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md index 2f22e0048807..bf60a1fee166 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md @@ -96,11 +96,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md b/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md index 6feb9c683eeb..f7150a3d41b4 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md @@ -15,10 +15,10 @@ The `prometheus.exporter.elasticsearch` component embeds [elasticsearch_exporter](https://github.com/prometheus-community/elasticsearch_exporter) for the collection of metrics from ElasticSearch servers. -{{% admonition type="note" %}} +{{< admonition type="note" >}} Currently, an Agent can only collect metrics from a single ElasticSearch server. However, the exporter can collect metrics from all nodes through that configured server.
-{{% /admonition %}} +{{< /admonition >}} We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory security privileges necessary for monitoring your node, as per the [official documentation](https://github.com/prometheus-community/elasticsearch_exporter#elasticsearch-7x-security-privileges). @@ -139,11 +139,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.gcp.md b/docs/sources/flow/reference/components/prometheus.exporter.gcp.md index e9a3d7ab2786..b7ff3158c372 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.gcp.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.gcp.md @@ -59,9 +59,9 @@ prometheus.exporter.gcp "pubsub" { You can use the following arguments to configure the exporter's behavior. Omitted fields take their default values. -{{% admonition type="note" %}} +{{< admonition type="note" >}} Please note that if you are supplying a list of strings for the `extra_filters` argument, any string values within a particular filter string must be enclosed in escaped double quotes. For example, `loadbalancing.googleapis.com:resource.labels.backend_target_name="sample-value"` must be encoded as `"loadbalancing.googleapis.com:resource.labels.backend_target_name=\"sample-value\""` in the River config. -{{% /admonition %}} +{{< /admonition >}} | Name | Type | Description | Default | Required | | ------------------------- | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | -------- | @@ -182,11 +182,9 @@ prometheus.exporter.gcp "lb_subset_with_filter" { - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.github.md b/docs/sources/flow/reference/components/prometheus.exporter.github.md index 753458562ab5..662617299da4 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.github.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.github.md @@ -104,11 +104,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.kafka.md b/docs/sources/flow/reference/components/prometheus.exporter.kafka.md index 59400eea67fe..1de06212f557 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.kafka.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.kafka.md @@ -116,11 +116,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.memcached.md b/docs/sources/flow/reference/components/prometheus.exporter.memcached.md index bd158d76a996..7e9cc9a53d87 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.memcached.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.memcached.md @@ -108,11 +108,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md b/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md index 1aa855542c06..4301eee4f4d2 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md @@ -13,9 +13,9 @@ title: prometheus.exporter.mongodb The `prometheus.exporter.mongodb` component embeds percona's [`mongodb_exporter`](https://github.com/percona/mongodb_exporter). 
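+
+For reference, a minimal configuration might look like the following sketch. The connection URI is a placeholder, and in a cluster you would point one such component at each node:
+
+```river
+prometheus.exporter.mongodb "example" {
+  // Connect to a single MongoDB node; the exporter doesn't fan out to other nodes.
+  mongodb_uri = "mongodb://127.0.0.1:27017"
+}
+```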
-{{% admonition type="note" %}} +{{< admonition type="note" >}} This exporter doesn't collect metrics from multiple nodes. For this integration to work properly, you must connect each node of your MongoDB cluster to a {{< param "PRODUCT_NAME" >}} instance. -{{% /admonition %}} +{{< /admonition >}} We strongly recommend configuring a separate user for {{< param "PRODUCT_NAME" >}}, giving it only the strictly mandatory security privileges necessary for monitoring your node. Refer to the [Percona documentation](https://github.com/percona/mongodb_exporter#permissions) for more information. @@ -97,11 +97,9 @@ prometheus.remote_write "default" { - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mssql.md b/docs/sources/flow/reference/components/prometheus.exporter.mssql.md index e2bcad76830e..6db00954f332 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mssql.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.mssql.md @@ -339,11 +339,9 @@ queries: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mysql.md b/docs/sources/flow/reference/components/prometheus.exporter.mysql.md index 7c0cb90ae69f..edc1c1a5a49f 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mysql.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.mysql.md @@ -221,11 +221,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details.
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md b/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md index 10712ba290d5..4053acc074b0 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md @@ -109,11 +109,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.postgres.md b/docs/sources/flow/reference/components/prometheus.exporter.postgres.md index 39cfd8770108..f50e9fd77709 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.postgres.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.postgres.md @@ -222,11 +222,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.process.md b/docs/sources/flow/reference/components/prometheus.exporter.process.md index ddd315f28797..da135994fd7b 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.process.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.process.md @@ -142,11 +142,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.redis.md b/docs/sources/flow/reference/components/prometheus.exporter.redis.md index cebbbdd02906..ccb114ea8db5 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.redis.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.redis.md @@ -140,11 +140,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.snmp.md b/docs/sources/flow/reference/components/prometheus.exporter.snmp.md index 1e69da7fb941..5bd05efed907 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.snmp.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.snmp.md @@ -14,9 +14,9 @@ title: prometheus.exporter.snmp The `prometheus.exporter.snmp` component embeds [`snmp_exporter`](https://github.com/prometheus/snmp_exporter). `snmp_exporter` lets you collect SNMP data and expose them as Prometheus metrics. -{{% admonition type="note" %}} +{{< admonition type="note" >}} `prometheus.exporter.snmp` uses the latest configuration introduced in version 0.23 of the Prometheus `snmp_exporter`. -{{% /admonition %}} +{{< /admonition >}} ## Usage @@ -40,7 +40,8 @@ Omitted fields take their default values. | `config_file` | `string` | SNMP configuration file defining custom modules. | | no | | `config` | `string` or `secret` | SNMP configuration as inline string. | | no | -The `config_file` argument points to a YAML file defining which snmp_exporter modules to use. See [snmp_exporter](https://github.com/prometheus/snmp_exporter#generating-configuration) for details on how to generate a config file. +The `config_file` argument points to a YAML file defining which snmp_exporter modules to use. +Refer to [snmp_exporter](https://github.com/prometheus/snmp_exporter#generating-configuration) for details on how to generate a configuration file. The `config` argument must be a YAML document as a string defining which SNMP modules and auths to use. `config` is typically loaded by using the exports of another component. For example, @@ -207,11 +208,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details.
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md b/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md index f384fd1a6805..9211f9424cbe 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md @@ -110,11 +110,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.squid.md b/docs/sources/flow/reference/components/prometheus.exporter.squid.md index 49a8639c129d..957297d4af4e 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.squid.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.squid.md @@ -102,11 +102,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.statsd.md b/docs/sources/flow/reference/components/prometheus.exporter.statsd.md index 2e00b8db35b0..d7b2e7fc48df 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.statsd.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.statsd.md @@ -135,11 +135,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.unix.md b/docs/sources/flow/reference/components/prometheus.exporter.unix.md index ab2d88c8175e..7f3f4ca935cf 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.unix.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.unix.md @@ -418,11 +418,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md b/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md index 61c951e9c71d..499805179f11 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md @@ -98,11 +98,9 @@ prometheus.remote_write "default" { - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.windows.md b/docs/sources/flow/reference/components/prometheus.exporter.windows.md index 8042b5458d1c..14e22d13d2b7 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.windows.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.windows.md @@ -14,12 +14,13 @@ The `prometheus.exporter.windows` component embeds [windows_exporter](https://github.com/prometheus-community/windows_exporter) which exposes a wide variety of hardware and OS metrics for Windows-based systems. -The `windows_exporter` itself comprises various _collectors_, which can be -enabled and disabled at will. For more information on collectors, refer to the -[`collectors-list`](#collectors-list) section. +The `windows_exporter` itself comprises various _collectors_, which you can enable and disable as needed. +For more information on collectors, refer to the [`collectors-list`](#collectors-list) section. -**Note** The black and white list config options are available for backwards compatibility but are deprecated. The include -and exclude config options are preferred going forward. +{{< admonition type="note" >}} +The black and white list configuration options are available for backwards compatibility but are deprecated. +The include and exclude configuration options are preferred going forward. +{{< /admonition >}} ## Usage @@ -29,17 +30,18 @@ prometheus.exporter.windows "LABEL" { ``` ## Arguments + The following arguments can be used to configure the exporter's behavior. All arguments are optional. 
Omitted fields take their default values. -| Name | Type | Description | Default | Required | -|----------------------|------------------|-------------------------------------------|---------|----------| -| `enabled_collectors` | `list(string)` | List of collectors to enable. | `["cpu","cs","logical_disk","net","os","service","system"]` | no | -| `timeout` | `duration` | Configure timeout for collecting metrics. | `4m` | no | +| Name | Type | Description | Default | Required | +|----------------------|----------------|-------------------------------------------|-------------------------------------------------------------|----------| +| `enabled_collectors` | `list(string)` | List of collectors to enable. | `["cpu","cs","logical_disk","net","os","service","system"]` | no | +| `timeout` | `duration` | Configure timeout for collecting metrics. | `4m` | no | -`enabled_collectors` defines a hand-picked list of enabled-by-default -collectors. If set, anything not provided in that list is disabled by -default. See the [Collectors list](#collectors-list) for the default set. +`enabled_collectors` defines a hand-picked list of enabled-by-default collectors. +If set, anything not provided in that list is disabled by default. +Refer to the [Collectors list](#collectors-list) for the default set. ## Blocks @@ -75,15 +77,17 @@ text_file | [text_file][] | Configures the text_file collector. | [text_file]: #textfile-block ### dfsr block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- + +Name | Type | Description | Default | Required +-----------------|----------------|------------------------------------------------------|------------------------------------|--------- `source_enabled` | `list(string)` | Comma-separated list of DFSR Perflib sources to use. | `["connection","folder","volume"]` | no ### exchange block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- -`enabled_list` | `string` | Comma-separated list of collectors to use. | `""` | no + +Name | Type | Description | Default | Required +---------------|----------|--------------------------------------------|---------|--------- +`enabled_list` | `string` | Comma-separated list of collectors to use. | `""` | no The collectors specified by `enabled_list` can include the following: @@ -101,86 +105,96 @@ For example, `enabled_list` may be set to `"AvailabilityService,OutlookWebAccess ### iis block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- -`app_exclude` | `string` | Regular expression of applications to ignore. | `""` | no -`app_include` | `string` | Regular expression of applications to report on. | `".*"` | no -`site_exclude` | `string` | Regular expression of sites to ignore. | `""` | no -`site_include` | `string` | Regular expression of sites to report on. | `".*"` | no + +Name | Type | Description | Default | Required +---------------|----------|--------------------------------------------------|---------|--------- +`app_exclude` | `string` | Regular expression of applications to ignore. | `""` | no +`app_include` | `string` | Regular expression of applications to report on. | `".*"` | no +`site_exclude` | `string` | Regular expression of sites to ignore. | `""` | no +`site_include` | `string` | Regular expression of sites to report on. 
| `".*"` | no ### logical_disk block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- -`exclude` | `string` | Regular expression of volumes to exclude. | `""` | no -`include` | `string` | Regular expression of volumes to include. | `".+"` | no + +Name | Type | Description | Default | Required +----------|----------|-------------------------------------------|---------|--------- +`exclude` | `string` | Regular expression of volumes to exclude. | `""` | no +`include` | `string` | Regular expression of volumes to include. | `".+"` | no Volume names must match the regular expression specified by `include` and must _not_ match the regular expression specified by `exclude` to be included. ### msmq block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- -`where_clause` | `string` | WQL 'where' clause to use in WMI metrics query. | `""` | no + +Name | Type | Description | Default | Required +---------------|----------|-------------------------------------------------|---------|--------- +`where_clause` | `string` | WQL 'where' clause to use in WMI metrics query. | `""` | no Specifying `enabled_classes` is useful to limit the response to the MSMQs you specify, reducing the size of the response. ### mssql block + Name | Type | Description | Default | Required ---- |----------| ----------- | ------- | -------- `enabled_classes` | `list(string)` | Comma-separated list of MSSQL WMI classes to use. | `["accessmethods", "availreplica", "bufman", "databases", "dbreplica", "genstats", "locks", "memmgr", "sqlstats", "sqlerrorstransactions"]` | no ### network block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- -`exclude` | `string` | Regular expression of NIC:s to exclude. | `""` | no -`include` | `string` | Regular expression of NIC:s to include. | `".*"` | no + +Name | Type | Description | Default | Required +----------|----------|-----------------------------------------|---------|--------- +`exclude` | `string` | Regular expression of NIC:s to exclude. | `""` | no +`include` | `string` | Regular expression of NIC:s to include. | `".*"` | no NIC names must match the regular expression specified by `include` and must _not_ match the regular expression specified by `exclude` to be included. ### process block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- -`exclude` | `string` | Regular expression of processes to exclude. | `""` | no -`include` | `string` | Regular expression of processes to include. | `".*"` | no + +Name | Type | Description | Default | Required +----------|----------|---------------------------------------------|---------|--------- +`exclude` | `string` | Regular expression of processes to exclude. | `""` | no +`include` | `string` | Regular expression of processes to include. | `".*"` | no Processes must match the regular expression specified by `include` and must _not_ match the regular expression specified by `exclude` to be included. ### scheduled_task block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- -`exclude` | `string` | Regexp of tasks to exclude. | `""` | no -`include` | `string` | Regexp of tasks to include. | `".+"` | no + +Name | Type | Description | Default | Required +----------|----------|-----------------------------|---------|--------- +`exclude` | `string` | Regexp of tasks to exclude. 
| `""` | no +`include` | `string` | Regexp of tasks to include. | `".+"` | no For a server name to be included, it must match the regular expression specified by `include` and must _not_ match the regular expression specified by `exclude`. ### service block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- -`use_api` | `string` | Use API calls to collect service data instead of WMI. | `false` | no -`where_clause` | `string` | WQL 'where' clause to use in WMI metrics query. | `""` | no + +Name | Type | Description | Default | Required +---------------|----------|-------------------------------------------------------|---------|--------- +`use_api` | `string` | Use API calls to collect service data instead of WMI. | `false` | no +`where_clause` | `string` | WQL 'where' clause to use in WMI metrics query. | `""` | no The `where_clause` argument can be used to limit the response to the services you specify, reducing the size of the response. If `use_api` is enabled, 'where_clause' won't be effective. ### smtp block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- -`exclude` | `string` | Regexp of virtual servers to ignore. | | no -`include` | `string` | Regexp of virtual servers to include. | `".+"` | no + +Name | Type | Description | Default | Required +----------|----------|---------------------------------------|---------|--------- +`exclude` | `string` | Regexp of virtual servers to ignore. | | no +`include` | `string` | Regexp of virtual servers to include. | `".+"` | no For a server name to be included, it must match the regular expression specified by `include` and must _not_ match the regular expression specified by `exclude`. ### text_file block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- + +Name | Type | Description | Default | Required +----------------------|----------|----------------------------------------------------|-------------------------------------------------------|--------- `text_file_directory` | `string` | The directory containing the files to be ingested. | `C:\Program Files\Grafana Agent Flow\textfile_inputs` | no When `text_file_directory` is set, only files with the extension `.prom` inside the specified directory are read. Each `.prom` file found must end with an empty line feed to work properly. @@ -270,12 +284,12 @@ Name | Description | Enabled by default [vmware_blast](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.vmware_blast.md) | VMware Blast session metrics | [vmware](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent | -See the linked documentation on each collector for more information on reported metrics, configuration settings and usage examples. +Refer to the linked documentation on each collector for more information on reported metrics, configuration settings and usage examples. -{{% admonition type="caution" %}} -Certain collectors will cause {{< param "PRODUCT_ROOT_NAME" >}} to crash if those collectors are used and the required infrastructure is not installed. -These include but are not limited to mscluster_*, vmware, nps, dns, msmq, teradici_pcoip, ad, hyperv, and scheduled_task. 
-{{% /admonition %}} +{{< admonition type="caution" >}} +Certain collectors will cause {{< param "PRODUCT_ROOT_NAME" >}} to crash if those collectors are used and the required infrastructure isn't installed. +These include but aren't limited to mscluster_*, vmware, nps, dns, msmq, teradici_pcoip, ad, hyperv, and scheduled_task. +{{< /admonition >}} ## Example @@ -317,11 +331,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md b/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md index fa324640d0ee..b8ef773567ca 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md +++ b/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md @@ -265,11 +265,9 @@ prometheus.operator.podmonitors "pods" { - Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.operator.probes.md b/docs/sources/flow/reference/components/prometheus.operator.probes.md index 256634a88438..c8fddb96e1dd 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.probes.md +++ b/docs/sources/flow/reference/components/prometheus.operator.probes.md @@ -267,11 +267,9 @@ prometheus.operator.probes "probes" { - Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md b/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md index 8b2e0ce29cdf..29a6414a6339 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md +++ b/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md @@ -267,11 +267,9 @@ prometheus.operator.servicemonitors "services" { - Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.receive_http.md b/docs/sources/flow/reference/components/prometheus.receive_http.md index d48985cc3f18..38d43cef5067 100644 --- a/docs/sources/flow/reference/components/prometheus.receive_http.md +++ b/docs/sources/flow/reference/components/prometheus.receive_http.md @@ -138,11 +138,9 @@ prometheus.remote_write "local" { - Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.relabel.md b/docs/sources/flow/reference/components/prometheus.relabel.md index 65cb02394d4a..22d6c0a42d28 100644 --- a/docs/sources/flow/reference/components/prometheus.relabel.md +++ b/docs/sources/flow/reference/components/prometheus.relabel.md @@ -181,11 +181,9 @@ The two resulting metrics are then propagated to each receiver defined in the - Components that consume [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.remote_write.md b/docs/sources/flow/reference/components/prometheus.remote_write.md index f869343e0919..5664cd10aa6e 100644 --- a/docs/sources/flow/reference/components/prometheus.remote_write.md +++ b/docs/sources/flow/reference/components/prometheus.remote_write.md @@ -418,11 +418,9 @@ Any labels that start with `__` will be removed before sending to the endpoint. - Components that consume [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.scrape.md b/docs/sources/flow/reference/components/prometheus.scrape.md index 8adf775687f1..cd204221030d 100644 --- a/docs/sources/flow/reference/components/prometheus.scrape.md +++ b/docs/sources/flow/reference/components/prometheus.scrape.md @@ -51,6 +51,7 @@ Name | Type | Description | Default | Required `enable_protobuf_negotiation` | `bool` | Whether to enable protobuf negotiation with the client. | `false` | no `honor_labels` | `bool` | Indicator whether the scraped metrics should remain unmodified. | `false` | no `honor_timestamps` | `bool` | Indicator whether the scraped timestamps should be respected. | `true` | no +`track_timestamps_staleness` | `bool` | Indicator whether to track the staleness of the scraped timestamps. | `false` | no `params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no `scrape_classic_histograms` | `bool` | Whether to scrape a classic histogram that is also exposed as a native histogram. | `false` | no `scrape_interval` | `duration` | How frequently to scrape the targets of this scrape configuration. | `"60s"` | no @@ -76,6 +77,20 @@ Name | Type | Description | Default | Required - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. +`track_timestamps_staleness` controls whether Prometheus tracks [staleness][prom-staleness] of metrics that have an explicit timestamp present in the scraped data. +* An "explicit timestamp" is an optional timestamp in the [Prometheus metrics exposition format][prom-text-exposition-format]. For example, this sample has a timestamp of `1395066363000`: + ``` + http_requests_total{method="post",code="200"} 1027 1395066363000 + ``` +* If `track_timestamps_staleness` is set to `true`, a staleness marker is inserted when a metric is no longer present or the target is down. +* A "staleness marker" is a {{< term "sample" >}}sample{{< /term >}} with a specific NaN value that's reserved for internal use by Prometheus. +* You should set `track_timestamps_staleness` to `true` if the database that metrics are written to has [out of order ingestion][mimir-ooo] enabled. +* If `track_timestamps_staleness` is set to `false`, samples with explicit timestamps are only marked as stale after a certain time period, which is 5 minutes by default in Prometheus.
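+
+For example, the following sketch enables staleness tracking for a job that scrapes explicitly timestamped samples. The target address and the `prometheus.remote_write.mimir` component are assumptions for illustration:
+
+```river
+prometheus.scrape "timestamped" {
+  targets                    = [{"__address__" = "pushgateway.example.com:9091"}]
+  // Keep the timestamps exposed by the target, and insert staleness
+  // markers as soon as those series disappear or the target goes down.
+  honor_timestamps           = true
+  track_timestamps_staleness = true
+  forward_to                 = [prometheus.remote_write.mimir.receiver]
+}
+```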
+ +[prom-text-exposition-format]: https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format +[prom-staleness]: https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness +[mimir-ooo]: https://grafana.com/docs/mimir/latest/configure/configure-out-of-order-samples-ingestion/ + ## Blocks The following blocks are supported inside the definition of `prometheus.scrape`: @@ -298,11 +313,9 @@ Special labels added after a scrape - Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/pyroscope.ebpf.md b/docs/sources/flow/reference/components/pyroscope.ebpf.md index a324e71293ab..590ad574baf9 100644 --- a/docs/sources/flow/reference/components/pyroscope.ebpf.md +++ b/docs/sources/flow/reference/components/pyroscope.ebpf.md @@ -18,9 +18,9 @@ title: pyroscope.ebpf `pyroscope.ebpf` configures an eBPF profiling job for the current host. The collected performance profiles are forwarded to the list of receivers passed in `forward_to`. -{{% admonition type="note" %}} +{{< admonition type="note" >}} To use the `pyroscope.ebpf` component you must run {{< param "PRODUCT_NAME" >}} as root and inside the host PID namespace. -{{% /admonition %}} +{{< /admonition >}} You can specify multiple `pyroscope.ebpf` components by giving them different labels. However, this isn't recommended because it can lead to additional memory and CPU usage. @@ -95,16 +95,20 @@ can help you pin down a profiling target. | `__name__` | pyroscope metric name. Defaults to `process_cpu`. | | `__container_id__` | The container ID derived from target. | -### Container ID +### Targets -Each collected stack trace is then associated with a specified target from the targets list, determined by a -container ID. This association process involves checking the `__container_id__`, `__meta_docker_container_id`, -and `__meta_kubernetes_pod_container_id` labels of a target against the `/proc/{pid}/cgroup` of a process. +One of the following special labels _must_ be included in each target of `targets` and the label must correspond to the container or process that is profiled: -If a corresponding container ID is found, the stack traces are aggregated per target based on the container ID. -If a container ID is not found, the stack trace is associated with a `default_target`. +* `__container_id__`: The container ID. +* `__meta_docker_container_id`: The ID of the Docker container. +* `__meta_kubernetes_pod_container_id`: The ID of the Kubernetes pod container. +* `__process_pid__`: The process ID. -Any stack traces not associated with a listed target are ignored. +Each process is then associated with a specified target from the targets list, determined by a container ID or process PID. +If a process's container ID matches a target's container ID label, the stack traces are aggregated per target based on the container ID.
+If a process's PID matches a target's process PID label, the stack traces are aggregated per target based on the process PID. +Otherwise, the process isn't profiled. ### Service name @@ -298,11 +302,9 @@ pyroscope.ebpf "default" { - Components that export [Pyroscope `ProfilesReceiver`]({{< relref "../compatibility/#pyroscope-profilesreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/pyroscope.java.md b/docs/sources/flow/reference/components/pyroscope.java.md new file mode 100644 index 000000000000..92407132e99d --- /dev/null +++ b/docs/sources/flow/reference/components/pyroscope.java.md @@ -0,0 +1,189 @@ +--- +aliases: + - /docs/grafana-cloud/agent/flow/reference/components/pyroscope.java/ + - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.java/ + - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.java/ + - /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.java/ +canonical: https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.java/ +description: Learn about pyroscope.java +title: pyroscope.java +--- + +# pyroscope.java + +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} + +`pyroscope.java` continuously profiles Java processes running on the local Linux OS +using [async-profiler](https://github.com/async-profiler/async-profiler). + +{{< admonition type="note" >}} +To use the `pyroscope.java` component you must run {{< param "PRODUCT_NAME" >}} as root and inside the host PID namespace. +{{< /admonition >}} + +## Usage + +```river +pyroscope.java "LABEL" { + targets = TARGET_LIST + forward_to = RECEIVER_LIST +} +``` + +## Arguments + +The following arguments are supported: + +| Name | Type | Description | Default | Required | +|--------------|--------------------------|--------------------------------------------------|---------|----------| +| `targets` | `list(map(string))` | List of Java process targets to profile. | | yes | +| `forward_to` | `list(ProfilesReceiver)` | List of receivers to send collected profiles to. | | yes | +| `tmp_dir` | `string` | Temporary directory to store async-profiler. | `/tmp` | no | + +## Profiling behavior + +The special label `__process_pid__` _must always_ be present in each target of `targets` and corresponds to the `PID` of +the process to profile. + +After component startup, `pyroscope.java` creates a temporary directory under `tmp_dir` and extracts the +async-profiler binaries for both glibc and musl into the directory with the following layout: + +``` +/tmp/grafana-agent-asprof-glibc-{SHA1}/bin/asprof +/tmp/grafana-agent-asprof-glibc-{SHA1}/lib/libasyncProfiler.so +/tmp/grafana-agent-asprof-musl-{SHA1}/bin/asprof +/tmp/grafana-agent-asprof-musl-{SHA1}/lib/libasyncProfiler.so +``` + +When profiling of a process starts, the component detects the libc type and copies the corresponding `libAsyncProfiler.so` into the +target process's file system at the same path.
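+
+Putting this together, a minimal configuration that profiles one known process might look like the following sketch. The PID, the `service_name` value, and the `pyroscope.write.staging` component are assumptions for illustration:
+
+```river
+pyroscope.java "static_example" {
+  // __process_pid__ must point at a running Java process on this host.
+  targets    = [{"__process_pid__" = "12345", "service_name" = "my-java-app"}]
+  forward_to = [pyroscope.write.staging.receiver]
+}
+```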
+ +{{< admonition type="note" >}} +The `asprof` binary runs with root permissions. +If you change the `tmp_dir` configuration to something other than `/tmp`, then you must ensure that the +directory is only writable by root. +{{< /admonition >}} + +### `targets` argument + +The special `__process_pid__` label _must always_ be present and corresponds to the +process PID that is used for profiling. + +Labels starting with a double underscore (`__`) are treated as _internal_, and are removed prior to scraping. + +The special label `service_name` is required and must always be present. +If it isn't specified, `pyroscope.java` attempts to infer it from +either of the following sources, in this order: +1. `__meta_kubernetes_pod_annotation_pyroscope_io_service_name` which is a `pyroscope.io/service_name` pod annotation. +2. `__meta_kubernetes_namespace` and `__meta_kubernetes_pod_container_name` +3. `__meta_docker_container_name` +4. `__meta_dockerswarm_container_label_service_name` or `__meta_dockerswarm_service_name` + +If `service_name` is not specified and could not be inferred, then it is set to `unspecified`. + +## Blocks + +The following blocks are supported inside the definition of +`pyroscope.java`: + +| Hierarchy | Block | Description | Required | +|------------------|----------------------|------------------------------------------|----------| +| profiling_config | [profiling_config][] | Describes Java profiling configuration. | no | + +[profiling_config]: #profiling_config-block + +### profiling_config block + +The `profiling_config` block describes how async-profiler is invoked. + +The following arguments are supported: + +| Name | Type | Description | Default | Required | +|---------------|------------|-----------------------------------------------------------------------------------------------------------|----------|----------| +| `interval` | `duration` | How frequently to collect profiles from the targets. | `"60s"` | no | +| `cpu` | `bool` | A flag to enable CPU profiling, using the `itimer` async-profiler event. | `true` | no | +| `sample_rate` | `int` | CPU profiling sample rate. It is converted from Hz to interval and passed as `-i` arg to async-profiler. | `100` | no | +| `alloc` | `string` | Allocation profiling sampling configuration. It is passed as `--alloc` arg to async-profiler. | `"512k"` | no | +| `lock` | `string` | Lock profiling sampling configuration. It is passed as `--lock` arg to async-profiler. | `"10ms"` | no | + +For more information on async-profiler configuration, refer to [profiler-options](https://github.com/async-profiler/async-profiler?tab=readme-ov-file#profiler-options). + +## Exported fields + +`pyroscope.java` doesn't export any fields that can be referenced by other +components. + +## Component health + +`pyroscope.java` is only reported as unhealthy when given an invalid +configuration. In those cases, exported fields retain their last healthy +values. + +## Debug information + +`pyroscope.java` doesn't expose any component-specific debug information. + +## Debug metrics + +`pyroscope.java` doesn't expose any component-specific debug metrics.
+ +## Examples + +### Profile every java process on the current host + +```river +pyroscope.write "staging" { + endpoint { + url = "http://localhost:4040" + } +} + +discovery.process "all" { + refresh_interval = "60s" + discover_config { + cwd = true + exe = true + commandline = true + username = true + uid = true + container_id = true + } +} + +discovery.relabel "java" { + targets = discovery.process.all.targets + rule { + action = "keep" + regex = ".*/java$" + source_labels = ["__meta_process_exe"] + } +} + +pyroscope.java "java" { + targets = discovery.relabel.java.output + forward_to = [pyroscope.write.staging.receiver] + profiling_config { + interval = "60s" + alloc = "512k" + cpu = true + sample_rate = 100 + lock = "1ms" + } +} +``` + + + +## Compatible components + +`pyroscope.java` can accept arguments from the following components: + +- Components that export [Targets]({{< relref "../compatibility/#targets-exporters" >}}) +- Components that export [Pyroscope `ProfilesReceiver`]({{< relref "../compatibility/#pyroscope-profilesreceiver-exporters" >}}) + + +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/pyroscope.scrape.md b/docs/sources/flow/reference/components/pyroscope.scrape.md index 74c1fa30e873..35e7022df482 100644 --- a/docs/sources/flow/reference/components/pyroscope.scrape.md +++ b/docs/sources/flow/reference/components/pyroscope.scrape.md @@ -114,6 +114,7 @@ either of the following sources, in this order: 1. `__meta_kubernetes_pod_annotation_pyroscope_io_service_name` which is a `pyroscope.io/service_name` pod annotation. 2. `__meta_kubernetes_namespace` and `__meta_kubernetes_pod_container_name` 3. `__meta_docker_container_name` +4. `__meta_dockerswarm_container_label_service_name` or `__meta_dockerswarm_service_name` If `service_name` is not specified and could not be inferred, then it is set to `unspecified`. @@ -522,11 +523,10 @@ discovery.http "dynamic_targets" { } pyroscope.scrape "local" { - targets = [ - {"__address__" = "localhost:4100", "service_name"="pyroscope"}, + targets = concat([ + {"__address__" = "localhost:4040", "service_name"="pyroscope"}, {"__address__" = "localhost:12345", "service_name"="agent"}, - discovery.http.dynamic_targets.targets, - ] + ], discovery.http.dynamic_targets.targets) forward_to = [pyroscope.write.local.receiver] } @@ -589,11 +589,9 @@ http://localhost:12345/debug/pprof/mutex - Components that export [Pyroscope `ProfilesReceiver`]({{< relref "../compatibility/#pyroscope-profilesreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/pyroscope.write.md b/docs/sources/flow/reference/components/pyroscope.write.md index 38b6b542abc0..3012be03319c 100644 --- a/docs/sources/flow/reference/components/pyroscope.write.md +++ b/docs/sources/flow/reference/components/pyroscope.write.md @@ -168,11 +168,9 @@ pyroscope.scrape "default" { - Components that consume [Pyroscope `ProfilesReceiver`]({{< relref "../compatibility/#pyroscope-profilesreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/config-blocks/http.md b/docs/sources/flow/reference/config-blocks/http.md index 39ffa5b2502c..f90944c3ff59 100644 --- a/docs/sources/flow/reference/config-blocks/http.md +++ b/docs/sources/flow/reference/config-blocks/http.md @@ -50,12 +50,12 @@ tls > windows_certificate_filter > server | [server][] | Con The `tls` block configures TLS settings for the HTTP server. -{{% admonition type="warning" %}} +{{< admonition type="warning" >}} If you add the `tls` block and reload the configuration when {{< param "PRODUCT_NAME" >}} is running, existing connections will continue communicating over plaintext. Similarly, if you remove the `tls` block and reload the configuration when {{< param "PRODUCT_NAME" >}} is running, existing connections will continue communicating over TLS. To ensure all connections use TLS, configure the `tls` block before you start {{< param "PRODUCT_NAME" >}}. -{{% /admonition %}} +{{< /admonition >}} Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- @@ -159,12 +159,12 @@ the following TLS settings are overridden and will cause an error if defined. * `client_ca` * `client_ca_file` -{{% admonition type="warning" %}} +{{< admonition type="warning" >}} This feature is only available on Windows. TLS min and max may not be compatible with the certificate stored in the Windows certificate store. The `windows_certificate_filter` will serve the found certificate even if it is not compatible with the specified TLS version. -{{% /admonition %}} +{{< /admonition >}} ### server block diff --git a/docs/sources/flow/release-notes.md b/docs/sources/flow/release-notes.md index f8053bf3c0b3..baa91ae3d068 100644 --- a/docs/sources/flow/release-notes.md +++ b/docs/sources/flow/release-notes.md @@ -18,7 +18,7 @@ The release notes provide information about deprecations and breaking changes in For a complete list of changes to {{< param "PRODUCT_ROOT_NAME" >}}, with links to pull requests and related issues when available, refer to the [Changelog](https://github.com/grafana/agent/blob/main/CHANGELOG.md). -{{% admonition type="note" %}} +{{< admonition type="note" >}} These release notes are specific to {{< param "PRODUCT_NAME" >}}. 
Other release notes for the different {{< param "PRODUCT_ROOT_NAME" >}} variants are contained on separate pages: @@ -27,7 +27,19 @@ Other release notes for the different {{< param "PRODUCT_ROOT_NAME" >}} variants [release-notes-static]: {{< relref "../static/release-notes.md" >}} [release-notes-operator]: {{< relref "../operator/release-notes.md" >}} -{{% /admonition %}} +{{< /admonition >}} + +## v0.40 + +### Breaking change: Prohibit the configuration of services within modules. + +Previously it was possible to configure the HTTP service via the [HTTP config block](https://grafana.com/docs/agent/v0.39/flow/reference/config-blocks/http/) inside of a module. +This functionality is now only available in the main configuration. + +### Breaking change: Change the default value of `disable_high_cardinality_metrics` to `true`. + +The `disable_high_cardinality_metrics` configuration argument is used by `otelcol.exporter` components such as `otelcol.exporter.otlp`. +If you need to see high cardinality metrics containing labels such as IP addresses and port numbers, you now have to explicitly set `disable_high_cardinality_metrics` to `false`. ## v0.39 diff --git a/docs/sources/flow/tasks/configure/configure-macos.md b/docs/sources/flow/tasks/configure/configure-macos.md index fc1c6677f579..8b860a010dcd 100644 --- a/docs/sources/flow/tasks/configure/configure-macos.md +++ b/docs/sources/flow/tasks/configure/configure-macos.md @@ -31,11 +31,11 @@ To configure {{< param "PRODUCT_NAME" >}} on macOS, perform the following steps: ## Configure the {{% param "PRODUCT_NAME" %}} service -{{% admonition type="note" %}} +{{< admonition type="note" >}} Due to limitations in Homebrew, customizing the service used by {{< param "PRODUCT_NAME" >}} on macOS requires changing the Homebrew formula and reinstalling {{< param "PRODUCT_NAME" >}}. 
-{{% /admonition %}} +{{< /admonition >}} To customize the {{< param "PRODUCT_NAME" >}} service on macOS, perform the following steps: diff --git a/docs/sources/flow/tasks/estimate-resource-usage.md b/docs/sources/flow/tasks/estimate-resource-usage.md index e7b066d9e8ee..f3ed1b7aed05 100644 --- a/docs/sources/flow/tasks/estimate-resource-usage.md +++ b/docs/sources/flow/tasks/estimate-resource-usage.md @@ -4,7 +4,7 @@ aliases: - /docs/grafana-cloud/agent/flow/tasks/estimate-resource-usage/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/estimate-resource-usage/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/estimate-resource-usage/ - - /docs/grafana-cloud/send-data/agent/flow/tasks/estimate-resource-usage/ + - /docs/grafana-cloud/send-data/agent/flow/tasks/estimate-resource-usage/ # Previous page aliases for backwards compatibility: - /docs/agent/flow/monitoring/resource-usage/ - /docs/grafana-cloud/agent/flow/monitoring/resource-usage/ @@ -13,7 +13,7 @@ aliases: - /docs/grafana-cloud/send-data/agent/flow/monitoring/resource-usage/ - ../monitoring/resource-usage/ # /docs/agent/latest/flow/monitoring/resource-usage/ canonical: https://grafana.com/docs/agent/latest/flow/monitoring/resource-usage/ -description: Estimate expected Agent resource usage +description: Estimate expected Grafana Agent resource usage headless: true title: Estimate resource usage menuTitle: Estimate resource usage @@ -22,24 +22,22 @@ weight: 190 # Estimate {{% param "PRODUCT_NAME" %}} resource usage -This page provides guidance for expected resource usage of -{{% param "PRODUCT_NAME" %}} for each telemetry type, based on operational -experience of some of the {{% param "PRODUCT_NAME" %}} maintainers. +This page provides guidance for expected resource usage of +{{< param "PRODUCT_NAME" >}} for each telemetry type, based on operational +experience of some of the {{< param "PRODUCT_NAME" >}} maintainers. -{{% admonition type="note" %}} - -The resource usage depends on the workload, hardware and the configuration used. +{{< admonition type="note" >}} +The resource usage depends on the workload, hardware, and the configuration used. The information on this page is a good starting point for most users, but your actual usage may be different. - -{{% /admonition %}} +{{< /admonition >}} ## Prometheus metrics The Prometheus metrics resource usage depends mainly on the number of active series that need to be scraped and the scrape interval. -As a rule of thumb, **per each 1 million active series** and with the default +As a rule of thumb, **per each 1 million active series** and with the default scrape interval, you can expect to use approximately: * 0.4 CPU cores @@ -48,8 +46,7 @@ scrape interval, you can expect to use approximately: These recommendations are based on deployments that use [clustering][], but they will broadly apply to other deployment modes. For more information on how to -deploy {{% param "PRODUCT_NAME" %}}, see -[deploying grafana agent][]. +deploy {{< param "PRODUCT_NAME" >}}, see [deploying grafana agent][]. [deploying grafana agent]: {{< relref "../get-started/deploy-agent.md" >}} [clustering]: {{< relref "../concepts/clustering.md" >}} @@ -67,7 +64,7 @@ to use approximately: These recommendations are based on Kubernetes DaemonSet deployments on clusters with relatively small number of nodes and high logs volume on each. 
The resource usage can be higher per each 1 MiB/second of logs if you have a large number of -small nodes due to the constant overhead of running the {{% param "PRODUCT_NAME" %}} on each node. +small nodes due to the constant overhead of running the {{< param "PRODUCT_NAME" >}} on each node. Additionally, factors such as number of labels, number of files and average log line length may all play a role in the resource usage. diff --git a/docs/sources/flow/tasks/migrate/from-prometheus.md b/docs/sources/flow/tasks/migrate/from-prometheus.md index 62fef82d3c2d..84241791ec24 100644 --- a/docs/sources/flow/tasks/migrate/from-prometheus.md +++ b/docs/sources/flow/tasks/migrate/from-prometheus.md @@ -71,10 +71,10 @@ This conversion will enable you to take full advantage of the many additional fe 1. If the `convert` command can't convert a Prometheus configuration, diagnostic information is sent to `stderr`.\ You can bypass any non-critical issues and output the {{< param "PRODUCT_NAME" >}} configuration using a best-effort conversion by including the `--bypass-errors` flag. - {{% admonition type="caution" %}} + {{< admonition type="caution" >}} If you bypass the errors, the behavior of the converted configuration may not match the original Prometheus configuration. Make sure you fully test the converted configuration before using it in a production environment. - {{% /admonition %}} + {{< /admonition >}} {{< code >}} @@ -143,10 +143,10 @@ Your configuration file must be a valid Prometheus configuration file rather tha 1. If your Prometheus configuration can't be converted and loaded directly into {{< param "PRODUCT_NAME" >}}, diagnostic information is sent to `stderr`. You can bypass any non-critical issues and start the Agent by including the `--config.bypass-conversion-errors` flag in addition to `--config.format=prometheus`. - {{% admonition type="caution" %}} + {{< admonition type="caution" >}} If you bypass the errors, the behavior of the converted configuration may not match the original Prometheus configuration. Do not use this flag in a production environment. - {{% /admonition %}} + {{< /admonition >}} ## Example diff --git a/docs/sources/flow/tasks/migrate/from-promtail.md b/docs/sources/flow/tasks/migrate/from-promtail.md index 182dec857c3b..7a0dda9b9248 100644 --- a/docs/sources/flow/tasks/migrate/from-promtail.md +++ b/docs/sources/flow/tasks/migrate/from-promtail.md @@ -71,10 +71,10 @@ This conversion will enable you to take full advantage of the many additional fe 1. If the convert command can't convert a Promtail configuration, diagnostic information is sent to `stderr`. You can bypass any non-critical issues and output the {{< param "PRODUCT_NAME" >}} configuration using a best-effort conversion by including the `--bypass-errors` flag. - {{% admonition type="caution" %}} + {{< admonition type="caution" >}} If you bypass the errors, the behavior of the converted configuration may not match the original Promtail configuration. Make sure you fully test the converted configuration before using it in a production environment. - {{% /admonition %}} + {{< /admonition >}} {{< code >}} @@ -139,10 +139,10 @@ Your configuration file must be a valid Promtail configuration file rather than 1. If your Promtail configuration can't be converted and loaded directly into {{< param "PRODUCT_ROOT_NAME" >}}, diagnostic information is sent to `stderr`. 
You can bypass any non-critical issues and start {{< param "PRODUCT_ROOT_NAME" >}} by including the `--config.bypass-conversion-errors` flag in addition to `--config.format=promtail`. - {{% admonition type="caution" %}} + {{< admonition type="caution" >}} If you bypass the errors, the behavior of the converted configuration may not match the original Promtail configuration. Do not use this flag in a production environment. - {{%/admonition %}} + {{< /admonition >}} ## Example @@ -213,7 +213,7 @@ After the configuration is converted, review the {{< param "PRODUCT_NAME" >}} co The following list is specific to the convert command and not {{< param "PRODUCT_NAME" >}}: * Check if you are using any extra command line arguments with Promtail that aren't present in your configuration file. For example, `-max-line-size`. -* Check if you are setting any environment variables, whether [expanded in the config file][] itself or consumed directly by Promtail, such as `JAEGER_AGENT_HOST`. +* Check if you are setting any environment variables, whether [expanded in the configuration file][] itself or consumed directly by Promtail, such as `JAEGER_AGENT_HOST`. * In {{< param "PRODUCT_NAME" >}}, the positions file is saved at a different location. Refer to the [loki.source.file][] documentation for more details. Check if you have any existing setup, for example, a Kubernetes Persistent Volume, that you must update to use the new positions file path. @@ -224,7 +224,7 @@ The following list is specific to the convert command and not {{< param "PRODUCT [Promtail]: https://www.grafana.com/docs/loki//clients/promtail/ [debugging]: #debugging -[expanded in the config file]: https://www.grafana.com/docs/loki//clients/promtail/configuration/#use-environment-variables-in-the-configuration +[expanded in the configuration file]: https://www.grafana.com/docs/loki//clients/promtail/configuration/#use-environment-variables-in-the-configuration {{% docs/reference %}} [local.file_match]: "/docs/agent/ -> /docs/agent//flow/reference/components/local.file_match.md" diff --git a/docs/sources/flow/tasks/migrate/from-static.md b/docs/sources/flow/tasks/migrate/from-static.md index ff006b514a5e..5d1b73626f60 100644 --- a/docs/sources/flow/tasks/migrate/from-static.md +++ b/docs/sources/flow/tasks/migrate/from-static.md @@ -65,7 +65,7 @@ This conversion will enable you to take full advantage of the many additional fe Replace the following: * _``_: The full path to the [Static][] configuration. - * _`H_`: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. + * _`_`: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. 1. [Run][] {{< param "PRODUCT_NAME" >}} using the new {{< param "PRODUCT_NAME" >}} configuration from _``_: @@ -74,10 +74,10 @@ This conversion will enable you to take full advantage of the many additional fe 1. If the convert command can't convert a [Static][] configuration, diagnostic information is sent to `stderr`. You can use the `--bypass-errors` flag to bypass any non-critical issues and output the {{< param "PRODUCT_NAME" >}} configuration using a best-effort conversion. - {{% admonition type="caution" %}} + {{< admonition type="caution" >}} If you bypass the errors, the behavior of the converted configuration may not match the original Grafana Agent Static configuration. Make sure you fully test the converted configuration before using it in a production environment. 
- {{% /admonition %}} + {{< /admonition >}} {{< code >}} @@ -142,10 +142,10 @@ Your configuration file must be a valid [Static] configuration file. 1. If your [Static] configuration can't be converted and loaded directly into {{< param "PRODUCT_NAME" >}}, diagnostic information is sent to `stderr`. You can use the `--config.bypass-conversion-errors` flag with `--config.format=static` to bypass any non-critical issues and start {{< param "PRODUCT_NAME" >}}. - {{% admonition type="caution" %}} + {{< admonition type="caution" >}} If you bypass the errors, the behavior of the converted configuration may not match the original Grafana Agent Static configuration. Do not use this flag in a production environment. - {{%/admonition %}} + {{< /admonition >}} ## Example diff --git a/docs/sources/flow/tutorials/flow-by-example/_index.md b/docs/sources/flow/tutorials/flow-by-example/_index.md new file mode 100644 index 000000000000..d9b037350272 --- /dev/null +++ b/docs/sources/flow/tutorials/flow-by-example/_index.md @@ -0,0 +1,17 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/ +- /docs/grafana-cloud/send-data/agent/flow/tutorials/flow-by-example/ +canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/ +description: Learn how to use Grafana Agent Flow +title: Flow by example +weight: 100 +--- + +# Flow by example + +This section provides a set of step-by-step tutorials that show how to use {{< param "PRODUCT_NAME" >}}. + +{{< section >}} diff --git a/docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md b/docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md new file mode 100644 index 000000000000..59bc59c5d17b --- /dev/null +++ b/docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md @@ -0,0 +1,274 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/first-components-and-stdlib/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/first-components-and-stdlib/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/first-components-and-stdlib/ +- /docs/grafana-cloud/send-data/agent/flow/tutorials/first-components-and-stdlib/ +canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/first-components-and-stdlib/ +description: Learn about the basics of River and the configuration language +title: First components and introducing the standard library +weight: 20 +--- + +# First components and the standard library + +This tutorial covers the basics of the River language and the standard library. It introduces a basic pipeline that collects metrics from the host and sends them to Prometheus. + +## River basics + +[Configuration language]: https://grafana.com/docs/agent//flow/concepts/config-language/ +[Configuration language concepts]: https://grafana.com/docs/agent//flow/concepts/configuration_language/ +[Standard library documentation]: https://grafana.com/docs/agent//flow/reference/stdlib/ + +**Recommended reading** + +- [Configuration language][] +- [Configuration language concepts][] + +[River](https://github.com/grafana/river) is an HCL-inspired configuration language used to configure {{< param "PRODUCT_NAME" >}}. A River file is comprised of three things: + +1. 
**Attributes**
+
+    `key = value` pairs used to configure individual settings.
+
+    ```river
+    url = "http://localhost:9090"
+    ```
+
+1. **Expressions**
+
+    Expressions are used to compute values. They can be constant values (for example, `"localhost:9090"`), or they can be more complex: for example, referencing a component's export such as `prometheus.exporter.unix.targets`, a mathematical expression such as `(1 + 2) * 3`, or a standard library function call such as `env("HOME")`. We will use more expressions as we work through the examples. If you are curious, you can find a list of available standard library functions in the [Standard library documentation][].
+
+1. **Blocks**
+
+    Blocks are used to configure components with groups of attributes or nested blocks. The following example block can be used to configure the logging output of {{< param "PRODUCT_NAME" >}}:
+
+    ```river
+    logging {
+        level = "debug"
+        format = "json"
+    }
+    ```
+
+    {{< admonition type="note" >}}
+The default log level is `info` and the default log format is `logfmt`.
+    {{< /admonition >}}
+
+    Try pasting this into `config.river` and running `/path/to/agent run config.river` to see what happens.
+
+    Congratulations, you've just written your first River file! You've also just written your first {{< param "PRODUCT_NAME" >}} configuration file. This configuration won't do anything yet, so let's add some components to it.
+
+    {{< admonition type="note" >}}
+Comments in River are prefixed with `//` and are single-line only. For example: `// This is a comment`.
+    {{< /admonition >}}
+
+## Components
+
+[Components]: https://grafana.com/docs/agent//flow/concepts/components/
+[Component controller]: https://grafana.com/docs/agent//flow/concepts/component_controller/
+[Components configuration language]: https://grafana.com/docs/agent//flow/concepts/config-language/components/
+[env]: https://grafana.com/docs/agent//flow/reference/stdlib/env/
+
+**Recommended reading**
+
+- [Components][]
+- [Components configuration language][]
+- [Component controller][]
+
+Components are the building blocks of a {{< param "PRODUCT_NAME" >}} configuration. They are configured and linked to create pipelines that collect, process, and output your telemetry data. Components are configured with `Arguments` and have `Exports` that may be referenced by other components.
+
+Let's look at a simple example pipeline:
+
+```river
+local.file "example" {
+    path = env("HOME") + "file.txt"
+}
+
+prometheus.remote_write "local_prom" {
+    endpoint {
+        url = "http://localhost:9090/api/v1/write"
+
+        basic_auth {
+            username = "admin"
+            password = local.file.example.content
+        }
+    }
+}
+```
+
+{{< admonition type="note" >}}
+[Component reference]: https://grafana.com/docs/agent//flow/reference/components/
+
+A list of all available components can be found in the [Component reference][]. Each component has a link to its documentation, which contains a description of what the component does, its arguments, its exports, and examples.
+{{< /admonition >}}
+
+This pipeline has two components: `local.file` and `prometheus.remote_write`. The `local.file` component is configured with a single argument, `path`, which is set by calling the [env][] standard library function to retrieve the value of the `HOME` environment variable and concatenating it with the string `"file.txt"`. The `local.file` component has a single export, `content`, which contains the contents of the file.
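+
+If it helps, the `path` expression above is roughly the same idea as the following Python snippet. This is an illustration only, not anything {{< param "PRODUCT_NAME" >}} runs:
+
+```python
+import os
+
+# Roughly what `path = env("HOME") + "file.txt"` evaluates to in the River example.
+path = os.environ["HOME"] + "file.txt"
+
+# Note that plain string concatenation doesn't insert a path separator for you.
+print(path)
+```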
+ +The `prometheus.remote_write` component is configured with an `endpoint` block, containing the `url` attribute and a `basic_auth` block. The `url` attribute is set to the URL of the Prometheus remote write endpoint. The `basic_auth` block contains the `username` and `password` attributes, which are set to the string `"admin"` and the `content` export of the `local.file` component, respectively. The `content` export is referenced by using the syntax `local.file.example.content`, where `local.file.example` is the fully qualified name of the component (the component's type + its label) and `content` is the name of the export. + +

+Flow of example pipeline with local.file and prometheus.remote_write components +

+ +{{< admonition type="note" >}} +The `local.file` component's label is set to `"example"`, so the fully qualified name of the component is `local.file.example`. The `prometheus.remote_write` component's label is set to `"local_prom"`, so the fully qualified name of the component is `prometheus.remote_write.local_prom`. +{{< /admonition >}} + +This example pipeline still doesn't do anything, so let's add some more components to it. + +## Shipping your first metrics + +[prometheus.exporter.unix]: https://grafana.com/docs/agent//flow/reference/components/prometheus.exporter.unix/ +[prometheus.scrape]: https://grafana.com/docs/agent//flow/reference/components/prometheus.scrape/ +[prometheus.remote_write]: https://grafana.com/docs/agent//flow/reference/components/prometheus.remote_write/ + +**Recommended reading** + +- Optional: [prometheus.exporter.unix][] +- Optional: [prometheus.scrape][] +- Optional: [prometheus.remote_write][] + +Make a simple pipeline with a `prometheus.exporter.unix` component, a `prometheus.scrape` component to scrape it, and a `prometheus.remote_write` component to send the scraped metrics to Prometheus. + +```river +prometheus.exporter.unix "localhost" { + // This component exposes a lot of metrics by default, so we will keep all of the default arguments. +} + +prometheus.scrape "default" { + // Setting the scrape interval lower to make it faster to be able to see the metrics + scrape_interval = "10s" + + targets = prometheus.exporter.unix.localhost.targets + forward_to = [ + prometheus.remote_write.local_prom.receiver, + ] +} + +prometheus.remote_write "local_prom" { + endpoint { + url = "http://localhost:9090/api/v1/write" + } +} +``` + +Run {{< param "PRODUCT_NAME" >}} with: + +```bash +/path/to/agent run config.river +``` + +Navigate to [http://localhost:3000/explore](http://localhost:3000/explore) in your browser. After ~15-20 seconds, you should be able to see the metrics from the `prometheus.exporter.unix` component! Try querying for `node_memory_Active_bytes` to see the active memory of your host. + +

+Screenshot of node_memory_Active_bytes query in Grafana +

+ +## Visualizing the relationship between components + +The following diagram is an example pipeline: + +

+Flow of example pipeline with a prometheus.scrape, prometheus.exporter.unix, and prometheus.remote_write components +

+ +The preceding configuration defines three components: + +- `prometheus.scrape` - A component that scrapes metrics from components that export targets. +- `prometheus.exporter.unix` - A component that exports metrics from the host, built around [node_exporter](https://github.com/prometheus/node_exporter). +- `prometheus.remote_write` - A component that sends metrics to a Prometheus remote-write compatible endpoint. + +The `prometheus.scrape` component references the `prometheus.exporter.unix` component's targets export, which is a list of scrape targets. The `prometheus.scrape` component then forwards the scraped metrics to the `prometheus.remote_write` component. + +One rule is that components can't form a cycle. This means that a component can't reference itself directly or indirectly. This is to prevent infinite loops from forming in the pipeline. + +## Exercise for the reader + +[prometheus.exporter.redis]: https://grafana.com/docs/agent//flow/reference/components/prometheus.exporter.redis/ + +**Recommended Reading** + +- Optional: [prometheus.exporter.redis][] + +Let's start a container running Redis and configure {{< param "PRODUCT_NAME" >}} to scrape metrics from it. + +```bash +docker container run -d --name flow-redis -p 6379:6379 --rm redis +``` + +Try modifying the pipeline to scrape metrics from the Redis exporter. You can refer to the [prometheus.exporter.redis][] component documentation for more information on how to configure it. + +To give a visual hint, you want to create a pipeline that looks like this: + +

+Flow of exercise pipeline, with a scrape, unix_exporter, redis_exporter, and remote_write component +

+ +{{< admonition type="note" >}} +[concat]: https://grafana.com/docs/agent//flow/reference/stdlib/concat/ + +You may find the [concat][] standard library function useful. +{{< /admonition >}} + +You can run {{< param "PRODUCT_NAME" >}} with the new configuration file by running: + +```bash +/path/to/agent run config.river +``` + +Navigate to [http://localhost:3000/explore](http://localhost:3000/explore) in your browser. After the first scrape, you should be able to query for `redis` metrics as well as `node` metrics. + +To shut down the Redis container, run: + +```bash +docker container stop flow-redis +``` + +If you get stuck, you can always view a solution here: +{{< collapse title="Solution" >}} + +```river +// Configure your first components, learn about the standard library, and learn how to run Grafana Agent + +// prometheus.exporter.redis collects information about Redis and exposes +// targets for other components to use +prometheus.exporter.redis "local_redis" { + redis_addr = "localhost:6379" +} + +prometheus.exporter.unix "localhost" { } + +// prometheus.scrape scrapes the targets that it is configured with and forwards +// the metrics to other components (typically prometheus.relabel or prometheus.remote_write) +prometheus.scrape "default" { + // This is scraping too often for typical use-cases, but is easier for testing and demo-ing! + scrape_interval = "10s" + + // Here, prometheus.exporter.redis.local_redis.targets refers to the 'targets' export + // of the prometheus.exporter.redis component with the label "local_redis". + // + // If you have more than one set of targets that you would like to scrape, you can use + // the 'concat' function from the standard library to combine them. + targets = concat(prometheus.exporter.redis.local_redis.targets, prometheus.exporter.unix.localhost.targets) + forward_to = [prometheus.remote_write.local_prom.receiver] +} + +// prometheus.remote_write exports a 'receiver', which other components can forward +// metrics to and it will remote_write them to the configured endpoint(s) +prometheus.remote_write "local_prom" { + endpoint { + url = "http://localhost:9090/api/v1/write" + } +} + +``` + +{{< /collapse >}} + +## Finishing up and next steps + +You might have noticed that running {{< param "PRODUCT_NAME" >}} with the configurations created a directory called `data-agent` in the directory you ran {{< param "PRODUCT_NAME" >}} from. This directory is where components can store data, such as the `prometheus.exporter.unix` component storing its WAL (Write Ahead Log). If you look in the directory, do you notice anything interesting? The directory for each component is the fully qualified name. + +If you'd like to store the data elsewhere, you can specify a different directory by supplying the `--storage.path` flag to {{< param "PRODUCT_ROOT_NAME" >}}'s run command, for example, `/path/to/agent run config.river --storage.path /etc/grafana-agent`. Generally, you can use a persistent directory for this, as some components may use the data stored in this directory to perform their function. + +In the next tutorial, you will look at how to configure {{< param "PRODUCT_NAME" >}} to collect logs from a file and send them to Loki. You will also look at using different components to process metrics and logs before sending them. 
diff --git a/docs/sources/flow/tutorials/flow-by-example/get-started.md b/docs/sources/flow/tutorials/flow-by-example/get-started.md new file mode 100644 index 000000000000..5fa1bbd5b537 --- /dev/null +++ b/docs/sources/flow/tutorials/flow-by-example/get-started.md @@ -0,0 +1,89 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/faq/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/faq/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/faq/ +- /docs/grafana-cloud/send-data/agent/flow/tutorials/flow-by-example/faq/ +canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/faq/ +description: Getting started with Flow-by-Example Tutorials +title: Get started +weight: 10 +--- + +## Who is this for? + +This set of tutorials contains a collection of examples that build on each other to demonstrate how to configure and use [{{< param "PRODUCT_NAME" >}}][flow]. It assumes you have a basic understanding of what {{< param "PRODUCT_ROOT_NAME" >}} is and telemetry collection in general. It also assumes a base level of familiarity with Prometheus and PromQL, Loki and LogQL, and basic Grafana navigation. It assumes no knowledge of {{< param "PRODUCT_NAME" >}} or River concepts. + +[flow]: https://grafana.com/docs/agent/latest/flow + +## What is Flow? + +Flow is a new way to configure {{< param "PRODUCT_NAME" >}}. It is a declarative configuration language that allows you to define a pipeline of telemetry collection, processing, and output. It is built on top of the [River](https://github.com/grafana/river) configuration language, which is designed to be fast, simple, and debuggable. + +## What do I need to get started? + +You will need a Linux or Unix environment with Docker installed. The examples are designed to be run on a single host so that you can run them on your laptop or in a VM. You are encouraged to follow along with the examples using a `config.river` file and experiment with the examples yourself. + +To run the examples, you should have a Grafana Agent binary available. You can follow the instructions on how to [Install Grafana Agent as a Standalone Binary](https://grafana.com/docs/agent/latest/flow/setup/install/binary/#install-grafana-agent-in-flow-mode-as-a-standalone-binary) to get a binary. + +## How should I follow along? + +You can use this docker-compose file to set up a local Grafana instance alongside Loki and Prometheus pre-configured as datasources. The examples are designed to be run locally, so you can follow along and experiment with them yourself. 
+
+```yaml
+version: '3'
+services:
+  loki:
+    image: grafana/loki:2.9.0
+    ports:
+      - "3100:3100"
+    command: -config.file=/etc/loki/local-config.yaml
+  prometheus:
+    image: prom/prometheus:v2.47.0
+    command:
+      - --web.enable-remote-write-receiver
+      - --config.file=/etc/prometheus/prometheus.yml
+    ports:
+      - "9090:9090"
+  grafana:
+    environment:
+      - GF_PATHS_PROVISIONING=/etc/grafana/provisioning
+      - GF_AUTH_ANONYMOUS_ENABLED=true
+      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+    entrypoint:
+      - sh
+      - -euc
+      - |
+        mkdir -p /etc/grafana/provisioning/datasources
+        cat <<EOF > /etc/grafana/provisioning/datasources/ds.yaml
+        apiVersion: 1
+        datasources:
+        - name: Loki
+          type: loki
+          access: proxy
+          orgId: 1
+          url: http://loki:3100
+          basicAuth: false
+          isDefault: false
+          version: 1
+          editable: false
+        - name: Prometheus
+          type: prometheus
+          orgId: 1
+          url: http://prometheus:9090
+          basicAuth: false
+          isDefault: true
+          version: 1
+          editable: false
+        EOF
+        /run.sh
+    image: grafana/grafana:latest
+    ports:
+      - "3000:3000"
+```
+
+After running `docker-compose up`, open [http://localhost:3000](http://localhost:3000) in your browser to view the Grafana UI.
+
+The tutorials are designed to be followed in order and generally build on each other. Each example explains what it does and how it works. They are designed to be run locally, so you can follow along and experiment with them yourself.
+
+The Recommended Reading sections in each tutorial provide a list of documentation topics. To help you understand the concepts used in the example, read the recommended topics in the order given.
diff --git a/docs/sources/flow/tutorials/flow-by-example/logs-and-relabeling-basics/index.md b/docs/sources/flow/tutorials/flow-by-example/logs-and-relabeling-basics/index.md
new file mode 100644
index 000000000000..02c7c3c138f9
--- /dev/null
+++ b/docs/sources/flow/tutorials/flow-by-example/logs-and-relabeling-basics/index.md
@@ -0,0 +1,308 @@
+---
+aliases:
+- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/logs-and-relabeling-basics/
+- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/logs-and-relabeling-basics/
+- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/logs-and-relabeling-basics/
+- /docs/grafana-cloud/send-data/agent/flow/tutorials/logs-and-relabeling-basics/
+canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/logs-and-relabeling-basics/
+description: Learn how to relabel metrics and collect logs
+title: Logs and relabeling basics
+weight: 30
+---
+
+# Logs and relabeling basics
+
+This tutorial assumes you have completed the [First components and introducing the standard library](https://grafana.com/docs/agent//flow/tutorials/flow-by-example/first-components-and-stdlib/) tutorial, or are at least familiar with the concepts of components, attributes, and expressions and how to use them. You will cover some basic metric relabeling, followed by how to send logs to Loki.
+
+## Relabel metrics
+
+[prometheus.relabel]: https://grafana.com/docs/agent//flow/reference/components/prometheus.relabel/
+
+**Recommended reading**
+
+- Optional: [prometheus.relabel][]
+
+Before moving on to logs, let's look at how we can use the `prometheus.relabel` component to relabel metrics. The `prometheus.relabel` component allows you to perform Prometheus relabeling on metrics and is similar to the `relabel_configs` section of a Prometheus scrape config.
+ +Let's add a `prometheus.relabel` component to a basic pipeline and see how to add labels. + +```river +prometheus.exporter.unix "localhost" { } + +prometheus.scrape "default" { + scrape_interval = "10s" + + targets = prometheus.exporter.unix.localhost.targets + forward_to = [ + prometheus.relabel.example.receiver, + ] +} + +prometheus.relabel "example" { + forward_to = [ + prometheus.remote_write.local_prom.receiver, + ] + + rule { + action = "replace" + target_label = "os" + replacement = constants.os + } +} + +prometheus.remote_write "local_prom" { + endpoint { + url = "http://localhost:9090/api/v1/write" + } +} +``` + +We have now created the following pipeline: + +![Diagram of pipeline that scrapes prometheus.exporter.unix, relabels the metrics, and remote_writes them](/media/docs/agent/diagram-flow-by-example-relabel-0.svg) + +This pipeline has a `prometheus.relabel` component that has a single rule. +This rule has the `replace` action, which will replace the value of the `os` label with a special value: `constants.os`. +This value is a special constant that is replaced with the OS of the host {{< param "PRODUCT_ROOT_NAME" >}} is running on. +You can see the other available constants in the [constants](https://grafana.com/docs/agent//flow/reference/stdlib/constants/) documentation. +This example has one rule block, but you can have as many as you want. +Each rule block is applied in order. + +If you run {{< param "PRODUCT_ROOT_NAME" >}} and navigate to [localhost:3000/explore](http://localhost:3000/explore), you can see the `os` label on the metrics. Try querying for `node_context_switches_total` and look at the labels. + +Relabeling uses the same rules as Prometheus. You can always refer to the [prometheus.relabel documentation](https://grafana.com/docs/agent//flow/reference/components/prometheus.relabel/#rule-block) for a full list of available options. + +{{< admonition type="note" >}} +You can forward multiple components to one `prometheus.relabel` component. This allows you to apply the same relabeling rules to multiple pipelines. +{{< /admonition >}} + +{{< admonition type="warning" >}} +There is an issue commonly faced when relabeling and using labels that start with `__` (double underscore). These labels are considered internal and are dropped before relabeling rules from a `prometheus.relabel` component are applied. If you would like to keep or act on these kinds of labels, use a [discovery.relabel](https://grafana.com/docs/agent//flow/reference/components/discovery.relabel/) component. +{{< /admonition >}} + +## Send logs to Loki + +[local.file_match]: https://grafana.com/docs/agent//flow/reference/components/local.file_match/ +[loki.source.file]: https://grafana.com/docs/agent//flow/reference/components/loki.source.file/ +[loki.write]: https://grafana.com/docs/agent//flow/reference/components/loki.write/ + +**Recommended reading** + +- Optional: [local.file_match][] +- Optional: [loki.source.file][] +- Optional: [loki.write][] + +Now that you're comfortable creating components and chaining them together, let's collect some logs and send them to Loki. We will use the `local.file_match` component to perform file discovery, the `loki.source.file` to collect the logs, and the `loki.write` component to send the logs to Loki. + +Before doing this, we need to ensure we have a log file to scrape. We will use the `echo` command to create a file with some log content. 
+ +```bash +mkdir -p /tmp/flow-logs +echo "This is a log line" > /tmp/flow-logs/log.log +``` + +Now that we have a log file, let's create a pipeline to scrape it. + +```river +local.file_match "tmplogs" { + path_targets = [{"__path__" = "/tmp/flow-logs/*.log"}] +} + +loki.source.file "local_files" { + targets = local.file_match.tmplogs.targets + forward_to = [loki.write.local_loki.receiver] +} + +loki.write "local_loki" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + } +} +``` + +The rough flow of this pipeline is: + +![Diagram of pipeline that collects logs from /tmp/flow-logs and writes them to a local Loki instance](/media/docs/agent/diagram-flow-by-example-logs-0.svg) + +If you navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`, you can query for `{filename="/tmp/flow-logs/log.log"}` and see the log line we created earlier. Try running the following command to add more logs to the file. + +```bash +echo "This is another log line!" >> /tmp/flow-logs/log.log +``` + +If you re-execute the query, you can see the new log lines. + +![Grafana Explore view of example log lines](/media/docs/agent/screenshot-flow-by-example-log-lines.png) + +If you are curious how {{< param "PRODUCT_ROOT_NAME" >}} keeps track of where it is in a log file, you can look at `data-agent/loki.source.file.local_files/positions.yml`. +If you delete this file, {{< param "PRODUCT_ROOT_NAME" >}} starts reading from the beginning of the file again, which is why keeping the {{< param "PRODUCT_ROOT_NAME" >}}'s data directory in a persistent location is desirable. + +## Exercise + +[loki.relabel]: https://grafana.com/docs/agent//flow/reference/components/loki.relabel/ +[loki.process]: https://grafana.com/docs/agent//flow/reference/components/loki.process/ + +**Recommended reading** + +- [loki.relabel][] +- [loki.process][] + +### Add a Label to Logs + +This exercise will have two parts, building on the previous example. Let's start by adding an `os` label (just like the Prometheus example) to all of the logs we collect. + +Modify the following snippet to add the label `os` with the value of the `os` constant. + +```river +local.file_match "tmplogs" { + path_targets = [{"__path__" = "/tmp/flow-logs/*.log"}] +} + +loki.source.file "local_files" { + targets = local.file_match.tmplogs.targets + forward_to = [loki.write.local_loki.receiver] +} + +loki.write "local_loki" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + } +} +``` + +{{< admonition type="note" >}} +You can use the [loki.relabel](https://grafana.com/docs/agent//flow/reference/components/loki.relabel) component to relabel and add labels, just like you can with the [prometheus.relabel](https://grafana.com/docs/agent//flow/reference/components/prometheus.relabel) component. +{{< /admonition >}} + +Once you have your completed configuration, run {{< param "PRODUCT_ROOT_NAME" >}} and execute the following: + +```bash +echo 'level=info msg="INFO: This is an info level log!"' >> /tmp/flow-logs/log.log +echo 'level=warn msg="WARN: This is a warn level log!"' >> /tmp/flow-logs/log.log +echo 'level=debug msg="DEBUG: This is a debug level log!"' >> /tmp/flow-logs/log.log +``` + +Navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`. Try querying for `{filename="/tmp/flow-logs/log.log"}` and see if you can find the new label! + +Now that we have added new labels, we can also filter on them. Try querying for `{os!=""}`. 
You should only see the lines you added in the previous step. + +{{< collapse title="Solution" >}} + +```river +// Let's learn about relabeling and send logs to Loki! + +local.file_match "tmplogs" { + path_targets = [{"__path__" = "/tmp/flow-logs/*.log"}] +} + +loki.source.file "local_files" { + targets = local.file_match.tmplogs.targets + forward_to = [loki.relabel.add_static_label.receiver] +} + +loki.relabel "add_static_label" { + forward_to = [loki.write.local_loki.receiver] + + rule { + target_label = "os" + replacement = constants.os + } +} + +loki.write "local_loki" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + } +} +``` + +{{< /collapse >}} + +### Extract and add a Label from Logs + +{{< admonition type="note" >}} +This exercise is more challenging than the previous one. If you are having trouble, skip it and move to the next section, which will cover some of the concepts used here. You can always come back to this exercise later. +{{< /admonition >}} + +This exercise will build on the previous one, though it's more involved. + +Let's say we want to extract the `level` from the logs and add it as a label. As a starting point, look at [loki.process][]. +This component allows you to perform processing on logs, including extracting values from log contents. + +Try modifying your configuration from the previous section to extract the `level` from the logs and add it as a label. +If needed, you can find a solution to the previous exercise at the end of the [previous section](#add-a-label-to-logs). + +{{< admonition type="note" >}} +The `stage.logfmt` and `stage.labels` blocks for `loki.process` may be helpful. +{{< /admonition >}} + +Once you have your completed config, run {{< param "PRODUCT_ROOT_NAME" >}} and execute the following: + +```bash +echo 'level=info msg="INFO: This is an info level log!"' >> /tmp/flow-logs/log.log +echo 'level=warn msg="WARN: This is a warn level log!"' >> /tmp/flow-logs/log.log +echo 'level=debug msg="DEBUG: This is a debug level log!"' >> /tmp/flow-logs/log.log +``` + +Navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`. Try querying for `{level!=""}` to see the new labels in action. + +![Grafana Explore view of example log lines, now with the extracted 'level' label](/media/docs/agent/screenshot-flow-by-example-log-line-levels.png) + +{{< collapse title="Solution" >}} + +```river +// Let's learn about relabeling and send logs to Loki! + +local.file_match "tmplogs" { + path_targets = [{"__path__" = "/tmp/flow-logs/*.log"}] +} + +loki.source.file "local_files" { + targets = local.file_match.tmplogs.targets + forward_to = [loki.process.add_new_label.receiver] +} + +loki.process "add_new_label" { + // Extract the value of "level" from the log line and add it to the extracted map as "extracted_level" + // You could also use "level" = "", which would extract the value of "level" and add it to the extracted map as "level" + // but to make it explicit for this example, we will use a different name. + // + // The extracted map will be covered in more detail in the next section. 
+ stage.logfmt { + mapping = { + "extracted_level" = "level", + } + } + + // Add the value of "extracted_level" from the extracted map as a "level" label + stage.labels { + values = { + "level" = "extracted_level", + } + } + + forward_to = [loki.relabel.add_static_label.receiver] +} + +loki.relabel "add_static_label" { + forward_to = [loki.write.local_loki.receiver] + + rule { + target_label = "os" + replacement = constants.os + } +} + +loki.write "local_loki" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + } +} +``` + +{{< /collapse >}} + +## Finishing up and next steps + +You have learned the concepts of components, attributes, and expressions. You have also seen how to use some standard library components to collect metrics and logs. In the next tutorial, you will learn more about how to use the `loki.process` component to extract values from logs and use them. + diff --git a/docs/sources/flow/tutorials/flow-by-example/processing-logs/index.md b/docs/sources/flow/tutorials/flow-by-example/processing-logs/index.md new file mode 100644 index 000000000000..327b40716c30 --- /dev/null +++ b/docs/sources/flow/tutorials/flow-by-example/processing-logs/index.md @@ -0,0 +1,407 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/processing-logs/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/processing-logs/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/processing-logs/ +- /docs/grafana-cloud/send-data/agent/flow/tutorials/processing-logs/ +canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/processing-logs/ +description: Learn how to process logs +title: Processing Logs +weight: 40 +--- + +# Processing Logs + +This tutorial assumes you are familiar with setting up and connecting components. +It covers using `loki.source.api` to receive logs over HTTP, processing and filtering them, and sending them to Loki. + +## Receive logs over HTTP and Process + +**Recommended reading** + +- Optional: [loki.source.api](https://grafana.com/docs/agent//flow/reference/components/loki.source.api/) + +The `loki.source.api` component can receive logs over HTTP. +It can be useful for receiving logs from other {{< param "PRODUCT_ROOT_NAME" >}}s or collectors, or directly from applications that can send logs over HTTP, and then processing them centrally. + +Your pipeline is going to look like this: + +![Loki Source API Pipeline](/media/docs/agent/diagram-flow-by-example-logs-pipeline.svg) + +Let's start by setting up the `loki.source.api` component: + +```river +loki.source.api "listener" { + http { + listen_address = "127.0.0.1" + listen_port = 9999 + } + + labels = { "source": "api" } + + forward_to = [loki.process.process_logs.receiver] +} +``` + +This is a simple configuration. +You are configuring the `loki.source.api` component to listen on `127.0.0.1:9999` and attach a `source="api"` label to the received log entries, which are then forwarded to the `loki.process.process_logs` component's exported receiver. +Next, you can configure the `loki.process` and `loki.write` components. 
+ +## Process and Write Logs + +**Recommended reading** + +- [loki.process#stage.drop](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagedrop-block) +- [loki.process#stage.json](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagejson-block) +- [loki.process#stage.labels](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagelabels-block) + +```river +// Let's send and process more logs! + +loki.source.api "listener" { + http { + listen_address = "127.0.0.1" + listen_port = 9999 + } + + labels = { "source" = "api" } + + forward_to = [loki.process.process_logs.receiver] +} + +loki.process "process_logs" { + + // Stage 1 + stage.json { + expressions = { + log = "", + ts = "timestamp", + } + } + + // Stage 2 + stage.timestamp { + source = "ts" + format = "RFC3339" + } + + // Stage 3 + stage.json { + source = "log" + + expressions = { + is_secret = "", + level = "", + log_line = "message", + } + } + + // Stage 4 + stage.drop { + source = "is_secret" + value = "true" + } + + // Stage 5 + stage.labels { + values = { + level = "", + } + } + + // Stage 6 + stage.output { + source = "log_line" + } + + // This stage adds static values to the labels on the log line + stage.static_labels { + values = { + source = "demo-api", + } + } + + forward_to = [loki.write.local_loki.receiver] +} + +loki.write "local_loki" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + } +} +``` + +You can skip to the next section if you successfully completed the previous section's exercises. +If not, or if you were unsure how things worked, let's break down what is happening in the `loki.process` component. + +Many of the `stage.*` blocks in `loki.process` act on reading or writing a shared map of values extracted from the logs. +You can think of this extracted map as a hashmap or table that each stage has access to, and it is referred to as the "extracted map" from here on. +In subsequent stages, you can use the extracted map to filter logs, add or remove labels, or even modify the log line. + +{{< admonition type="note" >}} +`stage.*` blocks are executed in the order they appear in the component, top down. +{{< /admonition >}} + +Let's use an example log line to illustrate this, then go stage by stage, showing the contents of the extracted map. Here is our example log line: + +```json +{ + "log": { + "is_secret": "true", + "level": "info", + "message": "This is a secret message!", + }, + "timestamp": "2023-11-16T06:01:50Z", +} +``` + +### Stage 1 + +```river +stage.json { + expressions = { + log = "", + ts = "timestamp", + } +} +``` + +This stage parses the log line as JSON, extracts two values from it, `log` and `timestamp`, and puts them into the extracted map with keys `log` and `ts`, respectively. + +{{< admonition type="note" >}} +Supplying an empty string is shorthand for using the same key as in the input log line (so `log = ""` is the same as `log = "log"`). The _keys_ of the `expressions` object end up as the keys in the extracted map, and the _values_ are used as keys to look up in the parsed log line. 
+{{< /admonition >}} + +If this were Python, it would be roughly equivalent to: + +```python +extracted_map = {} +log_line = {"log": {"is_secret": "true", "level": "info", "message": "This is a secret message!"}, "timestamp": "2023-11-16T06:01:50Z"} + +extracted_map["log"] = log_line["log"] +extracted_map["ts"] = log_line["timestamp"] +``` + +Extracted map _before_ performing this stage: + +```json +{} +``` + +Extracted map _after_ performing this stage: + +```json +{ + "log": { + "is_secret": "true", + "level": "info", + "message": "This is a secret message!", + }, + "ts": "2023-11-16T06:01:50Z", +} +``` + +### Stage 2 + +```river +stage.timestamp { + source = "ts" + format = "RFC3339" +} +``` + +This stage acts on the `ts` value in the map you extracted in the previous stage. +The value of `ts` is parsed in the format of `RFC3339` and added as the timestamp to be ingested by Loki. +This is useful if you want to use the timestamp present in the log itself, rather than the time the log is ingested. +This stage does not modify the extracted map. + +### Stage 3 + +```river +stage.json { + source = "log" + + expressions = { + is_secret = "", + level = "", + log_line = "message", + } +} +``` + +This stage acts on the `log` value in the extracted map, which is a value that you extracted in the previous stage. +This value is also a JSON object, so you can extract values from it as well. +This stage extracts three values from the `log` value, `is_secret`, `level`, and `log_line`, and puts them into the extracted map with keys `is_secret`, `level`, and `log_line`. + +If this were Python, it would be roughly equivalent to: + +```python +extracted_map = { + "log": { + "is_secret": "true", + "level": "info", + "message": "This is a secret message!", + }, + "ts": "2023-11-16T06:01:50Z", +} + +source = extracted_map["log"] + +extracted_map["is_secret"] = source["is_secret"] +extracted_map["level"] = source["level"] +extracted_map["log_line"] = source["message"] +``` + +Extracted map _before_ performing this stage: + +```json +{ + "log": { + "is_secret": "true", + "level": "info", + "message": "This is a secret message!", + }, + "ts": "2023-11-16T06:01:50Z", +} +``` + +Extracted map _after_ performing this stage: + +```json +{ + "log": { + "is_secret": "true", + "level": "info", + "message": "This is a secret message!", + }, + "ts": "2023-11-16T06:01:50Z", + "is_secret": "true", + "level": "info", + "log_line": "This is a secret message!", +} +``` + +### Stage 4 + +```river +stage.drop { + source = "is_secret" + value = "true" +} +``` + +This stage acts on the `is_secret` value in the extracted map, which is a value that you extracted in the previous stage. +This stage drops the log line if the value of `is_secret` is `"true"` and does not modify the extracted map. +There are many other ways to filter logs, but this is a simple example. +Refer to the [loki.process#stage.drop](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagedrop-block) documentation for more information. + +### Stage 5 + +```river +stage.labels { + values = { + level = "", + } +} +``` + +This stage adds a label to the log using the same shorthand as above (so this is equivalent to using `values = { level = "level" }`). +This stage adds a label with key `level` and the value of `level` in the extracted map to the log (`"info"` from our example log line). +This stage does not modify the extracted map. 
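+
+Following the convention of the earlier stages, here is a rough Python equivalent of stages 4 and 5 combined. This is a sketch for illustration only: the `stage_drop` and `stage_labels` functions and the `labels` dictionary are invented names for this example, not part of the `loki.process` API.
+
+```python
+# Sketch of stages 4 and 5. The extracted map is the one produced by stage 3.
+extracted_map = {
+    "log": {"is_secret": "true", "level": "info", "message": "This is a secret message!"},
+    "ts": "2023-11-16T06:01:50Z",
+    "is_secret": "true",
+    "level": "info",
+    "log_line": "This is a secret message!",
+}
+labels = {}
+
+def stage_drop(extracted):
+    # Stage 4: drop the entry when the extracted "is_secret" value is "true".
+    return extracted.get("is_secret") == "true"
+
+def stage_labels(extracted, labels):
+    # Stage 5: copy the extracted "level" value onto the entry as a Loki label.
+    labels["level"] = extracted["level"]
+
+if stage_drop(extracted_map):
+    print("entry dropped; nothing is forwarded to Loki")
+else:
+    stage_labels(extracted_map, labels)
+    print("entry kept, labels:", labels)
+```
+
+With the example log line, stage 4 drops the entry because `is_secret` is `"true"`, so stage 5 never runs. A log line with `is_secret` set to `"false"` would instead be kept and labeled with `level="info"`.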
+
+### Stage 6
+
+```river
+stage.output {
+    source = "log_line"
+}
+```
+
+This stage uses the `log_line` value in the extracted map to set the actual log line that is forwarded to Loki.
+Rather than sending the entire JSON blob to Loki, you are only sending `original_log_line["log"]["message"]`, along with some labels that you attached.
+
+This stage does not modify the extracted map.
+
+## Putting it all together
+
+Now that you have all of the pieces, let's run the {{< param "PRODUCT_ROOT_NAME" >}} and send some logs to it.
+Modify `config.river` with the configuration from the previous example and start the {{< param "PRODUCT_ROOT_NAME" >}} with:
+
+```bash
+/path/to/agent run config.river
+```
+
+To get the current time in `RFC3339` format, you can run:
+
+```bash
+date -u +"%Y-%m-%dT%H:%M:%SZ"
+```
+
+Try executing the following, replacing `<TIMESTAMP>` with the current time from the `date` command:
+
+```bash
+curl localhost:9999/loki/api/v1/raw -XPOST -H "Content-Type: application/json" -d '{"log": {"is_secret": "false", "level": "debug", "message": "This is a debug message!"}, "timestamp": "<TIMESTAMP>"}'
+```
+
+Now that you have sent some logs, let's see how they look in Grafana.
+Navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`.
+Try querying for `{source="demo-api"}` and see if you can find the logs you sent.
+
+Try playing around with the values of `"level"`, `"message"`, `"timestamp"`, and `"is_secret"` and see how the logs change.
+You can also try adding more stages to the `loki.process` component to extract more values from the logs, or add more labels.
+
+![Example Loki Logs](/media/docs/agent/screenshot-flow-by-example-processed-log-lines.png)
+
+## Exercise
+
+Since you are already using Docker and Docker exports logs, let's get those logs into Loki.
+You can refer to the [discovery.docker](https://grafana.com/docs/agent//flow/reference/components/discovery.docker/) and [loki.source.docker](https://grafana.com/docs/agent//flow/reference/components/loki.source.docker/) documentation for more information.
+
+To ensure proper timestamps and other labels, make sure you use a `loki.process` component to process the logs before sending them to Loki.
+
+Although you have not used it before, let's use a `discovery.relabel` component to attach the container name as a label to the logs.
+You can refer to the [discovery.relabel](https://grafana.com/docs/agent//flow/reference/components/discovery.relabel/) documentation for more information.
+The `discovery.relabel` component is very similar to the `prometheus.relabel` component, but is used to relabel discovered targets rather than metrics.
+
+
+ +{{< collapse title="Solution" >}} + +```river +// Discover docker containers to collect logs from +discovery.docker "docker_containers" { + // Note that if you are using Docker Desktop Engine this may need to be changed to + // something like "unix:///${HOME}/.docker/desktop/docker.sock" + host = "unix:///var/run/docker.sock" +} + +// Extract container name from __meta_docker_container_name label and add as label +discovery.relabel "docker_containers" { + targets = discovery.docker.docker_containers.targets + + rule { + source_labels = ["__meta_docker_container_name"] + target_label = "container" + } +} + +// Scrape logs from docker containers and send to be processed +loki.source.docker "docker_logs" { + host = "unix:///var/run/docker.sock" + targets = discovery.relabel.docker_containers.output + forward_to = [loki.process.process_logs.receiver] +} + +// Process logs and send to Loki +loki.process "process_logs" { + stage.docker { } + + forward_to = [loki.write.local_loki.receiver] +} + +loki.write "local_loki" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + } +} +``` + +{{< /collapse >}} \ No newline at end of file diff --git a/docs/sources/operator/_index.md b/docs/sources/operator/_index.md index 1c9aef3aca53..a39241c87a62 100644 --- a/docs/sources/operator/_index.md +++ b/docs/sources/operator/_index.md @@ -25,17 +25,17 @@ telemetry collection: * Prometheus Operator [Probe][] resources for collecting metrics from Kubernetes [Ingresses][]. * Custom [PodLogs][] resources for collecting logs. -{{% admonition type="note" %}} +{{< admonition type="note" >}} Grafana Agent Operator does not collect traces. -{{% /admonition %}} +{{< /admonition >}} Grafana Agent Operator is currently in [Beta][], and is subject to change or being removed with functionality which covers the same use case. -{{% admonition type="note" %}} +{{< admonition type="note" >}} If you are shipping your data to Grafana Cloud, use [Kubernetes Monitoring](/docs/grafana-cloud/kubernetes-monitoring/) to set up Agent Operator. Kubernetes Monitoring provides a simplified approach and preconfigured dashboards and alerts. -{{% /admonition %}} +{{< /admonition >}} Grafana Agent Operator uses additional custom resources to manage the deployment and configuration of Grafana Agents running in static mode. In addition to the diff --git a/docs/sources/operator/deploy-agent-operator-resources.md b/docs/sources/operator/deploy-agent-operator-resources.md index 2823f58cb996..6b6f6564c85a 100644 --- a/docs/sources/operator/deploy-agent-operator-resources.md +++ b/docs/sources/operator/deploy-agent-operator-resources.md @@ -34,9 +34,9 @@ The hierarchy of custom resources is as follows: To learn more about the custom resources Agent Operator provides and their hierarchy, see [Grafana Agent Operator architecture]({{< relref "./architecture" >}}). -{{% admonition type="note" %}} +{{< admonition type="note" >}} Agent Operator is currently in [beta]({{< relref "../stability.md#beta" >}}) and its custom resources are subject to change. -{{% /admonition %}} +{{< /admonition >}} ## Before you begin @@ -46,9 +46,9 @@ Before you begin, make sure that you have deployed the Grafana Agent Operator CR In this section, you'll roll out a `GrafanaAgent` resource. See [Grafana Agent Operator architecture]({{< relref "./architecture" >}}) for a discussion of the resources in the `GrafanaAgent` resource hierarchy. 
-{{% admonition type="note" %}} +{{< admonition type="note" >}} Due to the variety of possible deployment architectures, the official Agent Operator Helm chart does not provide built-in templates for the custom resources described in this guide. You must configure and deploy these manually as described in this section. We recommend templating and adding the following manifests to your own in-house Helm charts and GitOps flows. -{{% /admonition %}} +{{< /admonition >}} To deploy the `GrafanaAgent` resource: @@ -145,7 +145,7 @@ To deploy the `GrafanaAgent` resource: - Specifies an Agent image version. - Specifies `MetricsInstance` and `LogsInstance` selectors. These search for `MetricsInstances` and `LogsInstances` in the same namespace with labels matching `agent: grafana-agent-metrics` and `agent: grafana-agent-logs`, respectively. - - Sets a `cluster: cloud` label for all metrics shipped to your Prometheus-compatible endpoint. Change this label to your cluster name. To search for `MetricsInstances` or `LogsInstances` in a *different* namespace, use the `instanceNamespaceSelector` field. To learn more about this field, see the `GrafanaAgent` [CRD specification](https://github.com/grafana/agent/blob/main/production/operator/crds/monitoring.grafana.com_grafanaagents.yaml). + - Sets a `cluster: cloud` label for all metrics shipped to your Prometheus-compatible endpoint. Change this label to your cluster name. To search for `MetricsInstances` or `LogsInstances` in a *different* namespace, use the `instanceNamespaceSelector` field. To learn more about this field, see the `GrafanaAgent` [CRD specification](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml). 1. Customize the manifests as needed and roll them out to your cluster using `kubectl apply -f` followed by the filename. @@ -381,9 +381,9 @@ To deploy the `LogsInstance` resource into your cluster: 1. Copy the following `PodLogs` manifest to a file, then roll it to your cluster using `kubectl apply -f` followed by the filename. The manifest defines your logging targets. Agent Operator turns this into Agent configuration for the logs subsystem, and rolls it out to the DaemonSet of logging Agents. - {{% admonition type="note" %}} + {{< admonition type="note" >}} The following is a minimal working example which you should adapt to your production needs. - {{% /admonition %}} + {{< /admonition >}} ```yaml apiVersion: monitoring.grafana.com/v1alpha1 @@ -403,7 +403,7 @@ To deploy the `LogsInstance` resource into your cluster: matchLabels: {} ``` - This example tails container logs for all Pods in the `default` namespace. You can restrict the set of matched Pods by using the `matchLabels` selector. You can also set additional `pipelineStages` and create `relabelings` to add or modify log line labels. To learn more about the `PodLogs` specification and available resource fields, see the [PodLogs CRD](https://github.com/grafana/agent/blob/main/production/operator/crds/monitoring.grafana.com_podlogs.yaml). + This example tails container logs for all Pods in the `default` namespace. You can restrict the set of matched Pods by using the `matchLabels` selector. You can also set additional `pipelineStages` and create `relabelings` to add or modify log line labels. To learn more about the `PodLogs` specification and available resource fields, see the [PodLogs CRD](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml). 
The above `PodLogs` resource adds the following labels to log lines: diff --git a/docs/sources/operator/getting-started.md b/docs/sources/operator/getting-started.md index c59acf233391..e7393880876b 100644 --- a/docs/sources/operator/getting-started.md +++ b/docs/sources/operator/getting-started.md @@ -35,7 +35,7 @@ will fail if it can't find the Custom Resource Definitions of objects it is looking to use. To learn more about the custom resources Agent Operator provides and their hierarchy, see [Grafana Agent Operator architecture]({{< relref "./architecture" >}}). You can find the set of Custom Resource Definitions for Grafana Agent Operator in the Grafana Agent repository under -[production/operator/crds](https://github.com/grafana/agent/tree/main/production/operator/crds). +[`operations/agent-static-operator/crds`](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds). To deploy the CRDs: diff --git a/docs/sources/operator/helm-getting-started.md b/docs/sources/operator/helm-getting-started.md index 78245505d859..bb63f01190ce 100644 --- a/docs/sources/operator/helm-getting-started.md +++ b/docs/sources/operator/helm-getting-started.md @@ -27,7 +27,7 @@ To deploy Agent Operator with Helm, make sure that you have the following: ## Install the Agent Operator Helm Chart -In this section, you'll install the [grafana-agent-operator Helm chart](https://github.com/grafana/helm-charts/tree/main/charts/agent-operator) into your Kubernetes cluster. This will install the latest version of Agent Operator and its [Custom Resource Definitions](https://github.com/grafana/agent/tree/main/production/operator/crds) (CRDs). The chart configures Operator to maintain a Service that lets you scrape kubelets using a `ServiceMonitor`. +In this section, you'll install the [grafana-agent-operator Helm chart](https://github.com/grafana/helm-charts/tree/main/charts/agent-operator) into your Kubernetes cluster. This will install the latest version of Agent Operator and its [Custom Resource Definitions](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds) (CRDs). The chart configures Operator to maintain a Service that lets you scrape kubelets using a `ServiceMonitor`. To install the Agent Operator Helm chart: diff --git a/docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md b/docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md index f70facdf3541..2997d8c140e8 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md +++ b/docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md @@ -16,7 +16,7 @@ The following arguments are supported: Name | Type | Description | Default | Required -----------------------------------|-----------|------------------------------------------------------|---------|--------- -`disable_high_cardinality_metrics` | `boolean` | Whether to disable certain high cardinality metrics. | `false` | no +`disable_high_cardinality_metrics` | `boolean` | Whether to disable certain high cardinality metrics. | `true` | no `disable_high_cardinality_metrics` is the Grafana Agent equivalent to the `telemetry.disableHighCardinalityMetrics` feature gate in the OpenTelemetry Collector. It removes attributes that could cause high cardinality metrics. 
diff --git a/docs/sources/shared/flow/reference/components/rule-block-logs.md b/docs/sources/shared/flow/reference/components/rule-block-logs.md index 180bb7e18167..3db6449ed1b1 100644 --- a/docs/sources/shared/flow/reference/components/rule-block-logs.md +++ b/docs/sources/shared/flow/reference/components/rule-block-logs.md @@ -40,6 +40,6 @@ You can use the following actions: * `replace` - Matches `regex` to the concatenated labels. If there's a match, it replaces the content of the `target_label` using the contents of the `replacement` field. * `uppercase` - Sets `target_label` to the uppercase form of the concatenated `source_labels`. -{{% admonition type="note" %}} +{{< admonition type="note" >}} The regular expression capture groups can be referred to using either the `$CAPTURE_GROUP_NUMBER` or `${CAPTURE_GROUP_NUMBER}` notation. -{{% /admonition %}} +{{< /admonition >}} diff --git a/docs/sources/shared/flow/reference/components/rule-block.md b/docs/sources/shared/flow/reference/components/rule-block.md index 0b732954b18a..614b062b0ec6 100644 --- a/docs/sources/shared/flow/reference/components/rule-block.md +++ b/docs/sources/shared/flow/reference/components/rule-block.md @@ -40,6 +40,6 @@ You can use the following actions: * `replace` - Matches `regex` to the concatenated labels. If there's a match, it replaces the content of the `target_label` using the contents of the `replacement` field. * `uppercase` - Sets `target_label` to the uppercase form of the concatenated `source_labels`. -{{% admonition type="note" %}} +{{< admonition type="note" >}} The regular expression capture groups can be referred to using either the `$CAPTURE_GROUP_NUMBER` or `${CAPTURE_GROUP_NUMBER}` notation. -{{% /admonition %}} +{{< /admonition >}} diff --git a/docs/sources/shared/wal-data-retention.md b/docs/sources/shared/wal-data-retention.md index 973af3afb4d6..e7fa38871801 100644 --- a/docs/sources/shared/wal-data-retention.md +++ b/docs/sources/shared/wal-data-retention.md @@ -86,9 +86,9 @@ is unsuccessful, and you must manually delete the corrupted WAL to continue. If the WAL becomes corrupted, Grafana Agent writes error messages such as `err="failed to find segment for index"` to the log file. -{{% admonition type="note" %}} +{{< admonition type="note" >}} Deleting a WAL segment or a WAL file permanently deletes the stored WAL data. -{{% /admonition %}} +{{< /admonition >}} To delete the corrupted WAL: @@ -100,12 +100,12 @@ To delete the corrupted WAL: may be different than the default depending on the [wal_directory][] setting in your Static configuration file or the path specified by the Flow [command line flag][run] `--storage-path`. - {{% admonition type="note" %}} + {{< admonition type="note" >}} There is one `wal` directory per: * Metrics instance running in Static mode * `prometheus.remote_write` component running in Flow mode - {{% /admonition %}} + {{< /admonition >}} 1. [Start][Stop] Grafana Agent and verify that the WAL is working correctly. diff --git a/docs/sources/static/_index.md b/docs/sources/static/_index.md index aab92a7991ec..4ce1f420367a 100644 --- a/docs/sources/static/_index.md +++ b/docs/sources/static/_index.md @@ -10,7 +10,7 @@ weight: 200 # Static mode -Static mode is the original mode of Grafana Agent, and is the most mature. +Static mode is the original mode of Grafana Agent. 
Static mode is composed of different _subsystems_: * The _metrics subsystem_ wraps around Prometheus for collecting Prometheus diff --git a/docs/sources/static/api/_index.md b/docs/sources/static/api/_index.md index 95995f5b6abb..077ba3e1b24f 100644 --- a/docs/sources/static/api/_index.md +++ b/docs/sources/static/api/_index.md @@ -23,12 +23,12 @@ API endpoints are stable unless otherwise noted. ## Config management API (Beta) -Grafana Agent exposes a config management REST API for managing instance configurations when it is running in [scraping service mode][scrape]. +Grafana Agent exposes a configuration management REST API for managing instance configurations when it's running in [scraping service mode][scrape]. -{{% admonition type="note" %}} -The scraping service mode is a requirement for the config management -API, however this is not a prerequisite for the Agent API or Ready/Healthy API. -{{% /admonition %}} +{{< admonition type="note" >}} +The scraping service mode is a requirement for the configuration management +API, however this isn't a prerequisite for the Agent API or Ready/Healthy API. +{{< /admonition >}} The following endpoints are exposed: @@ -37,6 +37,14 @@ The following endpoints are exposed: - Update config: [`PUT /agent/api/v1/config/{name}`](#update-config) - Delete config: [`DELETE /agent/api/v1/config/{name}`](#delete-config) +{{< admonition type="note" >}} +If you are running Grafana Agent in a Docker container and you want to expose the API outside the Docker container, you must change the default HTTP listen address from `127.0.0.1:12345` to a valid network interface address. +You can change the HTTP listen address with the command-line flag: `-server.http.address=0.0.0.0:12345`. +For more information, refer to the [Server](https://grafana.com/docs/agent/latest/static/configuration/flags/#server) command-line flag documentation. + +You must also publish the port in Docker. Refer to [Published ports](https://docs.docker.com/network/#published-ports) in the Docker documentation for more information. +{{< /admonition >}} + ### API response All Config Management API endpoints will return responses in the following @@ -127,13 +135,13 @@ defined in the Configuration Reference. The name field of the configuration is ignored and the name in the URL takes precedence. The request body must be formatted as YAML. -{{% admonition type="warning" %}} +{{< admonition type="warning" >}} By default, all instance configuration files that read credentials from a file on disk will be rejected. This prevents malicious users from reading the contents of arbitrary files as passwords and sending their contents to fake remote_write endpoints. To change the behavior, set `dangerous_allow_reading_files` to true in the `scraping_service` block. -{{% /admonition %}} +{{< /admonition >}} Status code: 201 with a new config, 200 on updated config. Response on success: @@ -174,9 +182,9 @@ Response on success: GET /agent/api/v1/metrics/instances ``` -{{% admonition type="note" %}} +{{< admonition type="note" >}} The deprecated alias is `/agent/api/v1/instances` -{{% /admonition %}} +{{< /admonition >}} Status code: 200 on success. Response on success: @@ -196,9 +204,9 @@ Response on success: GET /agent/api/v1/metrics/targets ``` -{{% admonition type="note" %}} +{{< admonition type="note" >}} The deprecated alias is `/agent/api/v1/targets` -{{% /admonition %}} +{{< /admonition >}} This endpoint collects all metrics subsystem targets known to the Agent across all running instances. 
Only targets being scraped from the local Agent will be returned. If diff --git a/docs/sources/static/configuration/_index.md b/docs/sources/static/configuration/_index.md index 92dc9a452bbd..fa1a195bd638 100644 --- a/docs/sources/static/configuration/_index.md +++ b/docs/sources/static/configuration/_index.md @@ -137,9 +137,9 @@ The following flags will configure basic auth for requests made to HTTP/S remote - `-config.url.basic-auth-user `: the basic auth username - `-config.url.basic-auth-password-file `: path to a file containing the basic auth password -{{% admonition type="note" %}} +{{< admonition type="note" >}} This beta feature is subject to change in future releases. -{{% /admonition %}} +{{< /admonition >}} {{% docs/reference %}} [flags]: "/docs/agent/ -> /docs/agent//static/configuration/flags" diff --git a/docs/sources/static/configuration/integrations/snmp-config.md b/docs/sources/static/configuration/integrations/snmp-config.md index 893348006f92..c9c4f910f908 100644 --- a/docs/sources/static/configuration/integrations/snmp-config.md +++ b/docs/sources/static/configuration/integrations/snmp-config.md @@ -14,9 +14,9 @@ The `snmp` block configures the `snmp` integration, which is an embedded version of [`snmp_exporter`](https://github.com/prometheus/snmp_exporter). This allows collection of SNMP metrics from the network devices with ease. -{{% admonition type="note" %}} +{{< admonition type="note" >}} `snmp config` uses the latest configuration introduced in version 0.23 of the Prometheus `snmp_exporter`. -{{% /admonition %}} +{{< /admonition >}} ## Quick configuration example diff --git a/docs/sources/static/configuration/integrations/windows-exporter-config.md b/docs/sources/static/configuration/integrations/windows-exporter-config.md index 7f12117ebfbc..bcb753b0860c 100644 --- a/docs/sources/static/configuration/integrations/windows-exporter-config.md +++ b/docs/sources/static/configuration/integrations/windows-exporter-config.md @@ -62,7 +62,7 @@ Full reference of options: # List of collectors to enable. Any non-experimental collector from the # embedded version of windows_exporter can be enabled here. - [enabled_collectors: | default = "cpu,cs,logical_disk,net,os,service,system,textfile"] + [enabled_collectors: | default = "cpu,cs,logical_disk,net,os,service,system"] # Settings for collectors which accept configuration. Settings specified here # are only used if the corresponding collector is enabled in diff --git a/docs/sources/static/configuration/scraping-service.md b/docs/sources/static/configuration/scraping-service.md index bb1a4ceac925..ccfb2c67c6bf 100644 --- a/docs/sources/static/configuration/scraping-service.md +++ b/docs/sources/static/configuration/scraping-service.md @@ -168,6 +168,23 @@ container with the `grafana/agentctl` image. Tanka configurations that utilize `grafana/agentctl` and sync a set of configurations to the API are planned for the future. +## Debug Ring endpoint + +You can use the `/debug/ring` endpoint to troubleshoot issues with the scraping service in Scraping Service Mode. +It provides information about the Distributed Hash Ring and the current distribution of configurations among Agents in the cluster. +It also allows you to forget an instance in the ring manually. + +You can access this endpoint by making an HTTP request to the Agent's API server. + +Information returned by the `/debug/ring` endpoint includes: + +- The list of Agents in the cluster, and their respective tokens used for sharding. 
+- The list of configuration files in the KV store and the associated hash values used for lookup in the ring.
+- The unique instance ID assigned to each instance of the Agent running in the cluster.
+  The exact details of how the instance ID is generated are an implementation detail of Grafana Agent.
+- The time of the "Last Heartbeat" of each instance, which is the last time the instance was active in the ring.
+
 {{% docs/reference %}}
 [api]: "/docs/agent/ -> /docs/agent//static/api"
 [api]: "/docs/grafana-cloud/ -> ../api"
diff --git a/docs/sources/static/configuration/traces-config.md b/docs/sources/static/configuration/traces-config.md
index 8ede4e9eb94e..4ff3bfc85e2a 100644
--- a/docs/sources/static/configuration/traces-config.md
+++ b/docs/sources/static/configuration/traces-config.md
@@ -17,11 +17,11 @@ configures its own tracing pipeline. Having multiple configs allows you to
 configure multiple distinct pipelines, each of which collects spans and sends
 them to a different location.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 If you are using multiple configs, you must manually set port numbers for
 each receiver, otherwise they will all try to use the same port and fail to
 start.
-{{% /admonition %}}
+{{< /admonition >}}
 
 ```yaml
 configs:
diff --git a/docs/sources/static/release-notes.md b/docs/sources/static/release-notes.md
index ad01a1fb4404..90afd41dfc42 100644
--- a/docs/sources/static/release-notes.md
+++ b/docs/sources/static/release-notes.md
@@ -46,9 +46,9 @@ that supports OTLP.
 
 ### Breaking change: The default value of `retry_on_http_429` is overriden to `true` for the `queue_config` in `remote_write` in `metrics` config.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 The default set by Grafana Agent Static Mode is different than the default set by Prometheus.
-{{% /admonition %}}
+{{< /admonition >}}
 
 The Prometheus default value for `retry_on_http_429` is set to `true` for the `queue_config` in `remote_write`. This changed default setting allows the agent to retry sending data when it receives an HTTP 429 error and helps avoid losing data in metric pipelines.
diff --git a/docs/sources/static/set-up/install/_index.md b/docs/sources/static/set-up/install/_index.md
index 24663d2bdade..3e62fdbdf80d 100644
--- a/docs/sources/static/set-up/install/_index.md
+++ b/docs/sources/static/set-up/install/_index.md
@@ -22,24 +22,20 @@ The following architectures are supported:
 - macOS: AMD64 (Intel), ARM64 (Apple Silicon)
 - FreeBSD: AMD64
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 ppc64le builds are considered secondary release targets and do not have the
 same level of support and testing as other platforms.
-{{% /admonition %}}
+{{< /admonition >}}
 
 {{< section >}}
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 Installing Grafana Agent on other operating systems is possible, but is not
 recommended or supported.
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Grafana Cloud
 
 Use the Grafana Agent [Kubernetes configuration](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/) or follow instructions for installing the Grafana Agent in the [Walkthrough](/docs/grafana-cloud/monitor-infrastructure/integrations/get-started/).
-## Tanka - -For more information, refer to the [Tanka](https://tanka.dev) configurations in the Grafana Agent [production](https://github.com/grafana/agent/tree/main/production/tanka/grafana-agent) directory on GitHub. - ## Data collection By default, Grafana Agent sends anonymous usage information to Grafana Labs. Refer to [data collection][] for more information diff --git a/docs/sources/static/set-up/install/install-agent-binary.md b/docs/sources/static/set-up/install/install-agent-binary.md index 82e14c59119a..8d53d8376821 100644 --- a/docs/sources/static/set-up/install/install-agent-binary.md +++ b/docs/sources/static/set-up/install/install-agent-binary.md @@ -19,9 +19,9 @@ Grafana Agent is distributed as a standalone binary for the following operating * macOS: AMD64, (Intel), ARM64 (Apple Silicon) * Windows: AMD64 -{{% admonition type="note" %}} +{{< admonition type="note" >}} ppc64le builds are considered secondary release targets and do not have the same level of support and testing as other platforms. -{{% /admonition %}} +{{< /admonition >}} The binary executable will run Grafana Agent in standalone mode. If you want to run Grafana Agent as a service, refer to the installation instructions for: diff --git a/docs/sources/static/set-up/install/install-agent-docker.md b/docs/sources/static/set-up/install/install-agent-docker.md index ad563251c2ae..bece55596635 100644 --- a/docs/sources/static/set-up/install/install-agent-docker.md +++ b/docs/sources/static/set-up/install/install-agent-docker.md @@ -41,9 +41,9 @@ docker run \ Replace `CONFIG_FILE_PATH` with the configuration file path on your Linux host system. -{{% admonition type="note" %}} +{{< admonition type="note" >}} For the flags to work correctly, you must expose the paths on your Linux host to the Docker container through a bind mount. -{{%/admonition %}} +{{< /admonition >}} ## Run a Windows Docker container @@ -61,9 +61,9 @@ Replace the following: * `CONFIG_FILE_PATH`: The configuration file path on your Windows host system. * `WAL_DATA_DIRECTORY`: the directory used to store your metrics before sending them to Prometheus. Old WAL data is cleaned up every hour and is used for recovery if the process crashes. -{{% admonition type="note" %}} +{{< admonition type="note" >}} For the flags to work correctly, you must expose the paths on your Windows host to the Docker container through a bind mount. -{{%/admonition %}} +{{< /admonition >}} ## Next steps diff --git a/docs/sources/static/set-up/install/install-agent-kubernetes.md b/docs/sources/static/set-up/install/install-agent-kubernetes.md index 95fdd5597b53..d55a7d9af2a5 100644 --- a/docs/sources/static/set-up/install/install-agent-kubernetes.md +++ b/docs/sources/static/set-up/install/install-agent-kubernetes.md @@ -23,10 +23,10 @@ You can use the Helm chart for Grafana Agent to deploy Grafana Agent in static m ## Deploy -{{% admonition type="note" %}} +{{< admonition type="note" >}} These instructions show you how to install the generic [Helm chart](https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent) for Grafana Agent. You can deploy Grafana Agent in static mode or flow mode. The Helm chart deploys flow mode by default. 
-{{% /admonition %}} +{{< /admonition >}} To deploy Grafana Agent in static mode on Kubernetes using Helm, run the following commands in a terminal window: @@ -52,10 +52,10 @@ To deploy Grafana Agent in static mode on Kubernetes using Helm, run the followi - _``_: The name to use for your Grafana Agent installation, such as `grafana-agent`. - {{% admonition type="warning" %}} + {{< admonition type="warning" >}} Always pass `--set agent.mode=static` in `helm install` or `helm upgrade` commands to ensure Grafana Agent gets installed in static mode. Alternatively, set `agent.mode` to `static` in your values.yaml file. - {{% /admonition %}} + {{< /admonition >}} For more information on the Grafana Agent Helm chart, refer to the Helm chart documentation on [Artifact Hub][]. diff --git a/docs/sources/static/set-up/install/install-agent-macos.md b/docs/sources/static/set-up/install/install-agent-macos.md index 48f383ee6b7d..c23bd59ec52b 100644 --- a/docs/sources/static/set-up/install/install-agent-macos.md +++ b/docs/sources/static/set-up/install/install-agent-macos.md @@ -19,9 +19,9 @@ You can install Grafana Agent in static mode on macOS with Homebrew. Install [Homebrew][] on your computer. -{{% admonition type="note" %}} +{{< admonition type="note" >}} The default prefix for Homebrew on Intel is `/usr/local`. The default prefix for Homebrew on Apple Silicon is `/opt/Homebrew`. To verify the default prefix for Homebrew on your computer, open a terminal window and type `brew --prefix`. -{{% /admonition %}} +{{< /admonition >}} [Homebrew]: https://brew.sh @@ -76,9 +76,9 @@ brew uninstall grafana-agent 1. Edit `$(brew --prefix)/etc/grafana-agent/config.yml` and add the configuration blocks for your specific telemetry needs. Refer to [Configure Grafana Agent][configure] for more information. -{{% admonition type="note" %}} +{{< admonition type="note" >}} To send your data to Grafana Cloud, set up Grafana Agent using the Grafana Cloud integration. Refer to [how to install an integration](/docs/grafana-cloud/data-configuration/integrations/install-and-manage-integrations/) and [macOS integration](/docs/grafana-cloud/data-configuration/integrations/integration-reference/integration-macos-node/). -{{%/admonition %}} +{{< /admonition >}} ## Next steps diff --git a/go.mod b/go.mod index 8da98c5aad41..c9800748482b 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( github.com/google/dnsmasq_exporter v0.2.1-0.20230620100026-44b14480804a github.com/google/go-cmp v0.6.0 github.com/google/go-jsonnet v0.18.0 - github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 + github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 github.com/google/renameio/v2 v2.0.0 github.com/google/uuid v1.4.0 github.com/gorilla/mux v1.8.0 @@ -56,11 +56,11 @@ require ( github.com/grafana/go-gelf/v2 v2.0.1 // Loki main commit where the Prometheus dependency matches ours. 
TODO(@tpaschalis) Update to kXYZ branch once it's available github.com/grafana/loki v1.6.2-0.20231004111112-07cbef92268a - github.com/grafana/pyroscope-go/godeltaprof v0.1.6 + github.com/grafana/pyroscope-go/godeltaprof v0.1.7 github.com/grafana/pyroscope/api v0.4.0 github.com/grafana/pyroscope/ebpf v0.4.1 github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db - github.com/grafana/river v0.3.0 + github.com/grafana/river v0.3.1-0.20240123144725-960753160cd1 github.com/grafana/snowflake-prometheus-exporter v0.0.0-20221213150626-862cad8e9538 github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 github.com/grafana/vmware_exporter v0.0.4-beta @@ -94,7 +94,7 @@ require ( github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/ncabatoff/process-exporter v0.7.10 github.com/nerdswords/yet-another-cloudwatch-exporter v0.55.0 - github.com/ohler55/ojg v1.20.0 // indirect + github.com/ohler55/ojg v1.20.1 // indirect github.com/oklog/run v1.1.0 github.com/olekukonko/tablewriter v0.0.5 github.com/oliver006/redis_exporter v1.54.0 @@ -109,12 +109,14 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.87.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/loki v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.87.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.87.0 @@ -143,7 +145,7 @@ require ( github.com/prometheus/blackbox_exporter v0.24.1-0.20230623125439-bd22efa1c900 github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 - github.com/prometheus/common v0.45.0 + github.com/prometheus/common v0.46.0 github.com/prometheus/consul_exporter v0.8.0 github.com/prometheus/memcached_exporter v0.13.0 github.com/prometheus/mysqld_exporter v0.14.0 @@ -211,11 +213,11 @@ require ( go.uber.org/goleak v1.2.1 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.17.0 + golang.org/x/crypto v0.18.0 golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa - golang.org/x/net v0.19.0 - golang.org/x/oauth2 v0.13.0 - golang.org/x/sys v0.15.0 + golang.org/x/net v0.20.0 + golang.org/x/oauth2 v0.16.0 + golang.org/x/sys v0.16.0 golang.org/x/text v0.14.0 golang.org/x/time v0.3.0 google.golang.org/api v0.149.0 @@ -580,7 +582,7 @@ require ( go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/sync v0.5.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/term v0.16.0 // indirect 
golang.org/x/tools v0.15.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect @@ -606,6 +608,7 @@ require github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab require ( connectrpc.com/connect v1.14.0 github.com/githubexporter/github-exporter v0.0.0-20231025122338-656e7dc33fe7 + github.com/grafana/jfr-parser/pprof v0.0.0-20240126072739-986e71dc0361 github.com/natefinch/atomic v1.0.1 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.87.0 @@ -619,7 +622,9 @@ require ( dario.cat/mergo v1.0.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0 // indirect github.com/Shopify/sarama v1.38.1 // indirect + github.com/Showmax/go-fqdn v1.0.0 // indirect github.com/Workiva/go-datastructures v1.1.0 // indirect github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.26.0 // indirect github.com/channelmeter/iso8601duration v0.0.0-20150204201828-8da3af7a2a61 // indirect @@ -629,17 +634,20 @@ require ( github.com/go-jose/go-jose/v3 v3.0.1 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect + github.com/grafana/jfr-parser v0.8.0 // indirect github.com/hetznercloud/hcloud-go/v2 v2.4.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/knadh/koanf/v2 v2.0.1 // indirect github.com/lightstep/go-expohisto v1.0.0 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.87.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.87.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.87.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.87.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.87.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.87.0 // indirect github.com/openshift/api v3.9.0+incompatible // indirect github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect @@ -684,9 +692,10 @@ replace ( // // * There is a release of Prometheus which contains // prometheus/prometheus#13002 +// and prometheus/prometheus#13497 // We use the last v1-related tag as the replace statement does not work for v2 // tags without the v2 suffix to the module root. 
-replace github.com/prometheus/prometheus => github.com/grafana/prometheus v1.8.2-0.20240105105355-3e2c486167d2 // grafana/prometheus@drop-old-inmemory-samples-squashed-2 +replace github.com/prometheus/prometheus => github.com/grafana/prometheus v1.8.2-0.20240130142130-51b39f24d406 // cmp_header_order branch replace gopkg.in/yaml.v2 => github.com/rfratto/go-yaml v0.0.0-20211119180816-77389c3526dc diff --git a/go.sum b/go.sum index 3223f47d7f2b..6710608f0f42 100644 --- a/go.sum +++ b/go.sum @@ -183,6 +183,8 @@ github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6/go.mod h1:LButx github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962 h1:KeNholpO2xKjgaaSyd+DyQRrsQjhbSeS7qe4nEw8aQw= github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962/go.mod h1:kC29dT1vFpj7py2OvG1khBdQpo3kInWP+6QipLbdngo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0 h1:tk85AYGwOf6VNtoOQi8w/kVDi2vmPxp3/OU2FsUpdcA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/IBM/sarama v1.42.1 h1:wugyWa15TDEHh2kvq2gAy1IHLjEjuYOYgXz/ruC/OSQ= @@ -237,6 +239,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWso github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0= +github.com/Showmax/go-fqdn v1.0.0 h1:0rG5IbmVliNT5O19Mfuvna9LL7zlHyRfsSvBPZmF9tM= +github.com/Showmax/go-fqdn v1.0.0/go.mod h1:SfrFBzmDCtCGrnHhoDjuvFnKsWjEQX/Q9ARZvOrJAko= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -990,8 +994,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 h1:WzfWbQz/Ze8v6l++GGbGNFZnUShVpP/0xffCPLL+ax8= +github.com/google/pprof v0.0.0-20240117000934-35fc243c5815/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= @@ -1053,6 +1057,10 @@ github.com/grafana/go-gelf/v2 
v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= github.com/grafana/gomemcache v0.0.0-20230316202710-a081dae0aba9 h1:WB3bGH2f1UN6jkd6uAEWfHB8OD7dKJ0v2Oo6SNfhpfQ= github.com/grafana/gomemcache v0.0.0-20230316202710-a081dae0aba9/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/jfr-parser v0.8.0 h1:/uo2wZNXrxw7tKLFwP2omJ3EQGMkD9wzhPsRogVofc0= +github.com/grafana/jfr-parser v0.8.0/go.mod h1:M5u1ux34Qo47ZBWksbMYVk40s7dvU3WMVYpxweEu4R0= +github.com/grafana/jfr-parser/pprof v0.0.0-20240126072739-986e71dc0361 h1:TtNajaiSRfM2Mz8N7ouFQDFlviXbIEk9Hts0yoZnhGM= +github.com/grafana/jfr-parser/pprof v0.0.0-20240126072739-986e71dc0361/go.mod h1:P5406BrWxjahTzVF6aCSumNI1KPlZJc0zO0v+zKZ4gc= github.com/grafana/loki v1.6.2-0.20231004111112-07cbef92268a h1:lvSHlNONeo/H+aWRk86QEfBpRDCEX1yoqpsCK0Tys+g= github.com/grafana/loki v1.6.2-0.20231004111112-07cbef92268a/go.mod h1:a5c5ZTC6FNufKkvF8NeDAb2nCWJpgkVDrejmV+O9hac= github.com/grafana/loki/pkg/push v0.0.0-20231212100434-384e5c2dc872 h1:6kPX7bngjBgUlHqADwZ6249UtzMaoQW5n0H8bOtnYeM= @@ -1067,18 +1075,18 @@ github.com/grafana/opentelemetry-collector/service v0.0.0-20231018134914-c0109e0 github.com/grafana/opentelemetry-collector/service v0.0.0-20231018134914-c0109e052230/go.mod h1:kBdpzrqR2wJkOdg50yzp4dv+2XBMyeqTgF4lCx0hSpQ= github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520 h1:HnFWqxhoSF3WC7sKAdMZ+SRXvHLVZlZ3sbQjuUlTqkw= github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520/go.mod h1:+HPXgiOV0InDHcZ2jNijL1SOKvo0eEPege5fQA0+ICI= -github.com/grafana/prometheus v1.8.2-0.20240105105355-3e2c486167d2 h1:eJD8U9G91ID/pKsLjJnjqve8yv1NiE/l6dGYnwchPVM= -github.com/grafana/prometheus v1.8.2-0.20240105105355-3e2c486167d2/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= -github.com/grafana/pyroscope-go/godeltaprof v0.1.6 h1:nEdZ8louGAplSvIJi1HVp7kWvFvdiiYg3COLlTwJiFo= -github.com/grafana/pyroscope-go/godeltaprof v0.1.6/go.mod h1:Tk376Nbldo4Cha9RgiU7ik8WKFkNpfds98aUzS8omLE= +github.com/grafana/prometheus v1.8.2-0.20240130142130-51b39f24d406 h1:LVIOYe5j92m10wluP5hgeHqSkOLnZzcPxhYCkdbLXCE= +github.com/grafana/prometheus v1.8.2-0.20240130142130-51b39f24d406/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= +github.com/grafana/pyroscope-go/godeltaprof v0.1.7 h1:C11j63y7gymiW8VugJ9ZW0pWfxTZugdSJyC48olk5KY= +github.com/grafana/pyroscope-go/godeltaprof v0.1.7/go.mod h1:Tk376Nbldo4Cha9RgiU7ik8WKFkNpfds98aUzS8omLE= github.com/grafana/pyroscope/api v0.4.0 h1:J86DxoNeLOvtJhB1Cn65JMZkXe682D+RqeoIUiYc/eo= github.com/grafana/pyroscope/api v0.4.0/go.mod h1:MFnZNeUM4RDsDOnbgKW3GWoLSBpLzMMT9nkvhHHo81o= github.com/grafana/pyroscope/ebpf v0.4.1 h1:iqQoOsfKen5KpTRe6MfGeBZfgK1s7ROH+Cs/vZs1B3A= github.com/grafana/pyroscope/ebpf v0.4.1/go.mod h1:W99Mq+yJGP5nZUQWNv+jVytiWWgWXwHjIRmi9k3xHzA= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db h1:7aN5cccjIqCLTzedH7MZzRZt5/lsAHch6Z3L2ZGn5FA= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/grafana/river v0.3.0 h1:6TsaR/vkkcppUM9I0muGbPIUedCtpPu6OWreE5+CE6g= -github.com/grafana/river v0.3.0/go.mod h1:icSidCSHYXJUYy6TjGAi/D+X7FsP7Gc7cxvBUIwYMmY= +github.com/grafana/river v0.3.1-0.20240123144725-960753160cd1 h1:mCOKdWkLv8n9X0ORWrPR+W/zLOAa1o6iM+Dfy0ofQUs= +github.com/grafana/river v0.3.1-0.20240123144725-960753160cd1/go.mod h1:tAiNX2zt3HUsNyPNUDSvE6AgQ4+kqJvljBI+ACppMtM= 
github.com/grafana/smimesign v0.2.1-0.20220408144937-2a5adf3481d3 h1:UPkAxuhlAcRmJT3/qd34OMTl+ZU7BLLfOO2+NXBlJpY= github.com/grafana/smimesign v0.2.1-0.20220408144937-2a5adf3481d3/go.mod h1:iZiiwNT4HbtGRVqCQu7uJPEZCuEE5sfSSttcnePkDl4= github.com/grafana/snowflake-prometheus-exporter v0.0.0-20221213150626-862cad8e9538 h1:tkT0yha3JzB5S5VNjfY4lT0cJAe20pU8XGt3Nuq73rM= @@ -1422,6 +1430,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/k0kubun/pp/v3 v3.2.0 h1:h33hNTZ9nVFNP3u2Fsgz8JXiF5JINoZfFq4SvKJwNcs= +github.com/k0kubun/pp/v3 v3.2.0/go.mod h1:ODtJQbQcIRfAD3N+theGCV1m/CBxweERz2dapdz1EwA= github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= @@ -1543,8 +1553,6 @@ github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOq github.com/mattn/go-xmlrpc v0.0.3 h1:Y6WEMLEsqs3RviBrAa1/7qmbGB7DVD3brZIbqMbQdGY= github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM= github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= @@ -1682,8 +1690,8 @@ github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnu github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/ohler55/ojg v1.20.0 h1:hmpsD9VyuoVH7bHCPtni9eCpOxiIhSlIEzNndXkCySY= -github.com/ohler55/ojg v1.20.0/go.mod h1:uHcD1ErbErC27Zhb5Df2jUjbseLLcmOCo6oxSr3jZxo= +github.com/ohler55/ojg v1.20.1 h1:Io65sHjMjYPI7yuhUr8VdNmIQdYU6asKeFhOs8xgBnY= +github.com/ohler55/ojg v1.20.1/go.mod h1:uHcD1ErbErC27Zhb5Df2jUjbseLLcmOCo6oxSr3jZxo= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v0.0.0-20180308005104-6934b124db28/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= @@ -1737,6 +1745,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2client github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension v0.87.0/go.mod h1:DRpgdIDMa+CFE96SoEPwigGBuZbwSNWotTgkJlrZMVc= github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.87.0 h1:Z4o71/rS7mmpJ/9uzta3/nTaT+vKt0CU35o4inDLA9Y= 
github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.87.0/go.mod h1:clScLUe8m0CTZMcV0scqq+fFFvw5Q1dASkYlYsrRptM= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.87.0 h1:JJsQ6iMFIDb7W6uLh6LQ5k4XOgWolr7ugVBoeV4l7hQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.87.0/go.mod h1:rDdtaUrMV6TJHqssyiYSfsLfFN1pIg4JOTDaE9AUapQ= github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.87.0 h1:W4Ty2pSyge/qNAOILO6HqyKrAcgALs0bn5CmpGZJXVo= github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.87.0/go.mod h1:3EFmVoLcdM8Adj75N8TGJ4txDB29oW1chTLCFiL/wxs= github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.87.0 h1:ekT4/I9J484j4yR/0VHj5AGtgv8KmNd+e4oXxNJNR/o= @@ -1749,6 +1759,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.87 github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.87.0/go.mod h1:ntSfqIeoGj0O+pXXyqDG9iTAw/PQg2JsO26EJ1GAKto= github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.87.0 h1:kDamu7uZHRmeJWqaJg42LSgprRGokmQ4t8ACslzS0GU= github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.87.0/go.mod h1:EAw9aBkrDIDWQvRBdJiDkaJmCqcgZpiZzYZEvOjg4uI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.87.0 h1:8pVElJ4AMIiJxS+sxnK9CX73RED7iv/FYbqkvvX01ig= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.87.0/go.mod h1:zRQU4eN6rNXeVKD8g2p2Czb88o/Hd2BkVdar5nCk0+k= github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.87.0 h1:sx1ye7Y2rJ2qi11i2ih9T7BocxaV0uaBBf7B8ijCYpU= github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.87.0/go.mod h1:AobBiNPFNHUm0MJFTieajasG/xNMjMYI7BGGTSKh0xg= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.87.0 h1:sy75u6ZwBvRwv9RjEF65SqlkBsAeZFqF4+eFOLhIsJQ= @@ -1781,6 +1793,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattribute github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.87.0/go.mod h1:g6H0fB9TW03Lb8M+H0BXtgQp7gPncIwf3Fk73xOs9EA= github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.87.0 h1:QJKdtNcsxBhG2ZwSzYRVI0oxUqBJJvhfWf0OnjHU3jY= github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.87.0/go.mod h1:skMmFcl+gxyiOQXvwHc0IKpC73iyQ7zl9r1aRNmPMwI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.87.0 h1:gEv7UNu4K5ptvKIpWQmVS+0XMrIzqZWczcjyhLnsx9M= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.87.0/go.mod h1:6Rnjwj4bZU7Ab+nLD1YqQlbdsnsKoOR/OzyI42+PyE8= github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor v0.87.0 h1:BIGb6dfmaTlDE7KbiQUhnD9SvL5HanbJbWJrnzURfPY= github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor v0.87.0/go.mod h1:EnaQxXfCCWkSEfsQbGOvYbeJ/EuqvtMYTLTq8RN6TiY= github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.87.0 h1:4l/QetnprIMethZYfD2RK+MfMR83f6QycYb9bhJFItc= @@ -1965,8 +1979,8 @@ github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.32.1/go.mod 
h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= +github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/consul_exporter v0.8.0 h1:2z3drFic65WFoHaJRKkmnJRRlBLmmxVqT8L9LO2yxAo= @@ -2494,8 +2508,8 @@ golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2Uz golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2616,8 +2630,8 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2635,8 +2649,8 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod 
h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2779,8 +2793,8 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2791,8 +2805,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/integration-tests/common/metric.go b/integration-tests/common/metric.go index 95990a042475..fc3b6fee74ee 100644 --- a/integration-tests/common/metric.go +++ b/integration-tests/common/metric.go @@ -5,6 +5,11 @@ import ( "fmt" ) +type MetricsResponse struct { + Status string `json:"status"` + Data []Metric `json:"data"` +} + type MetricResponse struct { Status string `json:"status"` Data MetricData `json:"data"` @@ -53,6 +58,10 @@ func (m *MetricResponse) Unmarshal(data []byte) error { return json.Unmarshal(data, m) } +func (m *MetricsResponse) Unmarshal(data []byte) error { + return json.Unmarshal(data, m) +} + func (h *HistogramRawData) UnmarshalJSON(b []byte) error { var arr []json.RawMessage if err := json.Unmarshal(b, &arr); err != nil { diff --git a/integration-tests/common/metrics_assert.go b/integration-tests/common/metrics_assert.go new file mode 100644 index 000000000000..51f2d97daa51 --- /dev/null +++ b/integration-tests/common/metrics_assert.go @@ -0,0 +1,146 @@ +package common + +import ( + "fmt" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const promURL = "http://localhost:9009/prometheus/api/v1/" + +// Default metrics list according to what the 
prom-gen app is generating.
+var PromDefaultMetrics = []string{
+	"golang_counter",
+	"golang_gauge",
+	"golang_histogram_bucket",
+	"golang_summary",
+}
+
+// Default histogram metrics list according to what the prom-gen app is generating.
+var PromDefaultHistogramMetric = []string{
+	"golang_native_histogram",
+}
+
+// Default metrics list according to what the otel-metrics-gen app is generating.
+var OtelDefaultMetrics = []string{
+	"example_counter",
+	"example_float_counter",
+	"example_updowncounter",
+	"example_float_updowncounter",
+	"example_histogram_bucket",
+	"example_float_histogram_bucket",
+}
+
+// Default histogram metrics list according to what the otel-metrics-gen app is generating.
+var OtelDefaultHistogramMetrics = []string{
+	"example_exponential_histogram",
+	"example_exponential_float_histogram",
+}
+
+// MetricQuery returns a formatted Prometheus metric query with a given metricName and the given test_name label.
+func MetricQuery(metricName string, testName string) string {
+	return fmt.Sprintf("%squery?query=%s{test_name='%s'}", promURL, metricName, testName)
+}
+
+// MetricsQuery returns a formatted Prometheus series query that matches the given test_name label.
+func MetricsQuery(testName string) string {
+	return fmt.Sprintf("%sseries?match[]={test_name='%s'}", promURL, testName)
+}
+
+// MimirMetricsTest checks that all given metrics are stored in Mimir.
+func MimirMetricsTest(t *testing.T, metrics []string, histogramMetrics []string, testName string) {
+	AssertMetricsAvailable(t, metrics, histogramMetrics, testName)
+	for _, metric := range metrics {
+		metric := metric
+		t.Run(metric, func(t *testing.T) {
+			t.Parallel()
+			AssertMetricData(t, MetricQuery(metric, testName), metric, testName)
+		})
+	}
+	for _, metric := range histogramMetrics {
+		metric := metric
+		t.Run(metric, func(t *testing.T) {
+			t.Parallel()
+			AssertHistogramData(t, MetricQuery(metric, testName), metric, testName)
+		})
+	}
+}
+
+// AssertMetricsAvailable performs a Prometheus query and expects the result to eventually contain the list of expected metrics.
+func AssertMetricsAvailable(t *testing.T, metrics []string, histogramMetrics []string, testName string) {
+	var missingMetrics []string
+	expectedMetrics := append(metrics, histogramMetrics...)
+	query := MetricsQuery(testName)
+	require.EventuallyWithT(t, func(c *assert.CollectT) {
+		var metricsResponse MetricsResponse
+		err := FetchDataFromURL(query, &metricsResponse)
+		assert.NoError(c, err)
+		missingMetrics = checkMissingMetrics(expectedMetrics, metricsResponse.Data)
+		msg := fmt.Sprintf("Some metrics are missing: %v", missingMetrics)
+		if len(missingMetrics) == len(expectedMetrics) {
+			msg = "All metrics are missing"
+		}
+		assert.Empty(c, missingMetrics, msg)
+	}, DefaultTimeout, DefaultRetryInterval)
+}
+
+// checkMissingMetrics returns the expectedMetrics which are not contained in actualMetrics.
+func checkMissingMetrics(expectedMetrics []string, actualMetrics []Metric) []string {
+	metricSet := make(map[string]struct{}, len(actualMetrics))
+	for _, metric := range actualMetrics {
+		metricSet[metric.Name] = struct{}{}
+	}
+
+	var missingMetrics []string
+	for _, expectedMetric := range expectedMetrics {
+		if _, exists := metricSet[expectedMetric]; !exists {
+			missingMetrics = append(missingMetrics, expectedMetric)
+		}
+	}
+	return missingMetrics
+}
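+
+// A minimal usage sketch of MimirMetricsTest (the test and metric names here
+// are hypothetical, for illustration only; real tests import this package as
+// "common" and call common.MimirMetricsTest):
+//
+//	func TestMyAppMetrics(t *testing.T) {
+//		metrics := []string{"myapp_requests_total"}
+//		histograms := []string{"myapp_request_duration_seconds"}
+//		MimirMetricsTest(t, metrics, histograms, "myapp_metrics")
+//	}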
+
+// AssertHistogramData performs a Prometheus query and expects the result to eventually contain the expected histogram.
+// The count and sum metrics should be greater than 10 before the timeout triggers.
+func AssertHistogramData(t *testing.T, query string, expectedMetric string, testName string) {
+	var metricResponse MetricResponse
+	assert.EventuallyWithT(t, func(c *assert.CollectT) {
+		err := FetchDataFromURL(query, &metricResponse)
+		assert.NoError(c, err)
+		if assert.NotEmpty(c, metricResponse.Data.Result) {
+			assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, expectedMetric)
+			assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, testName)
+			if assert.NotNil(c, metricResponse.Data.Result[0].Histogram) {
+				histogram := metricResponse.Data.Result[0].Histogram
+				if assert.NotEmpty(c, histogram.Data.Count) {
+					count, _ := strconv.Atoi(histogram.Data.Count)
+					assert.Greater(c, count, 10, "Count should eventually be greater than 10.")
+				}
+				if assert.NotEmpty(c, histogram.Data.Sum) {
+					sum, _ := strconv.ParseFloat(histogram.Data.Sum, 64)
+					assert.Greater(c, sum, 10., "Sum should eventually be greater than 10.")
+				}
+				assert.NotEmpty(c, histogram.Data.Buckets)
+				assert.Nil(c, metricResponse.Data.Result[0].Value)
+			}
+		}
+	}, DefaultTimeout, DefaultRetryInterval, "Histogram data did not satisfy the conditions within the time limit")
+}
+
+// AssertMetricData performs a Prometheus query and expects the result to eventually contain the expected metric.
+func AssertMetricData(t *testing.T, query, expectedMetric string, testName string) {
+	var metricResponse MetricResponse
+	assert.EventuallyWithT(t, func(c *assert.CollectT) {
+		err := FetchDataFromURL(query, &metricResponse)
+		assert.NoError(c, err)
+		if assert.NotEmpty(c, metricResponse.Data.Result) {
+			assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, expectedMetric)
+			assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, testName)
+			assert.NotEmpty(c, metricResponse.Data.Result[0].Value.Value)
+			assert.Nil(c, metricResponse.Data.Result[0].Histogram)
+		}
+	}, DefaultTimeout, DefaultRetryInterval, "Data did not satisfy the conditions within the time limit")
+}
diff --git a/integration-tests/docker-compose.yaml b/integration-tests/docker-compose.yaml
index a94a05db21d9..eb5650f75f7f 100644
--- a/integration-tests/docker-compose.yaml
+++ b/integration-tests/docker-compose.yaml
@@ -29,4 +29,8 @@ services:
       dockerfile: ./integration-tests/configs/prom-gen/Dockerfile
      context: ..
ports: - - "9001:9001" \ No newline at end of file + - "9001:9001" + redis: + image: redis:6.0.9-alpine + ports: + - "6379:6379" \ No newline at end of file diff --git a/integration-tests/main.go b/integration-tests/main.go index 29009742efae..7b48d21576d3 100644 --- a/integration-tests/main.go +++ b/integration-tests/main.go @@ -43,7 +43,7 @@ func runIntegrationTests(cmd *cobra.Command, args []string) { specificTest = "./tests/" + specificTest } logChan = make(chan TestLog, 1) - runSingleTest(specificTest) + runSingleTest(specificTest, 12345) } else { testDirs, err := filepath.Glob("./tests/*") if err != nil { diff --git a/integration-tests/tests/otlp-metrics/otlp_metrics_test.go b/integration-tests/tests/otlp-metrics/otlp_metrics_test.go index 6c3676a20604..f244f57213ba 100644 --- a/integration-tests/tests/otlp-metrics/otlp_metrics_test.go +++ b/integration-tests/tests/otlp-metrics/otlp_metrics_test.go @@ -3,91 +3,11 @@ package main import ( - "fmt" - "strconv" "testing" "github.com/grafana/agent/integration-tests/common" - "github.com/stretchr/testify/assert" ) -const promURL = "http://localhost:9009/prometheus/api/v1/query?query=" - -func metricQuery(metricName string, testName string) string { - return fmt.Sprintf("%s%s{test_name='%s'}", promURL, metricName, testName) -} - func TestOTLPMetrics(t *testing.T) { - const testName = "otlp_metrics" - tests := []struct { - metric string - }{ - // TODO: better differentiate these metric types? - {"example_counter"}, - {"example_float_counter"}, - {"example_updowncounter"}, - {"example_float_updowncounter"}, - {"example_histogram_bucket"}, - {"example_float_histogram_bucket"}, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.metric, func(t *testing.T) { - t.Parallel() - assertMetricData(t, metricQuery(tt.metric, testName), tt.metric, testName) - }) - } - - histogramTests := []string{ - "example_exponential_histogram", - "example_exponential_float_histogram", - } - - for _, metric := range histogramTests { - metric := metric - t.Run(metric, func(t *testing.T) { - t.Parallel() - assertHistogramData(t, metricQuery(metric, testName), metric, testName) - }) - } -} - -func assertHistogramData(t *testing.T, query string, expectedMetric string, testName string) { - var metricResponse common.MetricResponse - assert.EventuallyWithT(t, func(c *assert.CollectT) { - err := common.FetchDataFromURL(query, &metricResponse) - assert.NoError(c, err) - if assert.NotEmpty(c, metricResponse.Data.Result) { - assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, expectedMetric) - assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, testName) - if assert.NotNil(c, metricResponse.Data.Result[0].Histogram) { - histogram := metricResponse.Data.Result[0].Histogram - if assert.NotEmpty(c, histogram.Data.Count) { - count, _ := strconv.Atoi(histogram.Data.Count) - assert.Greater(c, count, 10, "Count should be at some point greater than 10.") - } - if assert.NotEmpty(c, histogram.Data.Sum) { - sum, _ := strconv.Atoi(histogram.Data.Sum) - assert.Greater(c, sum, 10, "Sum should be at some point greater than 10.") - } - assert.NotEmpty(c, histogram.Data.Buckets) - assert.Nil(c, metricResponse.Data.Result[0].Value) - } - } - }, common.DefaultTimeout, common.DefaultRetryInterval, "Histogram data did not satisfy the conditions within the time limit") -} - -func assertMetricData(t *testing.T, query, expectedMetric string, testName string) { - var metricResponse common.MetricResponse - assert.EventuallyWithT(t, func(c *assert.CollectT) { - err := 
common.FetchDataFromURL(query, &metricResponse) - assert.NoError(c, err) - if assert.NotEmpty(c, metricResponse.Data.Result) { - assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, expectedMetric) - assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, testName) - assert.NotEmpty(c, metricResponse.Data.Result[0].Value.Value) - assert.Nil(c, metricResponse.Data.Result[0].Histogram) - } - }, common.DefaultTimeout, common.DefaultRetryInterval, "Data did not satisfy the conditions within the time limit") + common.MimirMetricsTest(t, common.OtelDefaultMetrics, common.OtelDefaultHistogramMetrics, "otlp_metrics") } diff --git a/integration-tests/tests/otlp-metrics/otlp_to_prom_metrics_test.go b/integration-tests/tests/otlp-metrics/otlp_to_prom_metrics_test.go index 9a0c5d780975..930de77ba403 100644 --- a/integration-tests/tests/otlp-metrics/otlp_to_prom_metrics_test.go +++ b/integration-tests/tests/otlp-metrics/otlp_to_prom_metrics_test.go @@ -4,39 +4,20 @@ package main import ( "testing" + + "github.com/grafana/agent/integration-tests/common" ) func TestOTLPToPromMetrics(t *testing.T) { - const testName = "otlp_to_prom_metrics" - tests := []struct { - metric string - }{ - {"example_counter_total"}, - {"example_float_counter_total"}, - {"example_updowncounter"}, - {"example_float_updowncounter"}, - {"example_histogram_bucket"}, - {"example_float_histogram_bucket"}, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.metric, func(t *testing.T) { - t.Parallel() - assertMetricData(t, metricQuery(tt.metric, testName), tt.metric, testName) - }) + // Not using the default here because some metric names change during the conversion. + metrics := []string{ + "example_counter_total", // Change from example_counter to example_counter_total. + "example_float_counter_total", // Change from example_float_counter to example_float_counter_total. 
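+		// Note: the OTLP-to-Prometheus conversion appends a _total suffix to
+		// monotonic counters, as the comments above show; the names below are
+		// left unchanged by the conversion.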
+		"example_updowncounter",
+		"example_float_updowncounter",
+		"example_histogram_bucket",
+		"example_float_histogram_bucket",
 	}
 
-	histogramTests := []string{
-		"example_exponential_histogram",
-		"example_exponential_float_histogram",
-	}
-
-	for _, metric := range histogramTests {
-		metric := metric
-		t.Run(metric, func(t *testing.T) {
-			t.Parallel()
-			assertHistogramData(t, metricQuery(metric, testName), metric, testName)
-		})
-	}
+	common.MimirMetricsTest(t, metrics, common.OtelDefaultHistogramMetrics, "otlp_to_prom_metrics")
 }
diff --git a/integration-tests/tests/redis/config.river b/integration-tests/tests/redis/config.river
new file mode 100644
index 000000000000..ae333681cf4e
--- /dev/null
+++ b/integration-tests/tests/redis/config.river
@@ -0,0 +1,25 @@
+prometheus.exporter.redis "redis_metrics" {
+  redis_addr = "localhost:6379"
+}
+
+prometheus.scrape "redis_metrics" {
+  targets         = prometheus.exporter.redis.redis_metrics.targets
+  forward_to      = [prometheus.remote_write.redis_metrics.receiver]
+  scrape_interval = "1s"
+  scrape_timeout  = "500ms"
+}
+
+prometheus.remote_write "redis_metrics" {
+  endpoint {
+    url = "http://localhost:9009/api/v1/push"
+    metadata_config {
+      send_interval = "1s"
+    }
+    queue_config {
+      max_samples_per_send = 100
+    }
+  }
+  external_labels = {
+    test_name = "redis_metrics",
+  }
+}
diff --git a/integration-tests/tests/redis/redis_metrics_test.go b/integration-tests/tests/redis/redis_metrics_test.go
new file mode 100644
index 000000000000..b21c35c0ef0f
--- /dev/null
+++ b/integration-tests/tests/redis/redis_metrics_test.go
@@ -0,0 +1,30 @@
+//go:build !windows
+
+package main
+
+import (
+	"testing"
+
+	"github.com/grafana/agent/integration-tests/common"
+)
+
+func TestRedisMetrics(t *testing.T) {
+	var redisMetrics = []string{
+		"redis_blocked_clients",
+		"redis_commands_duration_seconds_total",
+		"redis_commands_total",
+		"redis_connected_clients",
+		"redis_connected_slaves",
+		"redis_db_keys",
+		"redis_db_keys_expiring",
+		"redis_evicted_keys_total",
+		"redis_keyspace_hits_total",
+		"redis_keyspace_misses_total",
+		"redis_memory_max_bytes",
+		"redis_memory_used_bytes",
+		"redis_memory_used_rss_bytes",
+		"redis_up",
+	}
+	// TODO(marctc): Report list of failed metrics instead of one by one.
+	common.MimirMetricsTest(t, redisMetrics, []string{}, "redis_metrics")
+}
diff --git a/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go b/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go
index daaaa519131e..8765c8930ea8 100644
--- a/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go
+++ b/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go
@@ -3,78 +3,11 @@
 package main
 
 import (
-	"fmt"
-	"strconv"
 	"testing"
 
 	"github.com/grafana/agent/integration-tests/common"
-	"github.com/stretchr/testify/assert"
 )
 
-const promURL = "http://localhost:9009/prometheus/api/v1/query?query="
-
-func metricQuery(metricName string) string {
-	return fmt.Sprintf("%s%s{test_name='scrape_prom_metrics'}", promURL, metricName)
-}
-
 func TestScrapePromMetrics(t *testing.T) {
-	metrics := []string{
-		// TODO: better differentiate these metric types?
- "golang_counter", - "golang_gauge", - "golang_histogram_bucket", - "golang_summary", - "golang_native_histogram", - } - - for _, metric := range metrics { - metric := metric - t.Run(metric, func(t *testing.T) { - t.Parallel() - if metric == "golang_native_histogram" { - assertHistogramData(t, metricQuery(metric), metric) - } else { - assertMetricData(t, metricQuery(metric), metric) - } - }) - } -} - -func assertHistogramData(t *testing.T, query string, expectedMetric string) { - var metricResponse common.MetricResponse - assert.EventuallyWithT(t, func(c *assert.CollectT) { - err := common.FetchDataFromURL(query, &metricResponse) - assert.NoError(c, err) - if assert.NotEmpty(c, metricResponse.Data.Result) { - assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, expectedMetric) - assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, "scrape_prom_metrics") - if assert.NotNil(c, metricResponse.Data.Result[0].Histogram) { - histogram := metricResponse.Data.Result[0].Histogram - if assert.NotEmpty(c, histogram.Data.Count) { - count, _ := strconv.Atoi(histogram.Data.Count) - assert.Greater(c, count, 10, "Count should be at some point greater than 10.") - } - if assert.NotEmpty(c, histogram.Data.Sum) { - sum, _ := strconv.ParseFloat(histogram.Data.Sum, 64) - assert.Greater(c, sum, 10., "Sum should be at some point greater than 10.") - } - assert.NotEmpty(c, histogram.Data.Buckets) - assert.Nil(c, metricResponse.Data.Result[0].Value) - } - } - }, common.DefaultTimeout, common.DefaultRetryInterval, "Histogram data did not satisfy the conditions within the time limit") -} - -func assertMetricData(t *testing.T, query, expectedMetric string) { - var metricResponse common.MetricResponse - assert.EventuallyWithT(t, func(c *assert.CollectT) { - err := common.FetchDataFromURL(query, &metricResponse) - assert.NoError(c, err) - if assert.NotEmpty(c, metricResponse.Data.Result) { - assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, expectedMetric) - assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, "scrape_prom_metrics") - assert.NotEmpty(c, metricResponse.Data.Result[0].Value.Value) - assert.Nil(c, metricResponse.Data.Result[0].Histogram) - } - }, common.DefaultTimeout, common.DefaultRetryInterval, "Data did not satisfy the conditions within the time limit") + common.MimirMetricsTest(t, common.PromDefaultMetrics, common.PromDefaultHistogramMetric, "scrape_prom_metrics") } diff --git a/integration-tests/tests/unix/config.river b/integration-tests/tests/unix/config.river new file mode 100644 index 000000000000..cb8ce5cdbf83 --- /dev/null +++ b/integration-tests/tests/unix/config.river @@ -0,0 +1,23 @@ +prometheus.exporter.unix "node_exporter" { } + +prometheus.scrape "demo" { + targets = prometheus.exporter.unix.node_exporter.targets + forward_to = [prometheus.remote_write.node_exporter.receiver] + scrape_interval = "1s" + scrape_timeout = "500ms" +} + +prometheus.remote_write "node_exporter" { + endpoint { + url = "http://localhost:9009/api/v1/push" + metadata_config { + send_interval = "1s" + } + queue_config { + max_samples_per_send = 100 + } + } + external_labels = { + test_name = "unix_metrics", + } +} \ No newline at end of file diff --git a/integration-tests/tests/unix/unix_metrics_test.go b/integration-tests/tests/unix/unix_metrics_test.go new file mode 100644 index 000000000000..15c4dae42603 --- /dev/null +++ b/integration-tests/tests/unix/unix_metrics_test.go @@ -0,0 +1,164 @@ +//go:build !windows + +package main + +import ( + "testing" + + 
"github.com/grafana/agent/integration-tests/common" +) + +func TestUnixMetrics(t *testing.T) { + var unixMetrics = []string{ + "node_arp_entries", + "node_boot_time_seconds", + "node_context_switches_total", + "node_cpu_seconds_total", + "node_disk_io_time_seconds_total", + "node_disk_io_time_weighted_seconds_total", + "node_disk_read_bytes_total", + "node_disk_read_time_seconds_total", + "node_disk_reads_completed_total", + "node_disk_write_time_seconds_total", + "node_disk_writes_completed_total", + "node_disk_written_bytes_total", + "node_filefd_allocated", + "node_filefd_maximum", + "node_filesystem_avail_bytes", + "node_filesystem_device_error", + "node_filesystem_files", + "node_filesystem_files_free", + "node_filesystem_readonly", + "node_filesystem_size_bytes", + "node_intr_total", + "node_load1", + "node_load15", + "node_load5", + "node_memory_Active_anon_bytes", + "node_memory_Active_bytes", + "node_memory_Active_file_bytes", + "node_memory_AnonHugePages_bytes", + "node_memory_AnonPages_bytes", + "node_memory_Bounce_bytes", + "node_memory_Buffers_bytes", + "node_memory_Cached_bytes", + "node_memory_CommitLimit_bytes", + "node_memory_Committed_AS_bytes", + "node_memory_DirectMap1G_bytes", + "node_memory_DirectMap2M_bytes", + "node_memory_DirectMap4k_bytes", + "node_memory_Dirty_bytes", + "node_memory_HugePages_Free", + "node_memory_HugePages_Rsvd", + "node_memory_HugePages_Surp", + "node_memory_HugePages_Total", + "node_memory_Hugepagesize_bytes", + "node_memory_Inactive_anon_bytes", + "node_memory_Inactive_bytes", + "node_memory_Inactive_file_bytes", + "node_memory_Mapped_bytes", + "node_memory_MemAvailable_bytes", + "node_memory_MemFree_bytes", + "node_memory_MemTotal_bytes", + "node_memory_SReclaimable_bytes", + "node_memory_SUnreclaim_bytes", + "node_memory_ShmemHugePages_bytes", + "node_memory_ShmemPmdMapped_bytes", + "node_memory_Shmem_bytes", + "node_memory_Slab_bytes", + "node_memory_SwapTotal_bytes", + "node_memory_VmallocChunk_bytes", + "node_memory_VmallocTotal_bytes", + "node_memory_VmallocUsed_bytes", + "node_memory_WritebackTmp_bytes", + "node_memory_Writeback_bytes", + "node_netstat_Icmp6_InErrors", + "node_netstat_Icmp6_InMsgs", + "node_netstat_Icmp6_OutMsgs", + "node_netstat_Icmp_InErrors", + "node_netstat_Icmp_InMsgs", + "node_netstat_Icmp_OutMsgs", + "node_netstat_IpExt_InOctets", + "node_netstat_IpExt_OutOctets", + "node_netstat_TcpExt_ListenDrops", + "node_netstat_TcpExt_ListenOverflows", + "node_netstat_TcpExt_TCPSynRetrans", + "node_netstat_Tcp_InErrs", + "node_netstat_Tcp_InSegs", + "node_netstat_Tcp_OutRsts", + "node_netstat_Tcp_OutSegs", + "node_netstat_Tcp_RetransSegs", + "node_netstat_Udp6_InDatagrams", + "node_netstat_Udp6_InErrors", + "node_netstat_Udp6_NoPorts", + "node_netstat_Udp6_OutDatagrams", + "node_netstat_Udp6_RcvbufErrors", + "node_netstat_Udp6_SndbufErrors", + "node_netstat_UdpLite_InErrors", + "node_netstat_Udp_InDatagrams", + "node_netstat_Udp_InErrors", + "node_netstat_Udp_NoPorts", + "node_netstat_Udp_OutDatagrams", + "node_netstat_Udp_RcvbufErrors", + "node_netstat_Udp_SndbufErrors", + "node_network_carrier", + "node_network_info", + "node_network_mtu_bytes", + "node_network_receive_bytes_total", + "node_network_receive_compressed_total", + "node_network_receive_drop_total", + "node_network_receive_errs_total", + "node_network_receive_fifo_total", + "node_network_receive_multicast_total", + "node_network_receive_packets_total", + "node_network_speed_bytes", + "node_network_transmit_bytes_total", + 
"node_network_transmit_compressed_total", + "node_network_transmit_drop_total", + "node_network_transmit_errs_total", + "node_network_transmit_fifo_total", + "node_network_transmit_packets_total", + "node_network_transmit_queue_length", + "node_network_up", + "node_nf_conntrack_entries", + "node_nf_conntrack_entries_limit", + "node_os_info", + "node_sockstat_FRAG6_inuse", + "node_sockstat_FRAG_inuse", + "node_sockstat_RAW6_inuse", + "node_sockstat_RAW_inuse", + "node_sockstat_TCP6_inuse", + "node_sockstat_TCP_alloc", + "node_sockstat_TCP_inuse", + "node_sockstat_TCP_mem", + "node_sockstat_TCP_mem_bytes", + "node_sockstat_TCP_orphan", + "node_sockstat_TCP_tw", + "node_sockstat_UDP6_inuse", + "node_sockstat_UDPLITE6_inuse", + "node_sockstat_UDPLITE_inuse", + "node_sockstat_UDP_inuse", + "node_sockstat_UDP_mem", + "node_sockstat_UDP_mem_bytes", + "node_sockstat_sockets_used", + "node_softnet_dropped_total", + "node_softnet_processed_total", + "node_softnet_times_squeezed_total", + "node_textfile_scrape_error", + "node_time_zone_offset_seconds", + "node_timex_estimated_error_seconds", + "node_timex_maxerror_seconds", + "node_timex_offset_seconds", + "node_timex_sync_status", + "node_uname_info", + "node_vmstat_oom_kill", + "node_vmstat_pgfault", + "node_vmstat_pgmajfault", + "node_vmstat_pgpgin", + "node_vmstat_pgpgout", + "node_vmstat_pswpin", + "node_vmstat_pswpout", + } + // TODO(marctc): Report list of failed metrics instead of one by one. + common.MimirMetricsTest(t, unixMetrics, []string{}, "unix_metrics") +} diff --git a/integration-tests/utils.go b/integration-tests/utils.go index d723a235235c..9e9280f0247c 100644 --- a/integration-tests/utils.go +++ b/integration-tests/utils.go @@ -40,7 +40,7 @@ func setupEnvironment() { executeCommand("docker-compose", []string{"up", "-d"}, "Setting up environment with Docker Compose") } -func runSingleTest(testDir string) { +func runSingleTest(testDir string, port int) { info, err := os.Stat(testDir) if err != nil { panic(err) @@ -52,7 +52,7 @@ func runSingleTest(testDir string) { dirName := filepath.Base(testDir) var agentLogBuffer bytes.Buffer - cmd := exec.Command(agentBinaryPath, "run", "config.river") + cmd := exec.Command(agentBinaryPath, "run", "config.river", "--server.http.listen-addr", fmt.Sprintf("0.0.0.0:%d", port)) cmd.Dir = testDir cmd.Stdout = &agentLogBuffer cmd.Stderr = &agentLogBuffer @@ -96,17 +96,16 @@ func runAllTests() { panic(err) } var wg sync.WaitGroup - - for _, testDir := range testDirs { + port := 12345 + for i, testDir := range testDirs { fmt.Println("Running", testDir) wg.Add(1) - go func(td string) { + go func(td string, offset int) { defer wg.Done() - runSingleTest(td) - }(testDir) + runSingleTest(td, port+offset) + }(testDir, i) } wg.Wait() - close(logChan) } func cleanUpEnvironment() { @@ -119,6 +118,9 @@ func cleanUpEnvironment() { func reportResults() { testsFailed := 0 + // It's ok to close the channel here because all tests are finished. + // If the channel would not be closed, the for loop would wait forever. 
+ close(logChan) for log := range logChan { fmt.Printf("Failure detected in %s:\n", log.TestDir) fmt.Println("Test output:", log.TestOutput) diff --git a/component/module/git/internal/vcs/auth.go b/internal/vcs/auth.go similarity index 100% rename from component/module/git/internal/vcs/auth.go rename to internal/vcs/auth.go diff --git a/component/module/git/internal/vcs/errors.go b/internal/vcs/errors.go similarity index 100% rename from component/module/git/internal/vcs/errors.go rename to internal/vcs/errors.go diff --git a/component/module/git/internal/vcs/git.go b/internal/vcs/git.go similarity index 100% rename from component/module/git/internal/vcs/git.go rename to internal/vcs/git.go diff --git a/component/module/git/internal/vcs/git_test.go b/internal/vcs/git_test.go similarity index 97% rename from component/module/git/internal/vcs/git_test.go rename to internal/vcs/git_test.go index 7680c857db0e..a7614eb9507f 100644 --- a/component/module/git/internal/vcs/git_test.go +++ b/internal/vcs/git_test.go @@ -6,7 +6,7 @@ import ( "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/config" - "github.com/grafana/agent/component/module/git/internal/vcs" + "github.com/grafana/agent/internal/vcs" "github.com/stretchr/testify/require" ) diff --git a/operations/agent-flow-mixin/alerts/clustering.libsonnet b/operations/agent-flow-mixin/alerts/clustering.libsonnet index bfa14f1f1e34..b4d5edc98988 100644 --- a/operations/agent-flow-mixin/alerts/clustering.libsonnet +++ b/operations/agent-flow-mixin/alerts/clustering.libsonnet @@ -8,7 +8,7 @@ alert.newGroup( 'ClusterNotConverging', 'stddev by (cluster, namespace) (sum without (state) (cluster_node_peers)) != 0', 'Cluster is not converging.', - '5m', + '10m', ), // Cluster has entered a split brain state. @@ -23,15 +23,7 @@ alert.newGroup( count by (cluster, namespace) (cluster_node_info) |||, 'Cluster nodes have entered a split brain state.', - '5m', - ), - - // Standard Deviation of Lamport clock time between nodes is too high. - alert.newRule( - 'ClusterLamportClockDrift', - 'stddev by (cluster, namespace) (cluster_node_lamport_time) > 4 * sqrt(count by (cluster, namespace) (cluster_node_info))', - "Cluster nodes' lamport clocks are not converging.", - '5m' + '10m', ), // Nodes health score is not zero. @@ -41,22 +33,7 @@ alert.newGroup( cluster_node_gossip_health_score > 0 |||, 'Cluster node is reporting a health score > 0.', - '5m', - ), - - // Lamport clock of a node is not progressing at all. - // - // This only checks for nodes that have peers other than themselves; nodes - // with no external peers will not increase their lamport time because - // there is no cluster networking traffic. - alert.newRule( - 'ClusterLamportClockStuck', - ||| - sum by (cluster, namespace, instance) (rate(cluster_node_lamport_time[2m])) == 0 - and on (cluster, namespace, instance) (cluster_node_peers > 1) - |||, - "Cluster nodes's lamport clocks is not progressing.", - '5m', + '10m', ), // Node tried to join the cluster with an already-present node name. @@ -72,7 +49,7 @@ alert.newGroup( 'ClusterNodeStuckTerminating', 'sum by (cluster, namespace, instance) (cluster_node_peers{state="terminating"}) > 0', 'Cluster node stuck in Terminating state.', - '5m', + '10m', ), // Nodes are not using the same configuration file. @@ -86,8 +63,5 @@ alert.newGroup( 'Cluster nodes are not using the same configuration file.', '5m', ), - - // TODO(@tpaschalis) Alert on open transport streams once we investigate - // their behavior. 
] ) diff --git a/operations/agent-flow-mixin/alerts/controller.libsonnet b/operations/agent-flow-mixin/alerts/controller.libsonnet index b6451db76292..3aeb5eabbb10 100644 --- a/operations/agent-flow-mixin/alerts/controller.libsonnet +++ b/operations/agent-flow-mixin/alerts/controller.libsonnet @@ -14,7 +14,7 @@ alert.newGroup( // Unhealthy components detected. alert.newRule( 'UnhealthyComponents', - 'sum(agent_component_controller_running_components{health_type!="healthy"}) > 0', + 'sum by (cluster, namespace) (agent_component_controller_running_components{health_type!="healthy"}) > 0', 'Unhealthy Flow components detected.', '15m', ), diff --git a/operations/agent-flow-mixin/dashboards.libsonnet b/operations/agent-flow-mixin/dashboards.libsonnet index 7ecf696bf38f..661de183dc96 100644 --- a/operations/agent-flow-mixin/dashboards.libsonnet +++ b/operations/agent-flow-mixin/dashboards.libsonnet @@ -2,7 +2,8 @@ grafanaDashboards+: (import './dashboards/controller.libsonnet') + (import './dashboards/resources.libsonnet') + - (import './dashboards/prometheus.remote_write.libsonnet') + + (import './dashboards/prometheus.libsonnet') + (import './dashboards/cluster-node.libsonnet') + + (import './dashboards/opentelemetry.libsonnet') + (import './dashboards/cluster-overview.libsonnet'), } diff --git a/operations/agent-flow-mixin/dashboards/opentelemetry.libsonnet b/operations/agent-flow-mixin/dashboards/opentelemetry.libsonnet new file mode 100644 index 000000000000..a88fdf3893ff --- /dev/null +++ b/operations/agent-flow-mixin/dashboards/opentelemetry.libsonnet @@ -0,0 +1,174 @@ +local dashboard = import './utils/dashboard.jsonnet'; +local panel = import './utils/panel.jsonnet'; +local filename = 'agent-flow-opentelemetry.json'; + +local stackedPanelMixin = { + fieldConfig+: { + defaults+: { + custom+: { + fillOpacity: 20, + gradientMode: 'hue', + stacking: { mode: 'normal' }, + }, + }, + }, +}; + +{ + [filename]: + dashboard.new(name='Grafana Agent Flow / OpenTelemetry') + + dashboard.withDashboardsLink() + + dashboard.withUID(std.md5(filename)) + + dashboard.withTemplateVariablesMixin([ + dashboard.newTemplateVariable('cluster', ||| + label_values(agent_component_controller_running_components, cluster) + |||), + dashboard.newTemplateVariable('namespace', ||| + label_values(agent_component_controller_running_components{cluster="$cluster"}, namespace) + |||), + dashboard.newMultiTemplateVariable('instance', ||| + label_values(agent_component_controller_running_components{cluster="$cluster", namespace="$namespace"}, instance) + |||), + ]) + + dashboard.withPanelsMixin([ + // "Receivers for traces" row + ( + panel.new('Receivers for traces [otelcol.receiver]', 'row') + + panel.withPosition({ h: 1, w: 24, x: 0, y: 0 }) + ), + ( + panel.new(title='Accepted spans', type='timeseries') + + panel.withDescription(||| + Number of spans successfully pushed into the pipeline. + |||) + + panel.withPosition({ x: 0, y: 0, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + rate(receiver_accepted_spans_ratio_total{cluster="$cluster", namespace="$namespace", instance=~"$instance"}[$__rate_interval]) + |||, + //TODO: How will the dashboard look if there is more than one receiver component? The legend is not unique enough? + legendFormat='{{ pod }} / {{ transport }}', + ), + ]) + ), + ( + panel.new(title='Refused spans', type='timeseries') + + stackedPanelMixin + + panel.withDescription(||| + Number of spans that could not be pushed into the pipeline. 
+ |||) + + panel.withPosition({ x: 8, y: 0, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + rate(receiver_refused_spans_ratio_total{cluster="$cluster", namespace="$namespace", instance=~"$instance"}[$__rate_interval]) + |||, + legendFormat='{{ pod }} / {{ transport }}', + ), + ]) + ), + ( + panel.newHeatmap('RPC server duration (traces)') + + panel.withUnit('milliseconds') + + panel.withDescription(||| + The duration of inbound RPCs. + |||) + + panel.withPosition({ x: 16, y: 0, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr='sum by (le) (increase(rpc_server_duration_milliseconds_bucket{cluster="$cluster", namespace="$namespace", instance=~"$instance", rpc_service="opentelemetry.proto.collector.trace.v1.TraceService"}[$__rate_interval]))', + format='heatmap', + legendFormat='{{le}}', + ), + ]) + ), + + // "Batching" row + ( + panel.new('Batching [otelcol.processor.batch]', 'row') + + panel.withPosition({ h: 1, w: 24, x: 0, y: 10 }) + ), + ( + panel.newHeatmap('Number of units in the batch') + + panel.withDescription(||| + Number of units in the batch + |||) + + panel.withPosition({ x: 0, y: 10, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr='sum by (le) (increase(processor_batch_batch_send_size_ratio_bucket{cluster="$cluster", namespace="$namespace", instance=~"$instance"}[$__rate_interval]))', + format='heatmap', + legendFormat='{{le}}', + ), + ]) + ), + ( + panel.new(title='Distinct metadata values', type='timeseries') + + panel.withDescription(||| + Number of distinct metadata value combinations being processed + |||) + + panel.withPosition({ x: 8, y: 10, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + processor_batch_metadata_cardinality_ratio{cluster="$cluster", namespace="$namespace", instance=~"$instance"} + |||, + legendFormat='{{ pod }}', + ), + ]) + ), + ( + panel.new(title='Timeout trigger', type='timeseries') + + panel.withDescription(||| + Number of times the batch was sent due to a timeout trigger + |||) + + panel.withPosition({ x: 16, y: 10, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + rate(processor_batch_timeout_trigger_send_ratio_total{cluster="$cluster", namespace="$namespace", instance=~"$instance"}[$__rate_interval]) + |||, + legendFormat='{{ pod }}', + ), + ]) + ), + + // "Exporters for traces" row + ( + panel.new('Exporters for traces [otelcol.exporter]', 'row') + + panel.withPosition({ h: 1, w: 24, x: 0, y: 20 }) + ), + ( + panel.new(title='Exported sent spans', type='timeseries') + + panel.withDescription(||| + Number of spans successfully sent to destination. + |||) + + panel.withPosition({ x: 0, y: 20, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + rate(exporter_sent_spans_ratio_total{cluster="$cluster", namespace="$namespace", instance=~"$instance"}[$__rate_interval]) + |||, + legendFormat='{{ pod }}', + ), + ]) + ), + ( + panel.new(title='Exported failed spans', type='timeseries') + + panel.withDescription(||| + Number of spans in failed attempts to send to destination. 
+ |||) + + panel.withPosition({ x: 8, y: 20, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + rate(exporter_send_failed_spans_ratio_total{cluster="$cluster", namespace="$namespace", instance=~"$instance"}[$__rate_interval]) + |||, + legendFormat='{{ pod }}', + ), + ]) + ), + + ]), +} diff --git a/operations/agent-flow-mixin/dashboards/prometheus.libsonnet b/operations/agent-flow-mixin/dashboards/prometheus.libsonnet new file mode 100644 index 000000000000..21ae79f3b063 --- /dev/null +++ b/operations/agent-flow-mixin/dashboards/prometheus.libsonnet @@ -0,0 +1,426 @@ +local dashboard = import './utils/dashboard.jsonnet'; +local panel = import './utils/panel.jsonnet'; +local filename = 'agent-flow-prometheus-remote-write.json'; + +local stackedPanelMixin = { + fieldConfig+: { + defaults+: { + custom+: { + fillOpacity: 20, + gradientMode: 'hue', + stacking: { mode: 'normal' }, + }, + }, + }, +}; + +local scrapePanels(y_offset) = [ + panel.newRow(title='prometheus.scrape', y=y_offset), + + // Scrape success rate + ( + panel.new(title='Scrape success rate in $cluster', type='timeseries') + + panel.withUnit('percentunit') + + panel.withDescription(||| + Percentage of targets successfully scraped by prometheus.scrape + components. + + This metric is calculated by dividing the number of targets + successfully scraped by the total number of targets scraped, + across all the namespaces in the selected cluster. + + Low success rates can indicate a problem with scrape targets, + stale service discovery, or agent misconfiguration. + |||) + + panel.withPosition({ x: 0, y: 1 + y_offset, w: 12, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum(up{cluster="$cluster"}) + / + count (up{cluster="$cluster"}) + |||, + legendFormat='% of targets successfully scraped', + ), + ]) + ), + + // Scrape duration + ( + panel.new(title='Scrape duration in $cluster', type='timeseries') + + panel.withUnit('s') + + panel.withDescription(||| + Duration of successful scrapes by prometheus.scrape components, + across all the namespaces in the selected cluster. + + This metric should be below your configured scrape interval. + High durations can indicate a problem with a scrape target or + a performance issue with the agent. + |||) + + panel.withPosition({ x: 12, y: 1 + y_offset, w: 12, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + quantile(0.99, scrape_duration_seconds{cluster="$cluster"}) + |||, + legendFormat='p99', + ), + panel.newQuery( + expr=||| + quantile(0.95, scrape_duration_seconds{cluster="$cluster"}) + |||, + legendFormat='p95', + ), + panel.newQuery( + expr=||| + quantile(0.50, scrape_duration_seconds{cluster="$cluster"}) + |||, + legendFormat='p50', + ), + + ]) + ), +]; + +local remoteWritePanels(y_offset) = [ + panel.newRow(title='prometheus.remote_write', y=y_offset), + + // WAL delay + ( + panel.new(title='WAL delay', type='timeseries') + + panel.withUnit('s') + + panel.withDescription(||| + How far behind prometheus.remote_write from samples recently written + to the WAL. + + Each endpoint prometheus.remote_write is configured to send metrics + has its own delay. The time shown here is the sum across all + endpoints for the given component. + + It is normal for the WAL delay to be within 1-3 scrape intervals. If + the WAL delay continues to increase beyond that amount, try + increasing the number of maximum shards. 
+ |||) + + panel.withPosition({ x: 0, y: 1 + y_offset, w: 6, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum by (instance, component_id) ( + prometheus_remote_storage_highest_timestamp_in_seconds{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component"} + - ignoring(url, remote_name) group_right(instance) + prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} + ) + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + + // Data write throughput + ( + panel.new(title='Data write throughput', type='timeseries') + + stackedPanelMixin + + panel.withUnit('Bps') + + panel.withDescription(||| + Rate of data containing samples and metadata sent by + prometheus.remote_write. + |||) + + panel.withPosition({ x: 6, y: 1 + y_offset, w: 6, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum without (remote_name, url) ( + rate(prometheus_remote_storage_bytes_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + + rate(prometheus_remote_storage_metadata_bytes_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + ) + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + + // Write latency + ( + panel.new(title='Write latency', type='timeseries') + + panel.withUnit('s') + + panel.withDescription(||| + Latency of writes to the remote system made by + prometheus.remote_write. + |||) + + panel.withPosition({ x: 12, y: 1 + y_offset, w: 6, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + histogram_quantile(0.99, sum by (le) ( + rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + )) + |||, + legendFormat='99th percentile', + ), + panel.newQuery( + expr=||| + histogram_quantile(0.50, sum by (le) ( + rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + )) + |||, + legendFormat='50th percentile', + ), + panel.newQuery( + expr=||| + sum(rate(prometheus_remote_storage_sent_batch_duration_seconds_sum{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component"}[$__rate_interval])) / + sum(rate(prometheus_remote_storage_sent_batch_duration_seconds_count{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component"}[$__rate_interval])) + |||, + legendFormat='Average', + ), + ]) + ), + + // Shards + ( + local minMaxOverride = { + properties: [{ + id: 'custom.lineStyle', + value: { + dash: [10, 15], + fill: 'dash', + }, + }, { + id: 'custom.showPoints', + value: 'never', + }, { + id: 'custom.hideFrom', + value: { + legend: true, + tooltip: false, + viz: false, + }, + }], + }; + + panel.new(title='Shards', type='timeseries') { + fieldConfig+: { + overrides: [ + minMaxOverride { matcher: { id: 'byName', options: 'Minimum' } }, + minMaxOverride { matcher: { id: 'byName', options: 'Maximum' } }, + ], + }, + } + + panel.withUnit('none') + + panel.withDescription(||| + Total number of shards which are concurrently sending samples read + from the Write-Ahead Log. 
+ + Shards are bound to a minimum and maximum, displayed on the graph. + The lowest minimum and the highest maximum across all clients is + shown. + + Each client has its own set of shards, minimum shards, and maximum + shards; filter to a specific URL to display more granular + information. + |||) + + panel.withPosition({ x: 18, y: 1 + y_offset, w: 6, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum without (remote_name, url) ( + prometheus_remote_storage_shards{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} + ) + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + panel.newQuery( + expr=||| + min ( + prometheus_remote_storage_shards_min{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} + ) + |||, + legendFormat='Minimum', + ), + panel.newQuery( + expr=||| + max ( + prometheus_remote_storage_shards_max{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} + ) + |||, + legendFormat='Maximum', + ), + ]) + ), + + // Sent samples / second + ( + panel.new(title='Sent samples / second', type='timeseries') + + stackedPanelMixin + + panel.withUnit('cps') + + panel.withDescription(||| + Total outgoing samples sent by prometheus.remote_write. + |||) + + panel.withPosition({ x: 0, y: 11 + y_offset, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum without (url, remote_name) ( + rate(prometheus_remote_storage_samples_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + ) + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + + // Failed samples / second + ( + panel.new(title='Failed samples / second', type='timeseries') + + stackedPanelMixin + + panel.withUnit('cps') + + panel.withDescription(||| + Rate of samples which prometheus.remote_write could not send due to + non-recoverable errors. + |||) + + panel.withPosition({ x: 8, y: 11 + y_offset, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum without (url,remote_name) ( + rate(prometheus_remote_storage_samples_failed_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + ) + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + + // Retried samples / second + ( + panel.new(title='Retried samples / second', type='timeseries') + + stackedPanelMixin + + panel.withUnit('cps') + + panel.withDescription(||| + Rate of samples which prometheus.remote_write attempted to resend + after receiving a recoverable error. + |||) + + panel.withPosition({ x: 16, y: 11 + y_offset, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum without (url,remote_name) ( + rate(prometheus_remote_storage_samples_retried_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + ) + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + + // Active series (Total) + ( + panel.new(title='Active series (total)', type='timeseries') { + options+: { + legend+: { + showLegend: false, + }, + }, + } + + panel.withUnit('short') + + panel.withDescription(||| + Total number of active series across all components. + + An "active series" is a series that prometheus.remote_write recently + received a sample for. 
Active series are garbage collected whenever a + truncation of the WAL occurs. + |||) + + panel.withPosition({ x: 0, y: 21 + y_offset, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum(agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}) + |||, + legendFormat='Series', + ), + ]) + ), + + // Active series (by instance/component) + ( + panel.new(title='Active series (by instance/component)', type='timeseries') + + panel.withUnit('short') + + panel.withDescription(||| + Total number of active series which are currently being tracked by + prometheus.remote_write components, with separate lines for each agent instance. + + An "active series" is a series that prometheus.remote_write recently + received a sample for. Active series are garbage collected whenever a + truncation of the WAL occurs. + |||) + + panel.withPosition({ x: 8, y: 21 + y_offset, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_id=~"$component", url=~"$url"} + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + + // Active series (by component) + ( + panel.new(title='Active series (by component)', type='timeseries') + + panel.withUnit('short') + + panel.withDescription(||| + Total number of active series which are currently being tracked by + prometheus.remote_write components, aggregated across all instances. + + An "active series" is a series that prometheus.remote_write recently + received a sample for. Active series are garbage collected whenever a + truncation of the WAL occurs. + |||) + + panel.withPosition({ x: 16, y: 21 + y_offset, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum by (component_id) (agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_id=~"$component", url=~"$url"}) + |||, + legendFormat='{{component_id}}', + ), + ]) + ), +]; + +{ + [filename]: + dashboard.new(name='Grafana Agent Flow / Prometheus Components') + + dashboard.withDocsLink( + url='https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.remote_write/', + desc='Component documentation', + ) + + dashboard.withDashboardsLink() + + dashboard.withUID(std.md5(filename)) + + dashboard.withTemplateVariablesMixin([ + dashboard.newTemplateVariable('cluster', ||| + label_values(agent_component_controller_running_components, cluster) + |||), + dashboard.newTemplateVariable('namespace', ||| + label_values(agent_component_controller_running_components{cluster="$cluster"}, namespace) + |||), + dashboard.newMultiTemplateVariable('instance', ||| + label_values(agent_component_controller_running_components{cluster="$cluster", namespace="$namespace"}, instance) + |||), + dashboard.newMultiTemplateVariable('component', ||| + label_values(agent_wal_samples_appended_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"prometheus\\.remote_write\\..*"}, component_id) + |||), + dashboard.newMultiTemplateVariable('url', ||| + label_values(prometheus_remote_storage_sent_batch_duration_seconds_sum{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component"}, url) + |||), + ]) + + // TODO(@tpaschalis) Make the annotation optional. 
+ dashboard.withAnnotations([ + dashboard.newLokiAnnotation('Deployments', '{cluster="$cluster", container="kube-diff-logger"} | json | namespace_extracted="grafana-agent" | name_extracted=~"grafana-agent.*"', 'rgba(0, 211, 255, 1)'), + ]) + + dashboard.withPanelsMixin( + // First row, offset is 0 + scrapePanels(y_offset=0) + + // Scrape panels take 11 units, so offset next row by 11. + remoteWritePanels(y_offset=11) + ), +} diff --git a/operations/agent-flow-mixin/dashboards/prometheus.remote_write.libsonnet b/operations/agent-flow-mixin/dashboards/prometheus.remote_write.libsonnet deleted file mode 100644 index d9e0d8d170b4..000000000000 --- a/operations/agent-flow-mixin/dashboards/prometheus.remote_write.libsonnet +++ /dev/null @@ -1,352 +0,0 @@ -local dashboard = import './utils/dashboard.jsonnet'; -local panel = import './utils/panel.jsonnet'; -local filename = 'agent-flow-prometheus-remote-write.json'; - -local stackedPanelMixin = { - fieldConfig+: { - defaults+: { - custom+: { - fillOpacity: 20, - gradientMode: 'hue', - stacking: { mode: 'normal' }, - }, - }, - }, -}; - -{ - [filename]: - dashboard.new(name='Grafana Agent Flow / prometheus.remote_write') + - dashboard.withDocsLink( - url='https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.remote_write/', - desc='Component documentation', - ) + - dashboard.withDashboardsLink() + - dashboard.withUID(std.md5(filename)) + - dashboard.withTemplateVariablesMixin([ - dashboard.newTemplateVariable('cluster', ||| - label_values(agent_component_controller_running_components, cluster) - |||), - dashboard.newTemplateVariable('namespace', ||| - label_values(agent_component_controller_running_components{cluster="$cluster"}, namespace) - |||), - dashboard.newMultiTemplateVariable('instance', ||| - label_values(agent_component_controller_running_components{cluster="$cluster", namespace="$namespace"}, instance) - |||), - dashboard.newMultiTemplateVariable('component', ||| - label_values(agent_wal_samples_appended_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"prometheus\\.remote_write\\..*"}, component_id) - |||), - dashboard.newMultiTemplateVariable('url', ||| - label_values(prometheus_remote_storage_sent_batch_duration_seconds_sum{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component"}, url) - |||), - ]) + - // TODO(@tpaschalis) Make the annotation optional. - dashboard.withAnnotations([ - dashboard.newLokiAnnotation('Deployments', '{cluster="$cluster", container="kube-diff-logger"} | json | namespace_extracted="grafana-agent" | name_extracted=~"grafana-agent.*"', 'rgba(0, 211, 255, 1)'), - ]) + - dashboard.withPanelsMixin([ - // WAL delay - ( - panel.new(title='WAL delay', type='timeseries') + - panel.withUnit('s') + - panel.withDescription(||| - How far behind prometheus.remote_write from samples recently written - to the WAL. - - Each endpoint prometheus.remote_write is configured to send metrics - has its own delay. The time shown here is the sum across all - endpoints for the given component. - - It is normal for the WAL delay to be within 1-3 scrape intervals. If - the WAL delay continues to increase beyond that amount, try - increasing the number of maximum shards. 
-      |||) +
-      panel.withPosition({ x: 0, y: 0, w: 6, h: 10 }) +
-      panel.withQueries([
-        panel.newQuery(
-          expr=|||
-            sum by (instance, component_id) (
-              prometheus_remote_storage_highest_timestamp_in_seconds{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component"}
-              - ignoring(url, remote_name) group_right(instance)
-              prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}
-            )
-          |||,
-          legendFormat='{{instance}} / {{component_id}}',
-        ),
-      ])
-    ),
-
-    // Data write throughput
-    (
-      panel.new(title='Data write throughput', type='timeseries') +
-      stackedPanelMixin +
-      panel.withUnit('Bps') +
-      panel.withDescription(|||
-        Rate of data containing samples and metadata sent by
-        prometheus.remote_write.
-      |||) +
-      panel.withPosition({ x: 6, y: 0, w: 6, h: 10 }) +
-      panel.withQueries([
-        panel.newQuery(
-          expr=|||
-            sum without (remote_name, url) (
-              rate(prometheus_remote_storage_bytes_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) +
-              rate(prometheus_remote_storage_metadata_bytes_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval])
-            )
-          |||,
-          legendFormat='{{instance}} / {{component_id}}',
-        ),
-      ])
-    ),
-
-    // Write latency
-    (
-      panel.new(title='Write latency', type='timeseries') +
-      panel.withUnit('s') +
-      panel.withDescription(|||
-        Latency of writes to the remote system made by
-        prometheus.remote_write.
-      |||) +
-      panel.withPosition({ x: 12, y: 0, w: 6, h: 10 }) +
-      panel.withQueries([
-        panel.newQuery(
-          expr=|||
-            histogram_quantile(0.99, sum by (le) (
-              rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval])
-            ))
-          |||,
-          legendFormat='99th percentile',
-        ),
-        panel.newQuery(
-          expr=|||
-            histogram_quantile(0.50, sum by (le) (
-              rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval])
-            ))
-          |||,
-          legendFormat='50th percentile',
-        ),
-        panel.newQuery(
-          expr=|||
-            sum(rate(prometheus_remote_storage_sent_batch_duration_seconds_sum{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component"}[$__rate_interval])) /
-            sum(rate(prometheus_remote_storage_sent_batch_duration_seconds_count{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component"}[$__rate_interval]))
-          |||,
-          legendFormat='Average',
-        ),
-      ])
-    ),
-
-    // Shards
-    (
-      local minMaxOverride = {
-        properties: [{
-          id: 'custom.lineStyle',
-          value: {
-            dash: [10, 15],
-            fill: 'dash',
-          },
-        }, {
-          id: 'custom.showPoints',
-          value: 'never',
-        }, {
-          id: 'custom.hideFrom',
-          value: {
-            legend: true,
-            tooltip: false,
-            viz: false,
-          },
-        }],
-      };
-
-      panel.new(title='Shards', type='timeseries') {
-        fieldConfig+: {
-          overrides: [
-            minMaxOverride { matcher: { id: 'byName', options: 'Minimum' } },
-            minMaxOverride { matcher: { id: 'byName', options: 'Maximum' } },
-          ],
-        },
-      } +
-      panel.withUnit('none') +
-      panel.withDescription(|||
-        Total number of shards which are concurrently sending samples read
-        from the Write-Ahead Log.
-
-        Shards are bound to a minimum and maximum, displayed on the graph.
-        The lowest minimum and the highest maximum across all clients is
-        shown.
-
-        Each client has its own set of shards, minimum shards, and maximum
-        shards; filter to a specific URL to display more granular
-        information.
-      |||) +
-      panel.withPosition({ x: 18, y: 0, w: 6, h: 10 }) +
-      panel.withQueries([
-        panel.newQuery(
-          expr=|||
-            sum without (remote_name, url) (
-              prometheus_remote_storage_shards{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}
-            )
-          |||,
-          legendFormat='{{instance}} / {{component_id}}',
-        ),
-        panel.newQuery(
-          expr=|||
-            min (
-              prometheus_remote_storage_shards_min{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}
-            )
-          |||,
-          legendFormat='Minimum',
-        ),
-        panel.newQuery(
-          expr=|||
-            max (
-              prometheus_remote_storage_shards_max{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}
-            )
-          |||,
-          legendFormat='Maximum',
-        ),
-      ])
-    ),
-
-
-    // Sent samples / second
-    (
-      panel.new(title='Sent samples / second', type='timeseries') +
-      stackedPanelMixin +
-      panel.withUnit('cps') +
-      panel.withDescription(|||
-        Total outgoing samples sent by prometheus.remote_write.
-      |||) +
-      panel.withPosition({ x: 0, y: 10, w: 8, h: 10 }) +
-      panel.withQueries([
-        panel.newQuery(
-          expr=|||
-            sum without (url, remote_name) (
-              rate(prometheus_remote_storage_samples_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval])
-            )
-          |||,
-          legendFormat='{{instance}} / {{component_id}}',
-        ),
-      ])
-    ),
-
-    // Failed samples / second
-    (
-      panel.new(title='Failed samples / second', type='timeseries') +
-      stackedPanelMixin +
-      panel.withUnit('cps') +
-      panel.withDescription(|||
-        Rate of samples which prometheus.remote_write could not send due to
-        non-recoverable errors.
-      |||) +
-      panel.withPosition({ x: 8, y: 10, w: 8, h: 10 }) +
-      panel.withQueries([
-        panel.newQuery(
-          expr=|||
-            sum without (url,remote_name) (
-              rate(prometheus_remote_storage_samples_failed_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval])
-            )
-          |||,
-          legendFormat='{{instance}} / {{component_id}}',
-        ),
-      ])
-    ),
-
-    // Retried samples / second
-    (
-      panel.new(title='Retried samples / second', type='timeseries') +
-      stackedPanelMixin +
-      panel.withUnit('cps') +
-      panel.withDescription(|||
-        Rate of samples which prometheus.remote_write attempted to resend
-        after receiving a recoverable error.
-      |||) +
-      panel.withPosition({ x: 16, y: 10, w: 8, h: 10 }) +
-      panel.withQueries([
-        panel.newQuery(
-          expr=|||
-            sum without (url,remote_name) (
-              rate(prometheus_remote_storage_samples_retried_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval])
-            )
-          |||,
-          legendFormat='{{instance}} / {{component_id}}',
-        ),
-      ])
-    ),
-
-    // Active series (Total)
-    (
-      panel.new(title='Active series (total)', type='timeseries') {
-        options+: {
-          legend+: {
-            showLegend: false,
-          },
-        },
-      } +
-      panel.withUnit('short') +
-      panel.withDescription(|||
-        Total number of active series across all components.
-
-        An "active series" is a series that prometheus.remote_write recently
-        received a sample for. Active series are garbage collected whenever a
-        truncation of the WAL occurs.
-      |||) +
-      panel.withPosition({ x: 0, y: 20, w: 8, h: 10 }) +
-      panel.withQueries([
-        panel.newQuery(
-          expr=|||
-            sum(agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"})
-          |||,
-          legendFormat='Series',
-        ),
-      ])
-    ),
-
-    // Active series (by instance/component)
-    (
-      panel.new(title='Active series (by instance/component)', type='timeseries') +
-      panel.withUnit('short') +
-      panel.withDescription(|||
-        Total number of active series which are currently being tracked by
-        prometheus.remote_write components, with separate lines for each agent instance.
-
-        An "active series" is a series that prometheus.remote_write recently
-        received a sample for. Active series are garbage collected whenever a
-        truncation of the WAL occurs.
-      |||) +
-      panel.withPosition({ x: 8, y: 20, w: 8, h: 10 }) +
-      panel.withQueries([
-        panel.newQuery(
-          expr=|||
-            agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_id=~"$component", url=~"$url"}
-          |||,
-          legendFormat='{{instance}} / {{component_id}}',
-        ),
-      ])
-    ),
-
-    // Active series (by component)
-    (
-      panel.new(title='Active series (by component)', type='timeseries') +
-      panel.withUnit('short') +
-      panel.withDescription(|||
-        Total number of active series which are currently being tracked by
-        prometheus.remote_write components, aggregated across all instances.
-
-        An "active series" is a series that prometheus.remote_write recently
-        received a sample for. Active series are garbage collected whenever a
-        truncation of the WAL occurs.
-      |||) +
-      panel.withPosition({ x: 16, y: 20, w: 8, h: 10 }) +
-      panel.withQueries([
-        panel.newQuery(
-          expr=|||
-            sum by (component_id) (agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_id=~"$component", url=~"$url"})
-          |||,
-          legendFormat='{{component_id}}',
-        ),
-      ])
-    ),
-
-
-  ]),
-}
diff --git a/operations/agent-flow-mixin/dashboards/utils/dashboard.jsonnet b/operations/agent-flow-mixin/dashboards/utils/dashboard.jsonnet
index 43dedd3b16b3..63427a46c95d 100644
--- a/operations/agent-flow-mixin/dashboards/utils/dashboard.jsonnet
+++ b/operations/agent-flow-mixin/dashboards/utils/dashboard.jsonnet
@@ -7,6 +7,7 @@
   timezone: 'utc',
   refresh: '10s',
   schemaVersion: 36,
+  graphTooltip: 1, // shared crosshair for all graphs
   tags: ['grafana-agent-flow-mixin'],
   templating: {
     list: [{
diff --git a/operations/agent-flow-mixin/dashboards/utils/panel.jsonnet b/operations/agent-flow-mixin/dashboards/utils/panel.jsonnet
index 929af4699a93..93f7260f0923 100644
--- a/operations/agent-flow-mixin/dashboards/utils/panel.jsonnet
+++ b/operations/agent-flow-mixin/dashboards/utils/panel.jsonnet
@@ -127,4 +127,9 @@
       refId: refId,
     }
   ),
+
+  newRow(title='', x=0, y=0, w=24, h=1, collapsed=false)::
+    $.new(title, 'row') +
+    $.withPosition({ x: x, y: y, w: w, h: h }) +
+    { collapsed: collapsed },
 }
diff --git a/operations/agent-static-operator/crds/monitoring.coreos.com_podmonitors.yaml b/operations/agent-static-operator/crds/monitoring.coreos.com_podmonitors.yaml
index 3e1fae0fc527..153677bb175d 100644
--- a/operations/agent-static-operator/crds/monitoring.coreos.com_podmonitors.yaml
+++ b/operations/agent-static-operator/crds/monitoring.coreos.com_podmonitors.yaml
@@ -22,140 +22,81 @@ spec:
   - name: v1
     schema:
       openAPIV3Schema:
-        description:
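For readers not familiar with the mixin's panel utilities exercised in the hunks above, the sketch below shows how a single panel is assembled from them. It uses only helpers that appear in this diff (`panel.new`, `panel.withUnit`, `panel.withDescription`, `panel.withPosition`, `panel.withQueries`, `panel.newQuery`); the import path and the query expression are illustrative assumptions, not code from this change.

local panel = import './utils/panel.jsonnet';  // assumed relative path

// One 'timeseries' panel occupying a third of Grafana's 24-unit-wide grid.
panel.new(title='Example panel', type='timeseries') +
panel.withUnit('cps') +
panel.withDescription(|||
  Example description; the real panels document their underlying metric.
|||) +
panel.withPosition({ x: 0, y: 0, w: 8, h: 10 }) +
panel.withQueries([
  panel.newQuery(
    // Any PromQL expression works here; this one is illustrative.
    expr='sum(rate(prometheus_remote_storage_samples_total[$__rate_interval]))',
    legendFormat='{{instance}}',
  ),
])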
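The two utility changes are related: `graphTooltip: 1` switches every dashboard generated from `utils/dashboard.jsonnet` to a shared crosshair (in Grafana's dashboard JSON model, 0 means no shared tooltip, 1 a shared crosshair, and 2 a shared tooltip), while `newRow` gives dashboards a way to group panels under row headers. A minimal sketch of calling the new helper, under the assumption that rows and panels live in the same `panels` array as in Grafana's JSON model:

local panel = import './utils/panel.jsonnet';  // assumed relative path

[
  // A full-width row header; rows are one grid unit tall, so panels
  // grouped under this row start at y=1.
  panel.newRow(title='prometheus.remote_write', y=0),

  panel.new(title='Write latency', type='timeseries') +
  panel.withUnit('s') +
  panel.withPosition({ x: 0, y: 1, w: 12, h: 10 }),
]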
PodMonitor defines monitoring for a set of pods. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: Specification of desired Pod selection for target discovery - by Prometheus. properties: attachMetadata: - description: Attaches node metadata to discovered targets. Requires - Prometheus v2.35.0 and above. properties: node: - description: When set to true, Prometheus must have permissions - to get Nodes. type: boolean type: object jobLabel: - description: The label to use to retrieve the job name from. type: string labelLimit: - description: Per-scrape limit on number of labels that will be accepted - for a sample. Only valid in Prometheus versions 2.27.0 and newer. format: int64 type: integer labelNameLengthLimit: - description: Per-scrape limit on length of labels name that will be - accepted for a sample. Only valid in Prometheus versions 2.27.0 - and newer. format: int64 type: integer labelValueLengthLimit: - description: Per-scrape limit on length of labels value that will - be accepted for a sample. Only valid in Prometheus versions 2.27.0 - and newer. format: int64 type: integer namespaceSelector: - description: Selector to select which namespaces the Endpoints objects - are discovered from. properties: any: - description: Boolean describing whether all namespaces are selected - in contrast to a list restricting them. type: boolean matchNames: - description: List of namespace names to select from. items: type: string type: array type: object podMetricsEndpoints: - description: A list of endpoints allowed as part of this PodMonitor. items: - description: PodMetricsEndpoint defines a scrapeable endpoint of - a Kubernetes Pod serving Prometheus metrics. properties: authorization: - description: Authorization section for this endpoint properties: credentials: - description: The secret's key that contains the credentials - of the request properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic type: - description: Set the authentication type. Defaults to Bearer, - Basic will cause an error type: string type: object basicAuth: - description: 'BasicAuth allow an endpoint to authenticate over - basic authentication. More info: https://prometheus.io/docs/operating/configuration/#endpoint' properties: password: - description: The secret in the service monitor namespace - that contains the password for authentication. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. 
type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic username: - description: The secret in the service monitor namespace - that contains the username for authentication. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -163,64 +104,35 @@ spec: x-kubernetes-map-type: atomic type: object bearerTokenSecret: - description: Secret to mount to read bearer token for scraping - targets. The secret needs to be in the same namespace as the - pod monitor and accessible by the Prometheus Operator. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic enableHttp2: - description: Whether to enable HTTP2. type: boolean filterRunning: - description: 'Drop pods that are not running. (Failed, Succeeded). - Enabled by default. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase' type: boolean followRedirects: - description: FollowRedirects configures whether scrape requests - follow HTTP 3xx redirects. type: boolean honorLabels: - description: HonorLabels chooses the metric's labels on collisions - with target labels. type: boolean honorTimestamps: - description: HonorTimestamps controls whether Prometheus respects - the timestamps present in scraped data. type: boolean interval: - description: Interval at which metrics should be scraped If - not specified Prometheus' global scrape interval is used. pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string metricRelabelings: - description: MetricRelabelConfigs to apply to samples before - ingestion. items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It - defines ``-section of Prometheus - configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: action: default: replace - description: Action to perform based on regex matching. - Default is 'replace'. uppercase and lowercase actions - require Prometheus >= 2.36. enum: - replace - Replace @@ -246,85 +158,46 @@ spec: - DropEqual type: string modulus: - description: Modulus to take of the hash of the source - label values. format: int64 type: integer regex: - description: Regular expression against which the extracted - value is matched. 
Default is '(.*)' type: string replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' type: string separator: - description: Separator placed between concatenated source - label values. default is ';'. type: string sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular - expression for the replace, keep, and drop actions. items: - description: LabelName is a valid Prometheus label name - which may only contain ASCII letters, numbers, as - well as underscores. pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ type: string type: array targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. type: string type: object type: array oauth2: - description: OAuth2 for the URL. Only valid in Prometheus versions - 2.27.0 and newer. properties: clientId: - description: The secret or configmap containing the OAuth2 - client id properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -332,19 +205,12 @@ spec: x-kubernetes-map-type: atomic type: object clientSecret: - description: The secret containing the OAuth2 client secret properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -353,15 +219,12 @@ spec: endpointParams: additionalProperties: type: string - description: Parameters to append to the token URL type: object scopes: - description: OAuth2 scopes used for the token request items: type: string type: array tokenUrl: - description: The URL to fetch the token from minLength: 1 type: string required: @@ -374,37 +237,18 @@ spec: items: type: string type: array - description: Optional HTTP URL parameters type: object path: - description: HTTP path to scrape for metrics. If empty, Prometheus - uses the default value (e.g. `/metrics`). type: string port: - description: Name of the pod port this endpoint refers to. Mutually - exclusive with targetPort. 
type: string proxyUrl: - description: ProxyURL eg http://proxyserver:2195 Directs scrapes - to proxy through this endpoint. type: string relabelings: - description: 'RelabelConfigs to apply to samples before scraping. - Prometheus Operator automatically adds relabelings for a few - standard Kubernetes fields. The original scrape job''s name - is available via the `__tmp_prometheus_job_name` label. More - info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It - defines ``-section of Prometheus - configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: action: default: replace - description: Action to perform based on regex matching. - Default is 'replace'. uppercase and lowercase actions - require Prometheus >= 2.36. enum: - replace - Replace @@ -430,104 +274,59 @@ spec: - DropEqual type: string modulus: - description: Modulus to take of the hash of the source - label values. format: int64 type: integer regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' type: string replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' type: string separator: - description: Separator placed between concatenated source - label values. default is ';'. type: string sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular - expression for the replace, keep, and drop actions. items: - description: LabelName is a valid Prometheus label name - which may only contain ASCII letters, numbers, as - well as underscores. pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ type: string type: array targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. type: string type: object type: array scheme: - description: HTTP scheme to use for scraping. `http` and `https` - are the expected values unless you rewrite the `__scheme__` - label via relabeling. If empty, Prometheus uses the default - value `http`. enum: - http - https type: string scrapeTimeout: - description: Timeout after which the scrape is ended If not - specified, the Prometheus global scrape interval is used. pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string targetPort: anyOf: - type: integer - type: string - description: 'Deprecated: Use ''port'' instead.' x-kubernetes-int-or-string: true tlsConfig: - description: TLS configuration to use when scraping the endpoint. properties: ca: - description: Certificate authority used when verifying server - certificates. properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -535,43 +334,26 @@ spec: x-kubernetes-map-type: atomic type: object cert: - description: Client certificate to present when doing client-authentication. properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -579,71 +361,41 @@ spec: x-kubernetes-map-type: atomic type: object insecureSkipVerify: - description: Disable target certificate validation. type: boolean keySecret: - description: Secret containing the client key file for the - targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic serverName: - description: Used to verify the hostname for the targets. type: string type: object type: object type: array podTargetLabels: - description: PodTargetLabels transfers labels on the Kubernetes Pod - onto the target. items: type: string type: array sampleLimit: - description: SampleLimit defines per-scrape limit on number of scraped - samples that will be accepted. format: int64 type: integer selector: - description: Selector to select Pod objects. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. properties: key: - description: key is the label key that the selector applies - to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. 
type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array @@ -655,17 +407,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. type: object type: object x-kubernetes-map-type: atomic targetLimit: - description: TargetLimit defines a limit on the number of scraped - targets that will be accepted. format: int64 type: integer required: diff --git a/operations/agent-static-operator/crds/monitoring.coreos.com_probes.yaml b/operations/agent-static-operator/crds/monitoring.coreos.com_probes.yaml index 7ece55d2ac5e..13fc36f9aa3b 100644 --- a/operations/agent-static-operator/crds/monitoring.coreos.com_probes.yaml +++ b/operations/agent-static-operator/crds/monitoring.coreos.com_probes.yaml @@ -22,91 +22,53 @@ spec: - name: v1 schema: openAPIV3Schema: - description: Probe defines monitoring for a set of static targets or ingresses. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: Specification of desired Ingress selection for target discovery - by Prometheus. properties: authorization: - description: Authorization section for this endpoint properties: credentials: - description: The secret's key that contains the credentials of - the request properties: key: - description: The key of the secret to select from. Must be - a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must be - defined type: boolean required: - key type: object x-kubernetes-map-type: atomic type: - description: Set the authentication type. Defaults to Bearer, - Basic will cause an error type: string type: object basicAuth: - description: 'BasicAuth allow an endpoint to authenticate over basic - authentication. More info: https://prometheus.io/docs/operating/configuration/#endpoint' properties: password: - description: The secret in the service monitor namespace that - contains the password for authentication. properties: key: - description: The key of the secret to select from. Must be - a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. 
apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must be - defined type: boolean required: - key type: object x-kubernetes-map-type: atomic username: - description: The secret in the service monitor namespace that - contains the username for authentication. properties: key: - description: The key of the secret to select from. Must be - a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must be - defined type: boolean required: - key @@ -114,62 +76,36 @@ spec: x-kubernetes-map-type: atomic type: object bearerTokenSecret: - description: Secret to mount to read bearer token for scraping targets. - The secret needs to be in the same namespace as the probe and accessible - by the Prometheus Operator. properties: key: - description: The key of the secret to select from. Must be a - valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic interval: - description: Interval at which targets are probed using the configured - prober. If not specified Prometheus' global scrape interval is used. pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string jobName: - description: The job name assigned to scraped metrics by default. type: string labelLimit: - description: Per-scrape limit on number of labels that will be accepted - for a sample. Only valid in Prometheus versions 2.27.0 and newer. format: int64 type: integer labelNameLengthLimit: - description: Per-scrape limit on length of labels name that will be - accepted for a sample. Only valid in Prometheus versions 2.27.0 - and newer. format: int64 type: integer labelValueLengthLimit: - description: Per-scrape limit on length of labels value that will - be accepted for a sample. Only valid in Prometheus versions 2.27.0 - and newer. format: int64 type: integer metricRelabelings: - description: MetricRelabelConfigs to apply to samples before ingestion. items: - description: 'RelabelConfig allows dynamic rewriting of the label - set, being applied to samples before ingestion. It defines ``-section - of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: action: default: replace - description: Action to perform based on regex matching. Default - is 'replace'. uppercase and lowercase actions require Prometheus - >= 2.36. enum: - replace - Replace @@ -195,86 +131,48 @@ spec: - DropEqual type: string modulus: - description: Modulus to take of the hash of the source label - values. format: int64 type: integer regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' type: string replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex capture - groups are available. 
Default is '$1' type: string separator: - description: Separator placed between concatenated source label - values. default is ';'. type: string sourceLabels: - description: The source labels select values from existing labels. - Their content is concatenated using the configured separator - and matched against the configured regular expression for - the replace, keep, and drop actions. items: - description: LabelName is a valid Prometheus label name which - may only contain ASCII letters, numbers, as well as underscores. pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ type: string type: array targetLabel: - description: Label to which the resulting value is written in - a replace action. It is mandatory for replace actions. Regex - capture groups are available. type: string type: object type: array module: - description: 'The module to use for probing specifying how to probe - the target. Example module configuring in the blackbox exporter: - https://github.com/prometheus/blackbox_exporter/blob/master/example.yml' type: string oauth2: - description: OAuth2 for the URL. Only valid in Prometheus versions - 2.27.0 and newer. properties: clientId: - description: The secret or configmap containing the OAuth2 client - id properties: configMap: - description: ConfigMap containing data to use for the targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the ConfigMap or its key - must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -282,19 +180,12 @@ spec: x-kubernetes-map-type: atomic type: object clientSecret: - description: The secret containing the OAuth2 client secret properties: key: - description: The key of the secret to select from. Must be - a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must be - defined type: boolean required: - key @@ -303,15 +194,12 @@ spec: endpointParams: additionalProperties: type: string - description: Parameters to append to the token URL type: object scopes: - description: OAuth2 scopes used for the token request items: type: string type: array tokenUrl: - description: The URL to fetch the token from minLength: 1 type: string required: @@ -320,87 +208,49 @@ spec: - tokenUrl type: object prober: - description: Specification for the prober to use for probing targets. - The prober.URL parameter is required. Targets cannot be probed if - left empty. properties: path: default: /probe - description: Path to collect metrics from. Defaults to `/probe`. type: string proxyUrl: - description: Optional ProxyURL. 
type: string scheme: - description: HTTP scheme to use for scraping. `http` and `https` - are the expected values unless you rewrite the `__scheme__` - label via relabeling. If empty, Prometheus uses the default - value `http`. enum: - http - https type: string url: - description: Mandatory URL of the prober. type: string required: - url type: object sampleLimit: - description: SampleLimit defines per-scrape limit on number of scraped - samples that will be accepted. format: int64 type: integer scrapeTimeout: - description: Timeout for scraping metrics from the Prometheus exporter. - If not specified, the Prometheus global scrape timeout is used. pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string targetLimit: - description: TargetLimit defines a limit on the number of scraped - targets that will be accepted. format: int64 type: integer targets: - description: Targets defines a set of static or dynamically discovered - targets to probe. properties: ingress: - description: ingress defines the Ingress objects to probe and - the relabeling configuration. If `staticConfig` is also defined, - `staticConfig` takes precedence. properties: namespaceSelector: - description: From which namespaces to select Ingress objects. properties: any: - description: Boolean describing whether all namespaces - are selected in contrast to a list restricting them. type: boolean matchNames: - description: List of namespace names to select from. items: type: string type: array type: object relabelingConfigs: - description: 'RelabelConfigs to apply to the label set of - the target before it gets scraped. The original ingress - address is available via the `__tmp_prometheus_ingress_address` - label. It can be used to customize the probed URL. The original - scrape job''s name is available via the `__tmp_prometheus_job_name` - label. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' items: - description: 'RelabelConfig allows dynamic rewriting of - the label set, being applied to samples before ingestion. - It defines ``-section of Prometheus - configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: action: default: replace - description: Action to perform based on regex matching. - Default is 'replace'. uppercase and lowercase actions - require Prometheus >= 2.36. enum: - replace - Replace @@ -426,69 +276,33 @@ spec: - DropEqual type: string modulus: - description: Modulus to take of the hash of the source - label values. format: int64 type: integer regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' type: string replacement: - description: Replacement value against which a regex - replace is performed if the regular expression matches. - Regex capture groups are available. Default is '$1' type: string separator: - description: Separator placed between concatenated source - label values. default is ';'. type: string sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular - expression for the replace, keep, and drop actions. items: - description: LabelName is a valid Prometheus label - name which may only contain ASCII letters, numbers, - as well as underscores. 
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ type: string type: array targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. type: string type: object type: array selector: - description: Selector to select the Ingress objects. properties: matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector - applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. items: type: string type: array @@ -500,40 +314,21 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic type: object staticConfig: - description: 'staticConfig defines the static list of targets - to probe and the relabeling configuration. If `ingress` is also - defined, `staticConfig` takes precedence. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config.' properties: labels: additionalProperties: type: string - description: Labels assigned to all metrics scraped from the - targets. type: object relabelingConfigs: - description: 'RelabelConfigs to apply to the label set of - the targets before it gets scraped. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' items: - description: 'RelabelConfig allows dynamic rewriting of - the label set, being applied to samples before ingestion. - It defines ``-section of Prometheus - configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: action: default: replace - description: Action to perform based on regex matching. - Default is 'replace'. uppercase and lowercase actions - require Prometheus >= 2.36. enum: - replace - Replace @@ -559,88 +354,52 @@ spec: - DropEqual type: string modulus: - description: Modulus to take of the hash of the source - label values. format: int64 type: integer regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' type: string replacement: - description: Replacement value against which a regex - replace is performed if the regular expression matches. - Regex capture groups are available. Default is '$1' type: string separator: - description: Separator placed between concatenated source - label values. default is ';'. type: string sourceLabels: - description: The source labels select values from existing - labels. 
Their content is concatenated using the configured - separator and matched against the configured regular - expression for the replace, keep, and drop actions. items: - description: LabelName is a valid Prometheus label - name which may only contain ASCII letters, numbers, - as well as underscores. pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ type: string type: array targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. type: string type: object type: array static: - description: The list of hosts to probe. items: type: string type: array type: object type: object tlsConfig: - description: TLS configuration to use when scraping the endpoint. properties: ca: - description: Certificate authority used when verifying server - certificates. properties: configMap: - description: ConfigMap containing data to use for the targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the ConfigMap or its key - must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -648,40 +407,26 @@ spec: x-kubernetes-map-type: atomic type: object cert: - description: Client certificate to present when doing client-authentication. properties: configMap: - description: ConfigMap containing data to use for the targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the ConfigMap or its key - must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -689,29 +434,20 @@ spec: x-kubernetes-map-type: atomic type: object insecureSkipVerify: - description: Disable target certificate validation. type: boolean keySecret: - description: Secret containing the client key file for the targets. properties: key: - description: The key of the secret to select from. Must be - a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. 
apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must be - defined type: boolean required: - key type: object x-kubernetes-map-type: atomic serverName: - description: Used to verify the hostname for the targets. type: string type: object type: object diff --git a/operations/agent-static-operator/crds/monitoring.coreos.com_servicemonitors.yaml b/operations/agent-static-operator/crds/monitoring.coreos.com_servicemonitors.yaml index 5d661184cfb4..ff62f8f267b2 100644 --- a/operations/agent-static-operator/crds/monitoring.coreos.com_servicemonitors.yaml +++ b/operations/agent-static-operator/crds/monitoring.coreos.com_servicemonitors.yaml @@ -22,106 +22,61 @@ spec: - name: v1 schema: openAPIV3Schema: - description: ServiceMonitor defines monitoring for a set of services. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: Specification of desired Service selection for target discovery - by Prometheus. properties: attachMetadata: - description: Attaches node metadata to discovered targets. Requires - Prometheus v2.37.0 and above. properties: node: - description: When set to true, Prometheus must have permissions - to get Nodes. type: boolean type: object endpoints: - description: A list of endpoints allowed as part of this ServiceMonitor. items: - description: Endpoint defines a scrapeable endpoint serving Prometheus - metrics. properties: authorization: - description: Authorization section for this endpoint properties: credentials: - description: The secret's key that contains the credentials - of the request properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic type: - description: Set the authentication type. Defaults to Bearer, - Basic will cause an error type: string type: object basicAuth: - description: 'BasicAuth allow an endpoint to authenticate over - basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints' properties: password: - description: The secret in the service monitor namespace - that contains the password for authentication. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic username: - description: The secret in the service monitor namespace - that contains the username for authentication. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -129,67 +84,37 @@ spec: x-kubernetes-map-type: atomic type: object bearerTokenFile: - description: File to read bearer token for scraping targets. type: string bearerTokenSecret: - description: Secret to mount to read bearer token for scraping - targets. The secret needs to be in the same namespace as the - service monitor and accessible by the Prometheus Operator. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic enableHttp2: - description: Whether to enable HTTP2. type: boolean filterRunning: - description: 'Drop pods that are not running. (Failed, Succeeded). - Enabled by default. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase' type: boolean followRedirects: - description: FollowRedirects configures whether scrape requests - follow HTTP 3xx redirects. type: boolean honorLabels: - description: HonorLabels chooses the metric's labels on collisions - with target labels. type: boolean honorTimestamps: - description: HonorTimestamps controls whether Prometheus respects - the timestamps present in scraped data. type: boolean interval: - description: Interval at which metrics should be scraped If - not specified Prometheus' global scrape interval is used. pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string metricRelabelings: - description: MetricRelabelConfigs to apply to samples before - ingestion. items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It - defines ``-section of Prometheus - configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: action: default: replace - description: Action to perform based on regex matching. - Default is 'replace'. uppercase and lowercase actions - require Prometheus >= 2.36. enum: - replace - Replace @@ -215,85 +140,46 @@ spec: - DropEqual type: string modulus: - description: Modulus to take of the hash of the source - label values. format: int64 type: integer regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' type: string replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. 
Default is '$1' type: string separator: - description: Separator placed between concatenated source - label values. default is ';'. type: string sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular - expression for the replace, keep, and drop actions. items: - description: LabelName is a valid Prometheus label name - which may only contain ASCII letters, numbers, as - well as underscores. pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ type: string type: array targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. type: string type: object type: array oauth2: - description: OAuth2 for the URL. Only valid in Prometheus versions - 2.27.0 and newer. properties: clientId: - description: The secret or configmap containing the OAuth2 - client id properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -301,19 +187,12 @@ spec: x-kubernetes-map-type: atomic type: object clientSecret: - description: The secret containing the OAuth2 client secret properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -322,15 +201,12 @@ spec: endpointParams: additionalProperties: type: string - description: Parameters to append to the token URL type: object scopes: - description: OAuth2 scopes used for the token request items: type: string type: array tokenUrl: - description: The URL to fetch the token from minLength: 1 type: string required: @@ -343,37 +219,18 @@ spec: items: type: string type: array - description: Optional HTTP URL parameters type: object path: - description: HTTP path to scrape for metrics. If empty, Prometheus - uses the default value (e.g. `/metrics`). type: string port: - description: Name of the service port this endpoint refers to. - Mutually exclusive with targetPort. type: string proxyUrl: - description: ProxyURL eg http://proxyserver:2195 Directs scrapes - to proxy through this endpoint. type: string relabelings: - description: 'RelabelConfigs to apply to samples before scraping. 
- Prometheus Operator automatically adds relabelings for a few - standard Kubernetes fields. The original scrape job''s name - is available via the `__tmp_prometheus_job_name` label. More - info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It - defines ``-section of Prometheus - configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: action: default: replace - description: Action to perform based on regex matching. - Default is 'replace'. uppercase and lowercase actions - require Prometheus >= 2.36. enum: - replace - Replace @@ -399,107 +256,59 @@ spec: - DropEqual type: string modulus: - description: Modulus to take of the hash of the source - label values. format: int64 type: integer regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' type: string replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' type: string separator: - description: Separator placed between concatenated source - label values. default is ';'. type: string sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular - expression for the replace, keep, and drop actions. items: - description: LabelName is a valid Prometheus label name - which may only contain ASCII letters, numbers, as - well as underscores. pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ type: string type: array targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. type: string type: object type: array scheme: - description: HTTP scheme to use for scraping. `http` and `https` - are the expected values unless you rewrite the `__scheme__` - label via relabeling. If empty, Prometheus uses the default - value `http`. enum: - http - https type: string scrapeTimeout: - description: Timeout after which the scrape is ended If not - specified, the Prometheus global scrape timeout is used unless - it is less than `Interval` in which the latter is used. pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string targetPort: anyOf: - type: integer - type: string - description: Name or number of the target port of the Pod behind - the Service, the port must be specified with container port - property. Mutually exclusive with port. x-kubernetes-int-or-string: true tlsConfig: - description: TLS configuration to use when scraping the endpoint properties: ca: - description: Certificate authority used when verifying server - certificates. properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -507,47 +316,28 @@ spec: x-kubernetes-map-type: atomic type: object caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. type: string cert: - description: Client certificate to present when doing client-authentication. properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -555,119 +345,65 @@ spec: x-kubernetes-map-type: atomic type: object certFile: - description: Path to the client cert file in the Prometheus - container for the targets. type: string insecureSkipVerify: - description: Disable target certificate validation. type: boolean keyFile: - description: Path to the client key file in the Prometheus - container for the targets. type: string keySecret: - description: Secret containing the client key file for the - targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic serverName: - description: Used to verify the hostname for the targets. type: string type: object type: object type: array jobLabel: - description: "JobLabel selects the label from the associated Kubernetes - service which will be used as the `job` label for all metrics. \n - For example: If in `ServiceMonitor.spec.jobLabel: foo` and in `Service.metadata.labels.foo: - bar`, then the `job=\"bar\"` label is added to all metrics. \n If - the value of this field is empty or if the label doesn't exist for - the given Service, the `job` label of the metrics defaults to the - name of the Kubernetes Service." 
type: string labelLimit: - description: Per-scrape limit on number of labels that will be accepted - for a sample. Only valid in Prometheus versions 2.27.0 and newer. format: int64 type: integer labelNameLengthLimit: - description: Per-scrape limit on length of labels name that will be - accepted for a sample. Only valid in Prometheus versions 2.27.0 - and newer. format: int64 type: integer labelValueLengthLimit: - description: Per-scrape limit on length of labels value that will - be accepted for a sample. Only valid in Prometheus versions 2.27.0 - and newer. format: int64 type: integer namespaceSelector: - description: Selector to select which namespaces the Kubernetes Endpoints - objects are discovered from. properties: any: - description: Boolean describing whether all namespaces are selected - in contrast to a list restricting them. type: boolean matchNames: - description: List of namespace names to select from. items: type: string type: array type: object podTargetLabels: - description: PodTargetLabels transfers labels on the Kubernetes `Pod` - onto the created metrics. items: type: string type: array sampleLimit: - description: SampleLimit defines per-scrape limit on number of scraped - samples that will be accepted. format: int64 type: integer selector: - description: Selector to select Endpoints objects. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. properties: key: - description: key is the label key that the selector applies - to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array @@ -679,23 +415,14 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. type: object type: object x-kubernetes-map-type: atomic targetLabels: - description: TargetLabels transfers labels from the Kubernetes `Service` - onto the created metrics. items: type: string type: array targetLimit: - description: TargetLimit defines a limit on the number of scraped - targets that will be accepted. format: int64 type: integer required: diff --git a/operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml index fab68b18e6f6..4ec31d67a4e8 100644 --- a/operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml +++ b/operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml @@ -20,81 +20,32 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: GrafanaAgent defines a Grafana Agent deployment. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
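# A sketch of how the ServiceMonitor selector and limit fields above fit
# together, assuming the standard monitoring.coreos.com/v1 API; the app
# labels, namespace, and limit values are illustrative.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app                      # hypothetical
spec:
  jobLabel: app.kubernetes.io/name       # Service label whose value becomes the `job` label
  namespaceSelector:
    matchNames: [production]             # or `any: true` to select all namespaces
  selector:
    matchLabels:
      app.kubernetes.io/name: example-app
  podTargetLabels: [pod-template-hash]   # copied from the Pod onto the metrics
  sampleLimit: 10000
  targetLimit: 50
  labelLimit: 64                         # requires Prometheus >= 2.27.0
  endpoints:
    - port: http-metrics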
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: Spec holds the specification of the desired behavior for - the Grafana Agent cluster. properties: affinity: - description: Affinity, if specified, controls pod scheduling constraints. properties: nodeAffinity: - description: Describes node affinity scheduling rules for the - pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) with the - highest sum are the most preferred. items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a no-op). - A null preferred scheduling term matches no objects (i.e. - is also a no-op). properties: preference: - description: A node selector term, associated with the - corresponding weight. properties: matchExpressions: - description: A list of node selector requirements - by node's labels. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. properties: key: - description: The label key that the selector - applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. items: type: string type: array @@ -104,33 +55,13 @@ spec: type: object type: array matchFields: - description: A list of node selector requirements - by node's fields. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. properties: key: - description: The label key that the selector - applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. 
If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. items: type: string type: array @@ -142,8 +73,6 @@ spec: type: object x-kubernetes-map-type: atomic weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. format: int32 type: integer required: @@ -152,50 +81,18 @@ spec: type: object type: array requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. properties: nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. properties: matchExpressions: - description: A list of node selector requirements - by node's labels. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. properties: key: - description: The label key that the selector - applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. items: type: string type: array @@ -205,33 +102,13 @@ spec: type: object type: array matchFields: - description: A list of node selector requirements - by node's fields. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. properties: key: - description: The label key that the selector - applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. items: type: string type: array @@ -249,61 +126,22 @@ spec: x-kubernetes-map-type: atomic type: object podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. 
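# An illustrative GrafanaAgent `affinity.nodeAffinity` using the
# node-selector schema above; the architecture and zone values are
# placeholders.
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:                 # terms are ORed
        - matchExpressions:              # requirements within a term are ANDed
            - key: kubernetes.io/arch
              operator: In
              values: [amd64, arm64]
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 50                       # must be in the range 1-100
        preference:
          matchExpressions:
            - key: topology.kubernetes.io/zone
              operator: In
              values: [us-east-1a]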
The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) properties: podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. properties: matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. properties: key: - description: key is the label key that - the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. items: type: string type: array @@ -315,52 +153,19 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces field. - null selector and null or empty namespaces list - means "this pod's namespace". An empty selector - ({}) matches all namespaces. properties: matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. properties: key: - description: key is the label key that - the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. items: type: string type: array @@ -372,40 +177,19 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. The - term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces list - and null namespaceSelector means "this pod's namespace". items: type: string type: array topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. format: int32 type: integer required: @@ -414,52 +198,18 @@ spec: type: object type: array requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may - not try to eventually evict the pod from its node. When - there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms - must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key <topologyKey> - matches that of any node on which a pod of the set of - pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. properties: matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. properties: key: - description: key is the label key that the - selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. items: type: string type: array @@ -471,47 +221,19 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed.
type: object type: object x-kubernetes-map-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied to the - union of the namespaces selected by this field and - the ones listed in the namespaces field. null selector - and null or empty namespaces list means "this pod's - namespace". An empty selector ({}) matches all namespaces. properties: matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. properties: key: - description: key is the label key that the - selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. items: type: string type: array @@ -523,33 +245,14 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list of namespace - names that the term applies to. The term is applied - to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. null or - empty namespaces list and null namespaceSelector means - "this pod's namespace". items: type: string type: array topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. type: string required: - topologyKey @@ -557,62 +260,22 @@ spec: type: array type: object podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. as some - other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the anti-affinity expressions specified - by this field, but it may choose a node that violates one - or more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. 
items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) properties: podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. properties: matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. properties: key: - description: key is the label key that - the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. items: type: string type: array @@ -624,52 +287,19 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces field. - null selector and null or empty namespaces list - means "this pod's namespace". An empty selector - ({}) matches all namespaces. properties: matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. properties: key: - description: key is the label key that - the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. items: type: string type: array @@ -681,40 +311,19 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. The - term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. 
null or empty namespaces list - and null namespaceSelector means "this pod's namespace". items: type: string type: array topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. format: int32 type: integer required: @@ -723,52 +332,18 @@ spec: type: object type: array requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will - not be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its - node. When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key <topologyKey> - matches that of any node on which a pod of the set of - pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. properties: matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed.
items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. properties: key: - description: key is the label key that the - selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. items: type: string type: array @@ -832,33 +379,14 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list of namespace - names that the term applies to. The term is applied - to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. null or - empty namespaces list and null namespaceSelector means - "this pod's namespace". items: type: string type: array topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. type: string required: - topologyKey @@ -867,83 +395,47 @@ spec: type: object type: object apiServer: - description: APIServerConfig lets you specify a host and auth methods - to access the Kubernetes API server. If left empty, the Agent assumes - that it is running inside of the cluster and will discover API servers - automatically and use the pod's CA certificate and bearer token - file at /var/run/secrets/kubernetes.io/serviceaccount. properties: authorization: - description: Authorization section for accessing apiserver properties: credentials: - description: The secret's key that contains the credentials - of the request properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic credentialsFile: - description: File to read a secret from, mutually exclusive - with Credentials (from SafeAuthorization) type: string type: - description: Set the authentication type. Defaults to Bearer, - Basic will cause an error type: string type: object basicAuth: - description: BasicAuth allow an endpoint to authenticate over - basic authentication properties: password: - description: The secret in the service monitor namespace that - contains the password for authentication. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. 
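# A sketch of the pod anti-affinity schema above, spreading agent pods
# across nodes; the app label value is a placeholder.
affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            app.kubernetes.io/name: grafana-agent
        topologyKey: kubernetes.io/hostname   # empty topologyKey is not allowed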
type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic username: - description: The secret in the service monitor namespace that - contains the username for authentication. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -951,57 +443,34 @@ spec: x-kubernetes-map-type: atomic type: object bearerToken: - description: Bearer token for accessing apiserver. type: string bearerTokenFile: - description: File to read bearer token for accessing apiserver. type: string host: - description: Host of apiserver. A valid string consisting of a - hostname or IP followed by an optional port number type: string tlsConfig: - description: TLS Config to use for accessing apiserver. properties: ca: - description: Certificate authority used when verifying server - certificates. properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -1009,47 +478,28 @@ spec: x-kubernetes-map-type: atomic type: object caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. type: string cert: - description: Client certificate to present when doing client-authentication. properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -1057,210 +507,102 @@ spec: x-kubernetes-map-type: atomic type: object certFile: - description: Path to the client cert file in the Prometheus - container for the targets. type: string insecureSkipVerify: - description: Disable target certificate validation. type: boolean keyFile: - description: Path to the client key file in the Prometheus - container for the targets. type: string keySecret: - description: Secret containing the client key file for the - targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic serverName: - description: Used to verify the hostname for the targets. type: string type: object required: - host type: object configMaps: - description: ConfigMaps is a list of config maps in the same namespace - as the GrafanaAgent object which will be mounted into each running - Grafana Agent pod. The ConfigMaps are mounted into /var/lib/grafana-agent/extra-configmaps/. items: type: string type: array configReloaderImage: - description: Image, when specified, overrides the image used to run - Config Reloader. Specify the image along with a tag. You still need - to set the version to ensure Grafana Agent Operator knows which - version of Grafana Agent is being configured. type: string configReloaderVersion: - description: Version of Config Reloader to be deployed. type: string containers: - description: 'Containers lets you inject additional containers or - modify operator-generated containers. This can be used to add an - authentication proxy to a Grafana Agent pod or to change the behavior - of an operator-generated container. Containers described here modify - an operator-generated container if they share the same name and - if modifications are done via a strategic merge patch. The current - container names are: `grafana-agent` and `config-reloader`. Overriding - containers is entirely outside the scope of what the Grafana Agent - team supports and by doing so, you accept that this behavior may - break at any time without notice.' items: - description: A single application container that you want to run - within a pod. properties: args: - description: 'Arguments to the entrypoint. The container image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If a variable - cannot be resolved, the reference in the input string will - be unchanged. Double $$ are reduced to a single $, which allows - for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references - will never be expanded, regardless of whether the variable - exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' items: type: string type: array command: - description: 'Entrypoint array. 
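# An illustrative `apiServer` block for running the agent outside the
# cluster, per the schema above; the host URL and Secret/ConfigMap names
# are assumptions. If the block is omitted entirely, the agent assumes it
# is in-cluster and uses the pod's service-account CA and bearer token.
apiServer:
  host: https://kubernetes.example.com:6443
  authorization:
    type: Bearer                 # the schema notes that Basic causes an error here
    credentials:
      name: apiserver-token      # hypothetical Secret
      key: token
  tlsConfig:
    ca:
      configMap:
        name: apiserver-ca       # hypothetical ConfigMap
        key: ca.crt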
Not executed within a shell. - The container image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: - i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether - the variable exists or not. Cannot be updated. More info: - https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' items: type: string type: array env: - description: List of environment variables to set in the container. - Cannot be updated. items: - description: EnvVar represents an environment variable present - in a Container. properties: name: - description: Name of the environment variable. Must be - a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in - the container and any service environment variables. - If a variable cannot be resolved, the reference in the - input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) - syntax: i.e. "$$(VAR_NAME)" will produce the string - literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists - or not. Defaults to "".' type: string valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. properties: configMapKeyRef: - description: Selects a key of a ConfigMap. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or - its key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports - metadata.name, metadata.namespace, `metadata.labels[''<KEY>'']`, - `metadata.annotations[''<KEY>'']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' properties: apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". type: string fieldPath: - description: Path of the field to select in the - specified API version. type: string required: - fieldPath type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, limits.ephemeral-storage, requests.cpu, - limits.memory, requests.memory and requests.ephemeral-storage) - are currently supported.'
properties: containerName: - description: 'Container name: required for volumes, - optional for env vars' type: string divisor: anyOf: - type: integer - type: string - description: Specifies the output format of the - exposed resources, defaults to "1" pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true resource: - description: 'Required: resource to select' type: string required: - resource type: object x-kubernetes-map-type: atomic secretKeyRef: - description: Selects a key of a secret in the pod's - namespace properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its - key must be defined type: boolean required: - key @@ -1272,111 +614,53 @@ spec: type: object type: array envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be - a C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key - will take precedence. Cannot be updated. items: - description: EnvFromSource represents the source of a set - of ConfigMaps properties: configMapRef: - description: The ConfigMap to select from properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap must be - defined type: boolean type: object x-kubernetes-map-type: atomic prefix: - description: An optional identifier to prepend to each - key in the ConfigMap. Must be a C_IDENTIFIER. type: string secretRef: - description: The Secret to select from properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret must be defined type: boolean type: object x-kubernetes-map-type: atomic type: object type: array image: - description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' type: string imagePullPolicy: - description: 'Image pull policy. One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' type: string lifecycle: - description: Actions that the management system should take - in response to container lifecycle events. Cannot be updated. properties: postStart: - description: 'PostStart is called immediately after a container - is created. If the handler fails, the container is terminated - and restarted according to its restart policy. 
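# A sketch of the container `env`/`envFrom` schema above, shown as a
# GrafanaAgent `containers` override merged onto the operator-generated
# `grafana-agent` container; the ConfigMap and Secret names are invented.
# Note the schema's warning: overriding generated containers is unsupported.
containers:
  - name: grafana-agent            # same name => strategic merge with the generated container
    env:
      - name: POD_NAME
        valueFrom:
          fieldRef:
            fieldPath: metadata.name
      - name: API_TOKEN
        valueFrom:
          secretKeyRef:
            name: agent-secrets    # hypothetical Secret
            key: token
    envFrom:
      - prefix: EXTRA_             # prepended to every key; must yield C_IDENTIFIERs
        configMapRef:
          name: agent-env          # hypothetical ConfigMap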
Other management - of the container blocks until the hook completes. More - info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's - filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, you need - to explicitly call out to that shell. Exit status - of 0 is treated as live/healthy and non-zero is - unhealthy. items: type: string type: array type: object httpGet: - description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in - httpHeaders instead. type: string httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. items: - description: HTTPHeader describes a custom header - to be used in HTTP probes properties: name: - description: The header field name. This will - be canonicalized upon output, so case-variant - names will be understood as the same header. type: string value: - description: The header field value type: string required: - name @@ -1384,97 +668,49 @@ spec: type: object type: array path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the - host. Defaults to HTTP. type: string required: - port type: object tcpSocket: - description: Deprecated. TCPSocket is NOT supported - as a LifecycleHandler and kept for the backward compatibility. - There are no validation of this field and lifecycle - hooks will fail in runtime when tcp handler is specified. properties: host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object type: object preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. The Pod''s termination grace period - countdown begins before the PreStop hook is executed. - Regardless of the outcome of the handler, the container - will eventually terminate within the Pod''s termination - grace period (unless delayed by finalizers). Other management - of the container blocks until the hook completes or until - the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's - filesystem. 
The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, you need - to explicitly call out to that shell. Exit status - of 0 is treated as live/healthy and non-zero is - unhealthy. items: type: string type: array type: object httpGet: - description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in - httpHeaders instead. type: string httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. items: - description: HTTPHeader describes a custom header - to be used in HTTP probes properties: name: - description: The header field name. This will - be canonicalized upon output, so case-variant - names will be understood as the same header. type: string value: - description: The header field value type: string required: - name @@ -1482,40 +718,25 @@ spec: type: object type: array path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the - host. Defaults to HTTP. type: string required: - port type: object tcpSocket: - description: Deprecated. TCPSocket is NOT supported - as a LifecycleHandler and kept for the backward compatibility. - There are no validation of this field and lifecycle - hooks will fail in runtime when tcp handler is specified. properties: host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port @@ -1523,71 +744,37 @@ spec: type: object type: object livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory for the - command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for the probe - to be considered failed after having succeeded. Defaults - to 3. Minimum value is 1. format: int32 type: integer grpc: - description: GRPC specifies an action involving a GRPC port. properties: port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. format: int32 type: integer service: - description: "Service is the name of the service to - place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." 
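# An illustrative `lifecycle` block for the hook schema above; the path,
# port, and command are placeholders. The deprecated tcpSocket handler is
# deliberately avoided, since the schema notes it fails at runtime.
lifecycle:
  postStart:
    httpGet:
      path: /-/ready
      port: 8080
      scheme: HTTP
  preStop:
    exec:
      command: ["/bin/sh", "-c", "sleep 5"]   # exec'd directly, hence the explicit shell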
type: string required: - port type: object httpGet: - description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. type: string httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. items: - description: HTTPHeader describes a custom header - to be used in HTTP probes properties: name: - description: The header field name. This will - be canonicalized upon output, so case-variant - names will be understood as the same header. type: string value: - description: The header field value type: string required: - name @@ -1595,126 +782,62 @@ spec: type: object type: array path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: Name or number of the port to access on - the container. Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container has - started before liveness probes are initiated. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the probe - to be considered successful after having failed. Defaults - to 1. Must be 1 for liveness and startup. Minimum value - is 1. format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving a TCP - port. properties: host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: Number or name of the port to access on - the container. Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs - to terminate gracefully upon probe failure. The grace - period is the duration in seconds after the processes - running in the pod are sent a termination signal and the - time when the processes are forcibly halted with a kill - signal. Set this value longer than the expected cleanup - time for your process. If this value is nil, the pod's - terminationGracePeriodSeconds will be used. Otherwise, - this value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates - stop immediately via the kill signal (no opportunity to - shut down). This is a beta field and requires enabling - ProbeTerminationGracePeriod feature gate. Minimum value - is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer type: object name: - description: Name of the container specified as a DNS_LABEL. - Each container in a pod must have a unique name (DNS_LABEL). - Cannot be updated. 
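# A minimal liveness probe matching the schema above; the path and port
# are illustrative assumptions, not the agent's documented endpoints.
livenessProbe:
  httpGet:
    path: /-/healthy
    port: 8080
  initialDelaySeconds: 10
  periodSeconds: 10       # the schema's default when unset
  failureThreshold: 3     # consecutive failures before the container restarts
  timeoutSeconds: 1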
type: string ports: - description: List of ports to expose from the container. Not - specifying a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Modifying this array with strategic merge patch may corrupt - the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. - Cannot be updated. items: - description: ContainerPort represents a network port in a - single container. properties: containerPort: - description: Number of port to expose on the pod's IP - address. This must be a valid port number, 0 < x < 65536. format: int32 type: integer hostIP: - description: What host IP to bind the external port to. type: string hostPort: - description: Number of port to expose on the host. If - specified, this must be a valid port number, 0 < x < - 65536. If HostNetwork is specified, this must match - ContainerPort. Most containers do not need this. format: int32 type: integer name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in a pod - must have a unique name. Name for the port that can - be referred to by services. type: string protocol: default: TCP - description: Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". type: string required: - containerPort @@ -1725,71 +848,37 @@ spec: - protocol x-kubernetes-list-type: map readinessProbe: - description: 'Periodic probe of container service readiness. - Container will be removed from service endpoints if the probe - fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory for the - command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for the probe - to be considered failed after having succeeded. Defaults - to 3. Minimum value is 1. format: int32 type: integer grpc: - description: GRPC specifies an action involving a GRPC port. properties: port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. format: int32 type: integer service: - description: "Service is the name of the service to - place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." type: string required: - port type: object httpGet: - description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. type: string httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. items: - description: HTTPHeader describes a custom header - to be used in HTTP probes properties: name: - description: The header field name. This will - be canonicalized upon output, so case-variant - names will be understood as the same header. 
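# A sketch of the `ports` schema above. As the deleted description notes,
# listing a port here is declarative documentation only; it does not gate
# which ports are actually reachable.
ports:
  - name: http-metrics    # must be an IANA_SVC_NAME, unique within the pod
    containerPort: 8080
    protocol: TCP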
type: string value: - description: The header field value type: string required: - name @@ -1797,97 +886,51 @@ spec: type: object type: array path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: Name or number of the port to access on - the container. Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container has - started before liveness probes are initiated. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the probe - to be considered successful after having failed. Defaults - to 1. Must be 1 for liveness and startup. Minimum value - is 1. format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving a TCP - port. properties: host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: Number or name of the port to access on - the container. Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs - to terminate gracefully upon probe failure. The grace - period is the duration in seconds after the processes - running in the pod are sent a termination signal and the - time when the processes are forcibly halted with a kill - signal. Set this value longer than the expected cleanup - time for your process. If this value is nil, the pod's - terminationGracePeriodSeconds will be used. Otherwise, - this value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates - stop immediately via the kill signal (no opportunity to - shut down). This is a beta field and requires enabling - ProbeTerminationGracePeriod feature gate. Minimum value - is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer type: object resizePolicy: - description: Resources resize policy for the container. items: - description: ContainerResizePolicy represents resource resize - policy for the container. properties: resourceName: - description: 'Name of the resource to which this resource - resize policy applies. Supported values: cpu, memory.' type: string restartPolicy: - description: Restart policy to apply when specified resource - is resized. If not specified, it defaults to NotRequired. type: string required: - resourceName @@ -1896,23 +939,11 @@ spec: type: array x-kubernetes-list-type: atomic resources: - description: 'Compute Resources required by this container. - Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only - be set for containers." items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available - inside a container. type: string required: - name @@ -1928,8 +959,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -1938,275 +967,103 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests - cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object restartPolicy: - description: 'RestartPolicy defines the restart behavior of - individual containers in a pod. This field may only be set - for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, - the restart behavior is defined by the Pod''s restart policy - and the container type. Setting the RestartPolicy as "Always" - for the init container will have the following effect: this - init container will be continually restarted on exit until - all regular containers have terminated. Once all regular containers - have completed, all init containers with restartPolicy "Always" - will be shut down. This lifecycle differs from normal init - containers and is often referred to as a "sidecar" container. - Although this init container still starts in the init container - sequence, it does not wait for the container to complete before - proceeding to the next init container. Instead, the next init - container starts immediately after this init container is - started, or after any startupProbe has successfully completed.' type: string securityContext: - description: 'SecurityContext defines the security options the - container should be run with. If set, the fields of SecurityContext - override the equivalent fields of PodSecurityContext. More - info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' properties: allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether - a process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN Note that this field cannot be set - when spec.os.name is windows.' 
type: boolean capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by - the container runtime. Note that this field cannot be - set when spec.os.name is windows. properties: add: - description: Added capabilities items: - description: Capability represent POSIX capabilities - type type: string type: array drop: - description: Removed capabilities items: - description: Capability represent POSIX capabilities - type type: string type: array type: object privileged: - description: Run container in privileged mode. Processes - in privileged containers are essentially equivalent to - root on the host. Defaults to false. Note that this field - cannot be set when spec.os.name is windows. type: boolean procMount: - description: procMount denotes the type of proc mount to - use for the containers. The default is DefaultProcMount - which uses the container runtime defaults for readonly - paths and masked paths. This requires the ProcMountType - feature flag to be enabled. Note that this field cannot - be set when spec.os.name is windows. type: string readOnlyRootFilesystem: - description: Whether this container has a read-only root - filesystem. Default is false. Note that this field cannot - be set when spec.os.name is windows. type: boolean runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence. Note that this field cannot be set when - spec.os.name is windows. format: int64 type: integer runAsNonRoot: - description: Indicates that the container must run as a - non-root user. If true, the Kubelet will validate the - image at runtime to ensure that it does not run as UID - 0 (root) and fail to start the container if it does. If - unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both - SecurityContext and PodSecurityContext, the value specified - in SecurityContext takes precedence. type: boolean runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata - if unspecified. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. Note - that this field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a - random SELinux context for each container. May also be - set in PodSecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence. Note that this field cannot be set when - spec.os.name is windows. properties: level: - description: Level is SELinux level label that applies - to the container. type: string role: - description: Role is a SELinux role label that applies - to the container. type: string type: - description: Type is a SELinux type label that applies - to the container. type: string user: - description: User is a SELinux user label that applies - to the container. type: string type: object seccompProfile: - description: The seccomp options to use by this container. 
- If seccomp options are provided at both the pod & container - level, the container options override the pod options. - Note that this field cannot be set when spec.os.name is - windows. properties: localhostProfile: - description: localhostProfile indicates a profile defined - in a file on the node should be used. The profile - must be preconfigured on the node to work. Must be - a descending path, relative to the kubelet's configured - seccomp profile location. Must be set if type is "Localhost". - Must NOT be set for any other type. type: string type: - description: "type indicates which kind of seccomp profile - will be applied. Valid options are: \n Localhost - - a profile defined in a file on the node should be - used. RuntimeDefault - the container runtime default - profile should be used. Unconfined - no profile should - be applied." type: string required: - type type: object windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is - linux. properties: gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. type: string gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. type: string hostProcess: - description: HostProcess determines if a container should - be run as a 'Host Process' container. All of a Pod's - containers must have the same effective HostProcess - value (it is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). In addition, - if HostProcess is true then HostNetwork must also - be set to true. type: boolean runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set - in PodSecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence. type: string type: object type: object startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod - will be restarted, just as if the livenessProbe failed. This - can be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. - This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory for the - command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. 
items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for the probe - to be considered failed after having succeeded. Defaults - to 3. Minimum value is 1. format: int32 type: integer grpc: - description: GRPC specifies an action involving a GRPC port. properties: port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. format: int32 type: integer service: - description: "Service is the name of the service to - place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." type: string required: - port type: object httpGet: - description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. type: string httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. items: - description: HTTPHeader describes a custom header - to be used in HTTP probes properties: name: - description: The header field name. This will - be canonicalized upon output, so case-variant - names will be understood as the same header. type: string value: - description: The header field value type: string required: - name @@ -2214,139 +1071,61 @@ spec: type: object type: array path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: Name or number of the port to access on - the container. Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container has - started before liveness probes are initiated. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the probe - to be considered successful after having failed. Defaults - to 1. Must be 1 for liveness and startup. Minimum value - is 1. format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving a TCP - port. properties: host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: Number or name of the port to access on - the container. Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs - to terminate gracefully upon probe failure. The grace - period is the duration in seconds after the processes - running in the pod are sent a termination signal and the - time when the processes are forcibly halted with a kill - signal. Set this value longer than the expected cleanup - time for your process. If this value is nil, the pod's - terminationGracePeriodSeconds will be used. Otherwise, - this value overrides the value provided by the pod spec. - Value must be non-negative integer. 
The value zero indicates - stop immediately via the kill signal (no opportunity to - shut down). This is a beta field and requires enabling - ProbeTerminationGracePeriod feature gate. Minimum value - is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer type: object stdin: - description: Whether this container should allocate a buffer - for stdin in the container runtime. If this is not set, reads - from stdin in the container will always result in EOF. Default - is false. type: boolean stdinOnce: - description: Whether the container runtime should close the - stdin channel after it has been opened by a single attach. - When stdin is true the stdin stream will remain open across - multiple attach sessions. If stdinOnce is set to true, stdin - is opened on container start, is empty until the first client - attaches to stdin, and then remains open and accepts data - until the client disconnects, at which time stdin is closed - and remains closed until the container is restarted. If this - flag is false, a container processes that reads from stdin - will never receive an EOF. Default is false type: boolean terminationMessagePath: - description: 'Optional: Path at which the file to which the - container''s termination message will be written is mounted - into the container''s filesystem. Message written is intended - to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. - The total message length across all containers will be limited - to 12kb. Defaults to /dev/termination-log. Cannot be updated.' type: string terminationMessagePolicy: - description: Indicate how the termination message should be - populated. File will use the contents of terminationMessagePath - to populate the container status message on both success and - failure. FallbackToLogsOnError will use the last chunk of - container log output if the termination message file is empty - and the container exited with an error. The log output is - limited to 2048 bytes or 80 lines, whichever is smaller. Defaults - to File. Cannot be updated. type: string tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. type: boolean volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. items: - description: volumeDevice describes a mapping of a raw block - device within a container. properties: devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. type: string name: - description: name must match the name of a persistentVolumeClaim - in the pod type: string required: - devicePath @@ -2354,40 +1133,19 @@ spec: type: object type: array volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. items: - description: VolumeMount describes a mounting of a Volume - within a container. properties: mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. type: string mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other - way around. 
When not set, MountPropagationNone is used. - This field is beta in 1.10. type: string name: - description: This must match the Name of a Volume. type: string readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. type: boolean subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). type: string subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. type: string required: - mountPath @@ -2395,9 +1153,6 @@ spec: type: object type: array workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which might - be configured in the container image. Cannot be updated. type: string required: - name @@ -2405,190 +1160,88 @@ spec: type: array disableReporting: default: false - description: disableReporting disables reporting of enabled feature - flags to Grafana. type: boolean disableSupportBundle: default: false - description: disableSupportBundle disables the generation of support - bundles. type: boolean enableConfigReadAPI: default: false - description: enableConfigReadAPI enables the read API for viewing - the currently running config port 8080 on the agent. type: boolean image: - description: Image, when specified, overrides the image used to run - Agent. Specify the image along with a tag. You still need to set - the version to ensure Grafana Agent Operator knows which version - of Grafana Agent is being configured. type: string imagePullSecrets: - description: 'ImagePullSecrets holds an optional list of references - to Secrets within the same namespace used for pulling the Grafana - Agent image from registries. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod' items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object x-kubernetes-map-type: atomic type: array initContainers: - description: 'InitContainers let you add initContainers to the pod - definition. These can be used to, for example, fetch secrets for - injection into the Grafana Agent configuration from external sources. - Errors during the execution of an initContainer cause the pod to - restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - Using initContainers for any use case other than secret fetching - is entirely outside the scope of what the Grafana Agent maintainers - support and by doing so, you accept that this behavior may break - at any time without notice.' items: - description: A single application container that you want to run - within a pod. properties: args: - description: 'Arguments to the entrypoint. The container image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If a variable - cannot be resolved, the reference in the input string will - be unchanged. 
Double $$ are reduced to a single $, which allows - for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references - will never be expanded, regardless of whether the variable - exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' items: type: string type: array command: - description: 'Entrypoint array. Not executed within a shell. - The container image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: - i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether - the variable exists or not. Cannot be updated. More info: - https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' items: type: string type: array env: - description: List of environment variables to set in the container. - Cannot be updated. items: - description: EnvVar represents an environment variable present - in a Container. properties: name: - description: Name of the environment variable. Must be - a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in - the container and any service environment variables. - If a variable cannot be resolved, the reference in the - input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) - syntax: i.e. "$$(VAR_NAME)" will produce the string - literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists - or not. Defaults to "".' type: string valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. properties: configMapKeyRef: - description: Selects a key of a ConfigMap. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or - its key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports - metadata.name, metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' properties: apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". type: string fieldPath: - description: Path of the field to select in the - specified API version. type: string required: - fieldPath type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, limits.ephemeral-storage, requests.cpu, - requests.memory and requests.ephemeral-storage) - are currently supported.' 
properties: containerName: - description: 'Container name: required for volumes, - optional for env vars' type: string divisor: anyOf: - type: integer - type: string - description: Specifies the output format of the - exposed resources, defaults to "1" pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true resource: - description: 'Required: resource to select' type: string required: - resource type: object x-kubernetes-map-type: atomic secretKeyRef: - description: Selects a key of a secret in the pod's - namespace properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its - key must be defined type: boolean required: - key @@ -2600,111 +1253,53 @@ spec: type: object type: array envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be - a C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key - will take precedence. Cannot be updated. items: - description: EnvFromSource represents the source of a set - of ConfigMaps properties: configMapRef: - description: The ConfigMap to select from properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap must be - defined type: boolean type: object x-kubernetes-map-type: atomic prefix: - description: An optional identifier to prepend to each - key in the ConfigMap. Must be a C_IDENTIFIER. type: string secretRef: - description: The Secret to select from properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret must be defined type: boolean type: object x-kubernetes-map-type: atomic type: object type: array image: - description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' type: string imagePullPolicy: - description: 'Image pull policy. One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' type: string lifecycle: - description: Actions that the management system should take - in response to container lifecycle events. Cannot be updated. properties: postStart: - description: 'PostStart is called immediately after a container - is created. If the handler fails, the container is terminated - and restarted according to its restart policy. 
Other management - of the container blocks until the hook completes. More - info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's - filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, you need - to explicitly call out to that shell. Exit status - of 0 is treated as live/healthy and non-zero is - unhealthy. items: type: string type: array type: object httpGet: - description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in - httpHeaders instead. type: string httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. items: - description: HTTPHeader describes a custom header - to be used in HTTP probes properties: name: - description: The header field name. This will - be canonicalized upon output, so case-variant - names will be understood as the same header. type: string value: - description: The header field value type: string required: - name @@ -2712,97 +1307,49 @@ spec: type: object type: array path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the - host. Defaults to HTTP. type: string required: - port type: object tcpSocket: - description: Deprecated. TCPSocket is NOT supported - as a LifecycleHandler and kept for the backward compatibility. - There are no validation of this field and lifecycle - hooks will fail in runtime when tcp handler is specified. properties: host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object type: object preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. The Pod''s termination grace period - countdown begins before the PreStop hook is executed. - Regardless of the outcome of the handler, the container - will eventually terminate within the Pod''s termination - grace period (unless delayed by finalizers). Other management - of the container blocks until the hook completes or until - the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's - filesystem. 
The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, you need - to explicitly call out to that shell. Exit status - of 0 is treated as live/healthy and non-zero is - unhealthy. items: type: string type: array type: object httpGet: - description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in - httpHeaders instead. type: string httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. items: - description: HTTPHeader describes a custom header - to be used in HTTP probes properties: name: - description: The header field name. This will - be canonicalized upon output, so case-variant - names will be understood as the same header. type: string value: - description: The header field value type: string required: - name @@ -2810,40 +1357,25 @@ spec: type: object type: array path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the - host. Defaults to HTTP. type: string required: - port type: object tcpSocket: - description: Deprecated. TCPSocket is NOT supported - as a LifecycleHandler and kept for the backward compatibility. - There are no validation of this field and lifecycle - hooks will fail in runtime when tcp handler is specified. properties: host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port @@ -2851,71 +1383,37 @@ spec: type: object type: object livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory for the - command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for the probe - to be considered failed after having succeeded. Defaults - to 3. Minimum value is 1. format: int32 type: integer grpc: - description: GRPC specifies an action involving a GRPC port. properties: port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. format: int32 type: integer service: - description: "Service is the name of the service to - place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." 
type: string required: - port type: object httpGet: - description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. type: string httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. items: - description: HTTPHeader describes a custom header - to be used in HTTP probes properties: name: - description: The header field name. This will - be canonicalized upon output, so case-variant - names will be understood as the same header. type: string value: - description: The header field value type: string required: - name @@ -2923,126 +1421,62 @@ spec: type: object type: array path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: Name or number of the port to access on - the container. Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container has - started before liveness probes are initiated. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the probe - to be considered successful after having failed. Defaults - to 1. Must be 1 for liveness and startup. Minimum value - is 1. format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving a TCP - port. properties: host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: Number or name of the port to access on - the container. Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs - to terminate gracefully upon probe failure. The grace - period is the duration in seconds after the processes - running in the pod are sent a termination signal and the - time when the processes are forcibly halted with a kill - signal. Set this value longer than the expected cleanup - time for your process. If this value is nil, the pod's - terminationGracePeriodSeconds will be used. Otherwise, - this value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates - stop immediately via the kill signal (no opportunity to - shut down). This is a beta field and requires enabling - ProbeTerminationGracePeriod feature gate. Minimum value - is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer type: object name: - description: Name of the container specified as a DNS_LABEL. - Each container in a pod must have a unique name (DNS_LABEL). - Cannot be updated. 
type: string ports: - description: List of ports to expose from the container. Not - specifying a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Modifying this array with strategic merge patch may corrupt - the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. - Cannot be updated. items: - description: ContainerPort represents a network port in a - single container. properties: containerPort: - description: Number of port to expose on the pod's IP - address. This must be a valid port number, 0 < x < 65536. format: int32 type: integer hostIP: - description: What host IP to bind the external port to. type: string hostPort: - description: Number of port to expose on the host. If - specified, this must be a valid port number, 0 < x < - 65536. If HostNetwork is specified, this must match - ContainerPort. Most containers do not need this. format: int32 type: integer name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in a pod - must have a unique name. Name for the port that can - be referred to by services. type: string protocol: default: TCP - description: Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". type: string required: - containerPort @@ -3053,71 +1487,37 @@ spec: - protocol x-kubernetes-list-type: map readinessProbe: - description: 'Periodic probe of container service readiness. - Container will be removed from service endpoints if the probe - fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory for the - command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for the probe - to be considered failed after having succeeded. Defaults - to 3. Minimum value is 1. format: int32 type: integer grpc: - description: GRPC specifies an action involving a GRPC port. properties: port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. format: int32 type: integer service: - description: "Service is the name of the service to - place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." type: string required: - port type: object httpGet: - description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. type: string httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. items: - description: HTTPHeader describes a custom header - to be used in HTTP probes properties: name: - description: The header field name. This will - be canonicalized upon output, so case-variant - names will be understood as the same header. 
type: string value: - description: The header field value type: string required: - name @@ -3125,97 +1525,51 @@ spec: type: object type: array path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: Name or number of the port to access on - the container. Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container has - started before liveness probes are initiated. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the probe - to be considered successful after having failed. Defaults - to 1. Must be 1 for liveness and startup. Minimum value - is 1. format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving a TCP - port. properties: host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: Number or name of the port to access on - the container. Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs - to terminate gracefully upon probe failure. The grace - period is the duration in seconds after the processes - running in the pod are sent a termination signal and the - time when the processes are forcibly halted with a kill - signal. Set this value longer than the expected cleanup - time for your process. If this value is nil, the pod's - terminationGracePeriodSeconds will be used. Otherwise, - this value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates - stop immediately via the kill signal (no opportunity to - shut down). This is a beta field and requires enabling - ProbeTerminationGracePeriod feature gate. Minimum value - is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer type: object resizePolicy: - description: Resources resize policy for the container. items: - description: ContainerResizePolicy represents resource resize - policy for the container. properties: resourceName: - description: 'Name of the resource to which this resource - resize policy applies. Supported values: cpu, memory.' type: string restartPolicy: - description: Restart policy to apply when specified resource - is resized. If not specified, it defaults to NotRequired. type: string required: - resourceName @@ -3224,23 +1578,11 @@ spec: type: array x-kubernetes-list-type: atomic resources: - description: 'Compute Resources required by this container. - Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only - be set for containers." items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available - inside a container. type: string required: - name @@ -3256,8 +1598,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -3266,275 +1606,103 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests - cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object restartPolicy: - description: 'RestartPolicy defines the restart behavior of - individual containers in a pod. This field may only be set - for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, - the restart behavior is defined by the Pod''s restart policy - and the container type. Setting the RestartPolicy as "Always" - for the init container will have the following effect: this - init container will be continually restarted on exit until - all regular containers have terminated. Once all regular containers - have completed, all init containers with restartPolicy "Always" - will be shut down. This lifecycle differs from normal init - containers and is often referred to as a "sidecar" container. - Although this init container still starts in the init container - sequence, it does not wait for the container to complete before - proceeding to the next init container. Instead, the next init - container starts immediately after this init container is - started, or after any startupProbe has successfully completed.' type: string securityContext: - description: 'SecurityContext defines the security options the - container should be run with. If set, the fields of SecurityContext - override the equivalent fields of PodSecurityContext. More - info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' properties: allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether - a process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN Note that this field cannot be set - when spec.os.name is windows.' 
type: boolean capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by - the container runtime. Note that this field cannot be - set when spec.os.name is windows. properties: add: - description: Added capabilities items: - description: Capability represent POSIX capabilities - type type: string type: array drop: - description: Removed capabilities items: - description: Capability represent POSIX capabilities - type type: string type: array type: object privileged: - description: Run container in privileged mode. Processes - in privileged containers are essentially equivalent to - root on the host. Defaults to false. Note that this field - cannot be set when spec.os.name is windows. type: boolean procMount: - description: procMount denotes the type of proc mount to - use for the containers. The default is DefaultProcMount - which uses the container runtime defaults for readonly - paths and masked paths. This requires the ProcMountType - feature flag to be enabled. Note that this field cannot - be set when spec.os.name is windows. type: string readOnlyRootFilesystem: - description: Whether this container has a read-only root - filesystem. Default is false. Note that this field cannot - be set when spec.os.name is windows. type: boolean runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence. Note that this field cannot be set when - spec.os.name is windows. format: int64 type: integer runAsNonRoot: - description: Indicates that the container must run as a - non-root user. If true, the Kubelet will validate the - image at runtime to ensure that it does not run as UID - 0 (root) and fail to start the container if it does. If - unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both - SecurityContext and PodSecurityContext, the value specified - in SecurityContext takes precedence. type: boolean runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata - if unspecified. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. Note - that this field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a - random SELinux context for each container. May also be - set in PodSecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence. Note that this field cannot be set when - spec.os.name is windows. properties: level: - description: Level is SELinux level label that applies - to the container. type: string role: - description: Role is a SELinux role label that applies - to the container. type: string type: - description: Type is a SELinux type label that applies - to the container. type: string user: - description: User is a SELinux user label that applies - to the container. type: string type: object seccompProfile: - description: The seccomp options to use by this container. 
- If seccomp options are provided at both the pod & container - level, the container options override the pod options. - Note that this field cannot be set when spec.os.name is - windows. properties: localhostProfile: - description: localhostProfile indicates a profile defined - in a file on the node should be used. The profile - must be preconfigured on the node to work. Must be - a descending path, relative to the kubelet's configured - seccomp profile location. Must be set if type is "Localhost". - Must NOT be set for any other type. type: string type: - description: "type indicates which kind of seccomp profile - will be applied. Valid options are: \n Localhost - - a profile defined in a file on the node should be - used. RuntimeDefault - the container runtime default - profile should be used. Unconfined - no profile should - be applied." type: string required: - type type: object windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is - linux. properties: gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. type: string gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. type: string hostProcess: - description: HostProcess determines if a container should - be run as a 'Host Process' container. All of a Pod's - containers must have the same effective HostProcess - value (it is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). In addition, - if HostProcess is true then HostNetwork must also - be set to true. type: boolean runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set - in PodSecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence. type: string type: object type: object startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod - will be restarted, just as if the livenessProbe failed. This - can be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. - This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory for the - command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. 
                               items:
                                 type: string
                               type: array
                           type: object
                         failureThreshold:
-                          description: Minimum consecutive failures for the probe
-                            to be considered failed after having succeeded. Defaults
-                            to 3. Minimum value is 1.
                           format: int32
                           type: integer
                         grpc:
-                          description: GRPC specifies an action involving a GRPC port.
                           properties:
                             port:
-                              description: Port number of the gRPC service. Number
-                                must be in the range 1 to 65535.
                               format: int32
                               type: integer
                             service:
-                              description: "Service is the name of the service to
-                                place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-                                \n If this is not specified, the default behavior
-                                is defined by gRPC."
                               type: string
                           required:
                           - port
                           type: object
                         httpGet:
-                          description: HTTPGet specifies the http request to perform.
                           properties:
                             host:
-                              description: Host name to connect to, defaults to the
-                                pod IP. You probably want to set "Host" in httpHeaders
-                                instead.
                               type: string
                             httpHeaders:
-                              description: Custom headers to set in the request. HTTP
-                                allows repeated headers.
                               items:
-                                description: HTTPHeader describes a custom header
-                                  to be used in HTTP probes
                                 properties:
                                   name:
-                                    description: The header field name. This will
-                                      be canonicalized upon output, so case-variant
-                                      names will be understood as the same header.
                                     type: string
                                   value:
-                                    description: The header field value
                                     type: string
                                 required:
                                 - name
@@ -3542,139 +1710,61 @@ spec:
                                 type: object
                               type: array
                             path:
-                              description: Path to access on the HTTP server.
                               type: string
                             port:
                               anyOf:
                               - type: integer
                               - type: string
-                              description: Name or number of the port to access on
-                                the container. Number must be in the range 1 to 65535.
-                                Name must be an IANA_SVC_NAME.
                               x-kubernetes-int-or-string: true
                             scheme:
-                              description: Scheme to use for connecting to the host.
-                                Defaults to HTTP.
                               type: string
                           required:
                           - port
                           type: object
                         initialDelaySeconds:
-                          description: 'Number of seconds after the container has
-                            started before liveness probes are initiated. More info:
-                            https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
                           format: int32
                           type: integer
                         periodSeconds:
-                          description: How often (in seconds) to perform the probe.
-                            Default to 10 seconds. Minimum value is 1.
                           format: int32
                           type: integer
                         successThreshold:
-                          description: Minimum consecutive successes for the probe
-                            to be considered successful after having failed. Defaults
-                            to 1. Must be 1 for liveness and startup. Minimum value
-                            is 1.
                           format: int32
                           type: integer
                         tcpSocket:
-                          description: TCPSocket specifies an action involving a TCP
-                            port.
                           properties:
                             host:
-                              description: 'Optional: Host name to connect to, defaults
-                                to the pod IP.'
                               type: string
                             port:
                               anyOf:
                               - type: integer
                               - type: string
-                              description: Number or name of the port to access on
-                                the container. Number must be in the range 1 to 65535.
-                                Name must be an IANA_SVC_NAME.
                               x-kubernetes-int-or-string: true
                           required:
                           - port
                           type: object
                         terminationGracePeriodSeconds:
-                          description: Optional duration in seconds the pod needs
-                            to terminate gracefully upon probe failure. The grace
-                            period is the duration in seconds after the processes
-                            running in the pod are sent a termination signal and the
-                            time when the processes are forcibly halted with a kill
-                            signal. Set this value longer than the expected cleanup
-                            time for your process. If this value is nil, the pod's
-                            terminationGracePeriodSeconds will be used. Otherwise,
-                            this value overrides the value provided by the pod spec.
-                            Value must be non-negative integer. The value zero indicates
-                            stop immediately via the kill signal (no opportunity to
-                            shut down). This is a beta field and requires enabling
-                            ProbeTerminationGracePeriod feature gate. Minimum value
-                            is 1. spec.terminationGracePeriodSeconds is used if unset.
                           format: int64
                           type: integer
                         timeoutSeconds:
-                          description: 'Number of seconds after which the probe times
-                            out. Defaults to 1 second. Minimum value is 1. More info:
-                            https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
                           format: int32
                           type: integer
                       type: object
                     stdin:
-                      description: Whether this container should allocate a buffer
-                        for stdin in the container runtime. If this is not set, reads
-                        from stdin in the container will always result in EOF. Default
-                        is false.
                       type: boolean
                     stdinOnce:
-                      description: Whether the container runtime should close the
-                        stdin channel after it has been opened by a single attach.
-                        When stdin is true the stdin stream will remain open across
-                        multiple attach sessions. If stdinOnce is set to true, stdin
-                        is opened on container start, is empty until the first client
-                        attaches to stdin, and then remains open and accepts data
-                        until the client disconnects, at which time stdin is closed
-                        and remains closed until the container is restarted. If this
-                        flag is false, a container processes that reads from stdin
-                        will never receive an EOF. Default is false
                       type: boolean
                     terminationMessagePath:
-                      description: 'Optional: Path at which the file to which the
-                        container''s termination message will be written is mounted
-                        into the container''s filesystem. Message written is intended
-                        to be brief final status, such as an assertion failure message.
-                        Will be truncated by the node if greater than 4096 bytes.
-                        The total message length across all containers will be limited
-                        to 12kb. Defaults to /dev/termination-log. Cannot be updated.'
                       type: string
                     terminationMessagePolicy:
-                      description: Indicate how the termination message should be
-                        populated. File will use the contents of terminationMessagePath
-                        to populate the container status message on both success and
-                        failure. FallbackToLogsOnError will use the last chunk of
-                        container log output if the termination message file is empty
-                        and the container exited with an error. The log output is
-                        limited to 2048 bytes or 80 lines, whichever is smaller. Defaults
-                        to File. Cannot be updated.
                       type: string
                     tty:
-                      description: Whether this container should allocate a TTY for
-                        itself, also requires 'stdin' to be true. Default is false.
                       type: boolean
                     volumeDevices:
-                      description: volumeDevices is the list of block devices to be
-                        used by the container.
                       items:
-                        description: volumeDevice describes a mapping of a raw block
-                          device within a container.
                         properties:
                           devicePath:
-                            description: devicePath is the path inside of the container
-                              that the device will be mapped to.
                             type: string
                           name:
-                            description: name must match the name of a persistentVolumeClaim
-                              in the pod
                             type: string
                         required:
                         - devicePath
@@ -3682,40 +1772,19 @@ spec:
                         type: object
                       type: array
                     volumeMounts:
-                      description: Pod volumes to mount into the container's filesystem.
-                        Cannot be updated.
                       items:
-                        description: VolumeMount describes a mounting of a Volume
-                          within a container.
                         properties:
                           mountPath:
-                            description: Path within the container at which the volume
-                              should be mounted. Must not contain ':'.
                             type: string
                           mountPropagation:
-                            description: mountPropagation determines how mounts are
-                              propagated from the host to container and the other
-                              way around. When not set, MountPropagationNone is used.
-                              This field is beta in 1.10.
                             type: string
                           name:
-                            description: This must match the Name of a Volume.
                             type: string
                           readOnly:
-                            description: Mounted read-only if true, read-write otherwise
-                              (false or unspecified). Defaults to false.
                             type: boolean
                           subPath:
-                            description: Path within the volume from which the container's
-                              volume should be mounted. Defaults to "" (volume's root).
                             type: string
                           subPathExpr:
-                            description: Expanded path within the volume from which
-                              the container's volume should be mounted. Behaves similarly
-                              to SubPath but environment variable references $(VAR_NAME)
-                              are expanded using the container's environment. Defaults
-                              to "" (volume's root). SubPathExpr and SubPath are mutually
-                              exclusive.
                             type: string
                         required:
                         - mountPath
@@ -3723,47 +1792,23 @@ spec:
                         type: object
                       type: array
                     workingDir:
-                      description: Container's working directory. If not specified,
-                        the container runtime's default will be used, which might
-                        be configured in the container image. Cannot be updated.
                       type: string
                   required:
                   - name
                   type: object
                 type: array
              integrations:
-                description: Integrations controls the integration subsystem of the
-                  Agent and settings unique to deployed integration-specific pods.
                properties:
                  namespaceSelector:
-                    description: "Label selector for namespaces to search when discovering
-                      integration resources. If nil, integration resources are only
-                      discovered in the namespace of the GrafanaAgent resource. \n
-                      Set to `{}` to search all namespaces."
                    properties:
                      matchExpressions:
-                        description: matchExpressions is a list of label selector
-                          requirements. The requirements are ANDed.
                        items:
-                          description: A label selector requirement is a selector
-                            that contains values, a key, and an operator that relates
-                            the key and values.
                          properties:
                            key:
-                              description: key is the label key that the selector
-                                applies to.
                              type: string
                            operator:
-                              description: operator represents a key's relationship
-                                to a set of values. Valid operators are In, NotIn,
-                                Exists and DoesNotExist.
                              type: string
                            values:
-                              description: values is an array of string values. If
-                                the operator is In or NotIn, the values array must
-                                be non-empty. If the operator is Exists or DoesNotExist,
-                                the values array must be empty. This array is replaced
-                                during a strategic merge patch.
                              items:
                                type: string
                              type: array
@@ -3775,41 +1820,19 @@ spec:
                      matchLabels:
                        additionalProperties:
                          type: string
-                        description: matchLabels is a map of {key,value} pairs. A
-                          single {key,value} in the matchLabels map is equivalent
-                          to an element of matchExpressions, whose key field is "key",
-                          the operator is "In", and the values array contains only
-                          "value". The requirements are ANDed.
                        type: object
                    type: object
                    x-kubernetes-map-type: atomic
                  selector:
-                    description: Label selector to find Integration resources to run.
-                      When nil, no integration resources will be defined.
                    properties:
                      matchExpressions:
-                        description: matchExpressions is a list of label selector
-                          requirements. The requirements are ANDed.
                        items:
-                          description: A label selector requirement is a selector
-                            that contains values, a key, and an operator that relates
-                            the key and values.
                          properties:
                            key:
-                              description: key is the label key that the selector
-                                applies to.
                              type: string
                            operator:
-                              description: operator represents a key's relationship
-                                to a set of values. Valid operators are In, NotIn,
-                                Exists and DoesNotExist.
                              type: string
                            values:
-                              description: values is an array of string values. If
-                                the operator is In or NotIn, the values array must
-                                be non-empty. If the operator is Exists or DoesNotExist,
-                                the values array must be empty. This array is replaced
-                                during a strategic merge patch.
                              items:
                                type: string
                              type: array
@@ -3821,91 +1844,49 @@ spec:
                      matchLabels:
                        additionalProperties:
                          type: string
-                        description: matchLabels is a map of {key,value} pairs. A
-                          single {key,value} in the matchLabels map is equivalent
-                          to an element of matchExpressions, whose key field is "key",
-                          the operator is "In", and the values array contains only
-                          "value". The requirements are ANDed.
                        type: object
                    type: object
                    x-kubernetes-map-type: atomic
                type: object
              logFormat:
-                description: LogFormat controls the logging format of the generated
-                  pods. Defaults to "logfmt" if not set.
                type: string
              logLevel:
-                description: LogLevel controls the log level of the generated pods.
-                  Defaults to "info" if not set.
                type: string
              logs:
-                description: Logs controls the logging subsystem of the Agent and
-                  settings unique to logging-specific pods that are deployed.
                properties:
                  clients:
-                    description: A global set of clients to use when a discovered
-                      LogsInstance does not have any clients defined.
                    items:
-                      description: LogsClientSpec defines the client integration for
-                        logs, indicating which Loki server to send logs to.
                      properties:
                        backoffConfig:
-                          description: Configures how to retry requests to Loki when
-                            a request fails. Defaults to a minPeriod of 500ms, maxPeriod
-                            of 5m, and maxRetries of 10.
                          properties:
                            maxPeriod:
-                              description: Maximum backoff time between retries.
                              type: string
                            maxRetries:
-                              description: Maximum number of retries to perform before
-                                giving up a request.
                              type: integer
                            minPeriod:
-                              description: Initial backoff time between retries. Time
-                                between retries is increased exponentially.
                              type: string
                          type: object
                        basicAuth:
-                          description: BasicAuth for the Loki server.
                          properties:
                            password:
-                              description: The secret in the service monitor namespace
-                                that contains the password for authentication.
                              properties:
                                key:
-                                  description: The key of the secret to select from. Must
-                                    be a valid secret key.
                                  type: string
                                name:
-                                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                    TODO: Add other useful fields. apiVersion, kind,
-                                    uid?'
                                  type: string
                                optional:
-                                  description: Specify whether the Secret or its key
-                                    must be defined
                                  type: boolean
                              required:
                              - key
                              type: object
                              x-kubernetes-map-type: atomic
                            username:
-                              description: The secret in the service monitor namespace
-                                that contains the username for authentication.
                              properties:
                                key:
-                                  description: The key of the secret to select from. Must
-                                    be a valid secret key.
                                  type: string
                                name:
-                                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                    TODO: Add other useful fields. apiVersion, kind,
-                                    uid?'
                                  type: string
                                optional:
-                                  description: Specify whether the Secret or its key
-                                    must be defined
                                  type: boolean
                              required:
                              - key
@@ -3913,70 +1894,40 @@ spec:
                              x-kubernetes-map-type: atomic
                          type: object
                        batchSize:
-                          description: Maximum batch size (in bytes) of logs to accumulate
-                            before sending the batch to Loki.
                          type: integer
                        batchWait:
-                          description: Maximum amount of time to wait before sending
-                            a batch, even if that batch isn't full.
                          type: string
                        bearerToken:
-                          description: BearerToken used for remote_write.
                          type: string
                        bearerTokenFile:
-                          description: BearerTokenFile used to read bearer token.
                          type: string
                        externalLabels:
                          additionalProperties:
                            type: string
-                          description: ExternalLabels are labels to add to any time
-                            series when sending data to Loki.
                          type: object
                        oauth2:
-                          description: Oauth2 for URL
                          properties:
                            clientId:
-                              description: The secret or configmap containing the
-                                OAuth2 client id
                              properties:
                                configMap:
-                                  description: ConfigMap containing data to use for
-                                    the targets.
                                  properties:
                                    key:
-                                      description: The key to select.
                                      type: string
                                    name:
-                                      description: 'Name of the referent. More info:
-                                        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                        TODO: Add other useful fields. apiVersion,
-                                        kind, uid?'
                                      type: string
                                    optional:
-                                      description: Specify whether the ConfigMap or
-                                        its key must be defined
                                      type: boolean
                                  required:
                                  - key
                                  type: object
                                  x-kubernetes-map-type: atomic
                                secret:
-                                  description: Secret containing data to use for the
-                                    targets.
                                  properties:
                                    key:
-                                      description: The key of the secret to select
-                                        from. Must be a valid secret key.
                                      type: string
                                    name:
-                                      description: 'Name of the referent. More info:
-                                        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                        TODO: Add other useful fields. apiVersion,
-                                        kind, uid?'
                                      type: string
                                    optional:
-                                      description: Specify whether the Secret or its
-                                        key must be defined
                                      type: boolean
                                  required:
                                  - key
@@ -3984,21 +1935,12 @@ spec:
                                  x-kubernetes-map-type: atomic
                              type: object
                            clientSecret:
-                              description: The secret containing the OAuth2 client
-                                secret
                              properties:
                                key:
-                                  description: The key of the secret to select from. Must
-                                    be a valid secret key.
                                  type: string
                                name:
-                                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                    TODO: Add other useful fields. apiVersion, kind,
-                                    uid?'
                                  type: string
                                optional:
-                                  description: Specify whether the Secret or its key
-                                    must be defined
                                  type: boolean
                              required:
                              - key
@@ -4007,15 +1949,12 @@ spec:
                            endpointParams:
                              additionalProperties:
                                type: string
-                              description: Parameters to append to the token URL
                              type: object
                            scopes:
-                              description: OAuth2 scopes used for the token request
                              items:
                                type: string
                              type: array
                            tokenUrl:
-                              description: The URL to fetch the token from
                              minLength: 1
                              type: string
                          required:
@@ -4024,64 +1963,34 @@
                          - tokenUrl
                          type: object
                        proxyUrl:
-                          description: ProxyURL to proxy requests through. Optional.
                          type: string
                        tenantId:
-                          description: Tenant ID used by default to push logs to Loki.
-                            If omitted assumes remote Loki is running in single-tenant
-                            mode or an authentication layer is used to inject an X-Scope-OrgID
-                            header.
                          type: string
                        timeout:
-                          description: Maximum time to wait for a server to respond
-                            to a request.
                          type: string
                        tlsConfig:
-                          description: TLSConfig to use for the client. Only used
-                            when the protocol of the URL is https.
                          properties:
                            ca:
-                              description: Certificate authority used when verifying
-                                server certificates.
                              properties:
                                configMap:
-                                  description: ConfigMap containing data to use for
-                                    the targets.
                                  properties:
                                    key:
-                                      description: The key to select.
                                      type: string
                                    name:
-                                      description: 'Name of the referent. More info:
-                                        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                        TODO: Add other useful fields. apiVersion,
-                                        kind, uid?'
                                      type: string
                                    optional:
-                                      description: Specify whether the ConfigMap or
-                                        its key must be defined
                                      type: boolean
                                  required:
                                  - key
                                  type: object
                                  x-kubernetes-map-type: atomic
                                secret:
-                                  description: Secret containing data to use for the
-                                    targets.
                                  properties:
                                    key:
-                                      description: The key of the secret to select
-                                        from. Must be a valid secret key.
                                      type: string
                                    name:
-                                      description: 'Name of the referent. More info:
-                                        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                        TODO: Add other useful fields. apiVersion,
-                                        kind, uid?'
                                      type: string
                                    optional:
-                                      description: Specify whether the Secret or its
-                                        key must be defined
                                      type: boolean
                                  required:
                                  - key
@@ -4089,51 +1998,28 @@ spec:
                                  x-kubernetes-map-type: atomic
                              type: object
                            caFile:
-                              description: Path to the CA cert in the Prometheus container
-                                to use for the targets.
                              type: string
                            cert:
-                              description: Client certificate to present when doing
-                                client-authentication.
                              properties:
                                configMap:
-                                  description: ConfigMap containing data to use for
-                                    the targets.
                                  properties:
                                    key:
-                                      description: The key to select.
                                      type: string
                                    name:
-                                      description: 'Name of the referent. More info:
-                                        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                        TODO: Add other useful fields. apiVersion,
-                                        kind, uid?'
                                      type: string
                                    optional:
-                                      description: Specify whether the ConfigMap or
-                                        its key must be defined
                                      type: boolean
                                  required:
                                  - key
                                  type: object
                                  x-kubernetes-map-type: atomic
                                secret:
-                                  description: Secret containing data to use for the
-                                    targets.
                                  properties:
                                    key:
-                                      description: The key of the secret to select
-                                        from. Must be a valid secret key.
                                      type: string
                                    name:
-                                      description: 'Name of the referent. More info:
-                                        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                        TODO: Add other useful fields. apiVersion,
-                                        kind, uid?'
                                      type: string
                                    optional:
-                                      description: Specify whether the Secret or its
-                                        key must be defined
                                      type: boolean
                                  required:
                                  - key
@@ -4141,89 +2027,46 @@ spec:
                                  x-kubernetes-map-type: atomic
                              type: object
                            certFile:
-                              description: Path to the client cert file in the Prometheus
-                                container for the targets.
                              type: string
                            insecureSkipVerify:
-                              description: Disable target certificate validation.
                              type: boolean
                            keyFile:
-                              description: Path to the client key file in the Prometheus
-                                container for the targets.
                              type: string
                            keySecret:
-                              description: Secret containing the client key file for
-                                the targets.
                              properties:
                                key:
-                                  description: The key of the secret to select from. Must
-                                    be a valid secret key.
                                  type: string
                                name:
-                                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                    TODO: Add other useful fields. apiVersion, kind,
-                                    uid?'
                                  type: string
                                optional:
-                                  description: Specify whether the Secret or its key
-                                    must be defined
                                  type: boolean
                              required:
                              - key
                              type: object
                              x-kubernetes-map-type: atomic
                            serverName:
-                              description: Used to verify the hostname for the targets.
                              type: string
                          type: object
                        url:
-                          description: 'URL is the URL where Loki is listening. Must
-                            be a full HTTP URL, including protocol. Required. Example:
-                            https://logs-prod-us-central1.grafana.net/loki/api/v1/push.'
                          type: string
                      required:
                      - url
                      type: object
                    type: array
                  enforcedNamespaceLabel:
-                    description: EnforcedNamespaceLabel enforces adding a namespace
-                      label of origin for each metric that is user-created. The label
-                      value will always be the namespace of the object that is being
-                      created.
                    type: string
                  ignoreNamespaceSelectors:
-                    description: IgnoreNamespaceSelectors, if true, will ignore NamespaceSelector
-                      settings from the PodLogs configs, and they will only discover
-                      endpoints within their current namespace.
                    type: boolean
                  instanceNamespaceSelector:
-                    description: InstanceNamespaceSelector are the set of labels to
-                      determine which namespaces to watch for LogInstances. If not
-                      provided, only checks own namespace.
                    properties:
                      matchExpressions:
-                        description: matchExpressions is a list of label selector
-                          requirements. The requirements are ANDed.
                        items:
-                          description: A label selector requirement is a selector
-                            that contains values, a key, and an operator that relates
-                            the key and values.
                          properties:
                            key:
-                              description: key is the label key that the selector
-                                applies to.
                              type: string
                            operator:
-                              description: operator represents a key's relationship
-                                to a set of values. Valid operators are In, NotIn,
-                                Exists and DoesNotExist.
                              type: string
                            values:
-                              description: values is an array of string values. If
-                                the operator is In or NotIn, the values array must
-                                be non-empty. If the operator is Exists or DoesNotExist,
-                                the values array must be empty. This array is replaced
-                                during a strategic merge patch.
                              items:
                                type: string
                              type: array
@@ -4235,42 +2078,19 @@ spec:
                      matchLabels:
                        additionalProperties:
                          type: string
-                        description: matchLabels is a map of {key,value} pairs. A
-                          single {key,value} in the matchLabels map is equivalent
-                          to an element of matchExpressions, whose key field is "key",
-                          the operator is "In", and the values array contains only
-                          "value". The requirements are ANDed.
                        type: object
                    type: object
                    x-kubernetes-map-type: atomic
                  instanceSelector:
-                    description: InstanceSelector determines which LogInstances should
-                      be selected for running. Each instance runs its own set of Prometheus
-                      components, including service discovery, scraping, and remote_write.
                    properties:
                      matchExpressions:
-                        description: matchExpressions is a list of label selector
-                          requirements. The requirements are ANDed.
                        items:
-                          description: A label selector requirement is a selector
-                            that contains values, a key, and an operator that relates
-                            the key and values.
                          properties:
                            key:
-                              description: key is the label key that the selector
-                                applies to.
                              type: string
                            operator:
-                              description: operator represents a key's relationship
-                                to a set of values. Valid operators are In, NotIn,
-                                Exists and DoesNotExist.
                              type: string
                            values:
-                              description: values is an array of string values. If
-                                the operator is In or NotIn, the values array must
-                                be non-empty. If the operator is Exists or DoesNotExist,
-                                the values array must be empty. This array is replaced
-                                during a strategic merge patch.
                              items:
                                type: string
                              type: array
@@ -4282,98 +2102,43 @@ spec:
                      matchLabels:
                        additionalProperties:
                          type: string
-                        description: matchLabels is a map of {key,value} pairs. A
-                          single {key,value} in the matchLabels map is equivalent
-                          to an element of matchExpressions, whose key field is "key",
-                          the operator is "In", and the values array contains only
-                          "value". The requirements are ANDed.
                        type: object
                    type: object
                    x-kubernetes-map-type: atomic
                  logsExternalLabelName:
-                    description: LogsExternalLabelName is the name of the external
-                      label used to denote Grafana Agent cluster. Defaults to "cluster."
-                      External label will _not_ be added when value is set to the
-                      empty string.
                    type: string
                type: object
              metrics:
-                description: Metrics controls the metrics subsystem of the Agent and
-                  settings unique to metrics-specific pods that are deployed.
                properties:
                  arbitraryFSAccessThroughSMs:
-                    description: ArbitraryFSAccessThroughSMs configures whether configuration
-                      based on a ServiceMonitor can access arbitrary files on the
-                      file system of the Grafana Agent container, e.g., bearer token
-                      files.
                    properties:
                      deny:
                        type: boolean
                    type: object
                  enforcedNamespaceLabel:
-                    description: EnforcedNamespaceLabel enforces adding a namespace
-                      label of origin for each metric that is user-created. The label
-                      value is always the namespace of the object that is being created.
                    type: string
                  enforcedSampleLimit:
-                    description: EnforcedSampleLimit defines a global limit on the
-                      number of scraped samples that are accepted. This overrides
-                      any SampleLimit set per ServiceMonitor and/or PodMonitor. It
-                      is meant to be used by admins to enforce the SampleLimit to
-                      keep the overall number of samples and series under the desired
-                      limit. Note that if a SampleLimit from a ServiceMonitor or PodMonitor
-                      is lower, that value is used instead.
                    format: int64
                    type: integer
                  enforcedTargetLimit:
-                    description: EnforcedTargetLimit defines a global limit on the
-                      number of scraped targets. This overrides any TargetLimit set
-                      per ServiceMonitor and/or PodMonitor. It is meant to be used
-                      by admins to enforce the TargetLimit to keep the overall number
-                      of targets under the desired limit. Note that if a TargetLimit
-                      from a ServiceMonitor or PodMonitor is higher, that value is
-                      used instead.
                    format: int64
                    type: integer
                  externalLabels:
                    additionalProperties:
                      type: string
-                    description: ExternalLabels are labels to add to any time series
-                      when sending data over remote_write.
                    type: object
                  ignoreNamespaceSelectors:
-                    description: IgnoreNamespaceSelectors, if true, ignores NamespaceSelector
-                      settings from the PodMonitor and ServiceMonitor configs, so
-                      that they only discover endpoints within their current namespace.
                    type: boolean
                  instanceNamespaceSelector:
-                    description: InstanceNamespaceSelector is the set of labels that
-                      determines which namespaces to watch for MetricsInstances. If
-                      not provided, it only checks its own namespace.
                    properties:
                      matchExpressions:
-                        description: matchExpressions is a list of label selector
-                          requirements. The requirements are ANDed.
                        items:
-                          description: A label selector requirement is a selector
-                            that contains values, a key, and an operator that relates
-                            the key and values.
                          properties:
                            key:
-                              description: key is the label key that the selector
-                                applies to.
                              type: string
                            operator:
-                              description: operator represents a key's relationship
-                                to a set of values. Valid operators are In, NotIn,
-                                Exists and DoesNotExist.
                              type: string
                            values:
-                              description: values is an array of string values. If
-                                the operator is In or NotIn, the values array must
-                                be non-empty. If the operator is Exists or DoesNotExist,
-                                the values array must be empty. This array is replaced
-                                during a strategic merge patch.
                              items:
                                type: string
                              type: array
@@ -4385,43 +2150,19 @@ spec:
                      matchLabels:
                        additionalProperties:
                          type: string
-                        description: matchLabels is a map of {key,value} pairs. A
-                          single {key,value} in the matchLabels map is equivalent
-                          to an element of matchExpressions, whose key field is "key",
-                          the operator is "In", and the values array contains only
-                          "value". The requirements are ANDed.
                        type: object
                    type: object
                    x-kubernetes-map-type: atomic
                  instanceSelector:
-                    description: InstanceSelector determines which MetricsInstances
-                      should be selected for running. Each instance runs its own set
-                      of Metrics components, including service discovery, scraping,
-                      and remote_write.
                    properties:
                      matchExpressions:
-                        description: matchExpressions is a list of label selector
-                          requirements. The requirements are ANDed.
                        items:
-                          description: A label selector requirement is a selector
-                            that contains values, a key, and an operator that relates
-                            the key and values.
                          properties:
                            key:
-                              description: key is the label key that the selector
-                                applies to.
                              type: string
                            operator:
-                              description: operator represents a key's relationship
-                                to a set of values. Valid operators are In, NotIn,
-                                Exists and DoesNotExist.
                              type: string
                            values:
-                              description: values is an array of string values. If
-                                the operator is In or NotIn, the values array must
-                                be non-empty. If the operator is Exists or DoesNotExist,
-                                the values array must be empty. This array is replaced
-                                during a strategic merge patch.
                              items:
                                type: string
                              type: array
@@ -4433,77 +2174,39 @@ spec:
                      matchLabels:
                        additionalProperties:
                          type: string
-                        description: matchLabels is a map of {key,value} pairs. A
-                          single {key,value} in the matchLabels map is equivalent
-                          to an element of matchExpressions, whose key field is "key",
-                          the operator is "In", and the values array contains only
-                          "value". The requirements are ANDed.
                        type: object
                    type: object
                    x-kubernetes-map-type: atomic
                  metricsExternalLabelName:
-                    description: MetricsExternalLabelName is the name of the external
-                      label used to denote Grafana Agent cluster. Defaults to "cluster."
-                      The external label is _not_ added when the value is set to the
-                      empty string.
                    type: string
                  overrideHonorLabels:
-                    description: OverrideHonorLabels, if true, overrides all configured
-                      honor_labels read from ServiceMonitor or PodMonitor and sets
-                      them to false.
                    type: boolean
                  overrideHonorTimestamps:
-                    description: OverrideHonorTimestamps allows global enforcement
-                      for honoring timestamps in all scrape configs.
                    type: boolean
                  remoteWrite:
-                    description: RemoteWrite controls default remote_write settings
-                      for all instances. If an instance does not provide its own RemoteWrite
-                      settings, these will be used instead.
                    items:
-                      description: RemoteWriteSpec defines the remote_write configuration
-                        for Prometheus.
                      properties:
                        basicAuth:
-                          description: BasicAuth for the URL.
                          properties:
                            password:
-                              description: The secret in the service monitor namespace
-                                that contains the password for authentication.
                              properties:
                                key:
-                                  description: The key of the secret to select from. Must
-                                    be a valid secret key.
                                  type: string
                                name:
-                                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                    TODO: Add other useful fields. apiVersion, kind,
-                                    uid?'
                                  type: string
                                optional:
-                                  description: Specify whether the Secret or its key
-                                    must be defined
                                  type: boolean
                              required:
                              - key
                              type: object
                              x-kubernetes-map-type: atomic
                            username:
-                              description: The secret in the service monitor namespace
-                                that contains the username for authentication.
                              properties:
                                key:
-                                  description: The key of the secret to select from. Must
-                                    be a valid secret key.
                                  type: string
                                name:
-                                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                    TODO: Add other useful fields. apiVersion, kind,
-                                    uid?'
                                  type: string
                                optional:
-                                  description: Specify whether the Secret or its key
-                                    must be defined
                                  type: boolean
                              required:
                              - key
@@ -4511,82 +2214,45 @@ spec:
                              x-kubernetes-map-type: atomic
                          type: object
                        bearerToken:
-                          description: BearerToken used for remote_write.
                          type: string
                        bearerTokenFile:
-                          description: BearerTokenFile used to read bearer token.
                          type: string
                        headers:
                          additionalProperties:
                            type: string
-                          description: Headers is a set of custom HTTP headers to
-                            be sent along with each remote_write request. Be aware
-                            that any headers set by Grafana Agent itself can't be
-                            overwritten.
                          type: object
                        metadataConfig:
-                          description: MetadataConfig configures the sending of series
-                            metadata to remote storage.
                          properties:
                            send:
-                              description: Send enables metric metadata to be sent
-                                to remote storage.
                              type: boolean
                            sendInterval:
-                              description: SendInterval controls how frequently metric
-                                metadata is sent to remote storage.
                              type: string
                          type: object
                        name:
-                          description: Name of the remote_write queue. Must be unique
-                            if specified. The name is used in metrics and logging
-                            in order to differentiate queues.
                          type: string
                        oauth2:
-                          description: Oauth2 for URL
                          properties:
                            clientId:
-                              description: The secret or configmap containing the
-                                OAuth2 client id
                              properties:
                                configMap:
-                                  description: ConfigMap containing data to use for
-                                    the targets.
                                  properties:
                                    key:
-                                      description: The key to select.
                                      type: string
                                    name:
-                                      description: 'Name of the referent. More info:
-                                        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                        TODO: Add other useful fields. apiVersion,
-                                        kind, uid?'
                                      type: string
                                    optional:
-                                      description: Specify whether the ConfigMap or
-                                        its key must be defined
                                      type: boolean
                                  required:
                                  - key
                                  type: object
                                  x-kubernetes-map-type: atomic
                                secret:
-                                  description: Secret containing data to use for the
-                                    targets.
                                  properties:
                                    key:
-                                      description: The key of the secret to select
-                                        from. Must be a valid secret key.
                                      type: string
                                    name:
-                                      description: 'Name of the referent. More info:
-                                        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                        TODO: Add other useful fields. apiVersion,
-                                        kind, uid?'
                                      type: string
                                    optional:
-                                      description: Specify whether the Secret or its
-                                        key must be defined
                                      type: boolean
                                  required:
                                  - key
@@ -4594,21 +2260,12 @@ spec:
                                  x-kubernetes-map-type: atomic
                              type: object
                            clientSecret:
-                              description: The secret containing the OAuth2 client
-                                secret
                              properties:
                                key:
-                                  description: The key of the secret to select from. Must
-                                    be a valid secret key.
                                  type: string
                                name:
-                                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                    TODO: Add other useful fields. apiVersion, kind,
-                                    uid?'
                                  type: string
                                optional:
-                                  description: Specify whether the Secret or its key
-                                    must be defined
                                  type: boolean
                              required:
                              - key
@@ -4617,15 +2274,12 @@ spec:
                            endpointParams:
                              additionalProperties:
                                type: string
-                              description: Parameters to append to the token URL
                              type: object
                            scopes:
-                              description: OAuth2 scopes used for the token request
                              items:
                                type: string
                              type: array
                            tokenUrl:
-                              description: The URL to fetch the token from
                              minLength: 1
                              type: string
                          required:
@@ -4634,109 +2288,57 @@
                          - tokenUrl
                          type: object
                        proxyUrl:
-                          description: ProxyURL to proxy requests through. Optional.
                          type: string
                        queueConfig:
-                          description: QueueConfig allows tuning of the remote_write
-                            queue parameters.
                          properties:
                            batchSendDeadline:
-                              description: BatchSendDeadline is the maximum time a
-                                sample will wait in the buffer.
                              type: string
                            capacity:
-                              description: Capacity is the number of samples to buffer
-                                per shard before samples start being dropped.
                              type: integer
                            maxBackoff:
-                              description: MaxBackoff is the maximum retry delay.
                              type: string
                            maxRetries:
-                              description: MaxRetries is the maximum number of times
-                                to retry a batch on recoverable errors.
                              type: integer
                            maxSamplesPerSend:
-                              description: MaxSamplesPerSend is the maximum number
-                                of samples per send.
                              type: integer
                            maxShards:
-                              description: MaxShards is the maximum number of shards,
-                                i.e., the amount of concurrency.
                              type: integer
                            minBackoff:
-                              description: MinBackoff is the initial retry delay.
-                                MinBackoff is doubled for every retry.
                              type: string
                            minShards:
-                              description: MinShards is the minimum number of shards,
-                                i.e., the amount of concurrency.
                              type: integer
                            retryOnRateLimit:
-                              description: RetryOnRateLimit retries requests when
-                                encountering rate limits.
                              type: boolean
                          type: object
                        remoteTimeout:
-                          description: RemoteTimeout is the timeout for requests to
-                            the remote_write endpoint.
                          type: string
                        sigv4:
-                          description: SigV4 configures SigV4-based authentication
-                            to the remote_write endpoint. SigV4-based authentication
-                            is used if SigV4 is defined, even with an empty object.
                          properties:
                            accessKey:
-                              description: AccessKey holds the secret of the AWS API
-                                access key to use for signing. If not provided, the
-                                environment variable AWS_ACCESS_KEY_ID is used.
                              properties:
                                key:
-                                  description: The key of the secret to select from. Must
-                                    be a valid secret key.
                                  type: string
                                name:
-                                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                    TODO: Add other useful fields. apiVersion, kind,
-                                    uid?'
                                  type: string
                                optional:
-                                  description: Specify whether the Secret or its key
-                                    must be defined
                                  type: boolean
                              required:
                              - key
                              type: object
                              x-kubernetes-map-type: atomic
                            profile:
-                              description: Profile is the named AWS profile to use
-                                for authentication.
                              type: string
                            region:
-                              description: Region of the AWS endpoint. If blank, the
-                                region from the default credentials chain is used.
                              type: string
                            roleARN:
-                              description: RoleARN is the AWS Role ARN to use for
-                                authentication, as an alternative for using the AWS
-                                API keys.
                              type: string
                            secretKey:
-                              description: SecretKey of the AWS API to use for signing.
-                                If blank, the environment variable AWS_SECRET_ACCESS_KEY
-                                is used.
                              properties:
                                key:
-                                  description: The key of the secret to select from. Must
-                                    be a valid secret key.
                                  type: string
                                name:
-                                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                    TODO: Add other useful fields. apiVersion, kind,
-                                    uid?'
                                  type: string
                                optional:
-                                  description: Specify whether the Secret or its key
-                                    must be defined
                                  type: boolean
                              required:
                              - key
@@ -4744,50 +2346,28 @@ spec:
                              x-kubernetes-map-type: atomic
                          type: object
                        tlsConfig:
-                          description: TLSConfig to use for remote_write.
                          properties:
                            ca:
-                              description: Certificate authority used when verifying
-                                server certificates.
                              properties:
                                configMap:
-                                  description: ConfigMap containing data to use for
-                                    the targets.
                                  properties:
                                    key:
-                                      description: The key to select.
                                      type: string
                                    name:
-                                      description: 'Name of the referent. More info:
-                                        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                        TODO: Add other useful fields. apiVersion,
-                                        kind, uid?'
                                      type: string
                                    optional:
-                                      description: Specify whether the ConfigMap or
-                                        its key must be defined
                                      type: boolean
                                  required:
                                  - key
                                  type: object
                                  x-kubernetes-map-type: atomic
                                secret:
-                                  description: Secret containing data to use for the
-                                    targets.
                                  properties:
                                    key:
-                                      description: The key of the secret to select
-                                        from. Must be a valid secret key.
                                      type: string
                                    name:
-                                      description: 'Name of the referent. More info:
-                                        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                        TODO: Add other useful fields. apiVersion,
-                                        kind, uid?'
                                      type: string
                                    optional:
-                                      description: Specify whether the Secret or its
-                                        key must be defined
                                      type: boolean
                                  required:
                                  - key
@@ -4795,51 +2375,28 @@ spec:
                                  x-kubernetes-map-type: atomic
                              type: object
                            caFile:
-                              description: Path to the CA cert in the Prometheus container
-                                to use for the targets.
                              type: string
                            cert:
-                              description: Client certificate to present when doing
-                                client-authentication.
                              properties:
                                configMap:
-                                  description: ConfigMap containing data to use for
-                                    the targets.
                                  properties:
                                    key:
-                                      description: The key to select.
                                      type: string
                                    name:
-                                      description: 'Name of the referent. More info:
-                                        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                        TODO: Add other useful fields. apiVersion,
-                                        kind, uid?'
                                      type: string
                                    optional:
-                                      description: Specify whether the ConfigMap or
-                                        its key must be defined
                                      type: boolean
                                  required:
                                  - key
                                  type: object
                                  x-kubernetes-map-type: atomic
                                secret:
-                                  description: Secret containing data to use for the
-                                    targets.
                                  properties:
                                    key:
-                                      description: The key of the secret to select
-                                        from. Must be a valid secret key.
                                      type: string
                                    name:
-                                      description: 'Name of the referent. More info:
-                                        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                        TODO: Add other useful fields. apiVersion,
-                                        kind, uid?'
                                      type: string
                                    optional:
-                                      description: Specify whether the Secret or its
-                                        key must be defined
                                      type: boolean
                                  required:
                                  - key
@@ -4847,59 +2404,33 @@ spec:
                                  x-kubernetes-map-type: atomic
                              type: object
                            certFile:
-                              description: Path to the client cert file in the Prometheus
-                                container for the targets.
                              type: string
                            insecureSkipVerify:
-                              description: Disable target certificate validation.
                              type: boolean
                            keyFile:
-                              description: Path to the client key file in the Prometheus
-                                container for the targets.
                              type: string
                            keySecret:
-                              description: Secret containing the client key file for
-                                the targets.
                              properties:
                                key:
-                                  description: The key of the secret to select from. Must
-                                    be a valid secret key.
                                  type: string
                                name:
-                                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                                    TODO: Add other useful fields. apiVersion, kind,
-                                    uid?'
                                  type: string
                                optional:
-                                  description: Specify whether the Secret or its key
-                                    must be defined
                                  type: boolean
                              required:
                              - key
                              type: object
                              x-kubernetes-map-type: atomic
                            serverName:
-                              description: Used to verify the hostname for the targets.
                              type: string
                          type: object
                        url:
-                          description: URL of the endpoint to send samples to.
                          type: string
                        writeRelabelConfigs:
-                          description: WriteRelabelConfigs holds relabel_configs to
-                            relabel samples before they are sent to the remote_write
-                            endpoint.
                          items:
-                            description: 'RelabelConfig allows dynamic rewriting of
-                              the label set, being applied to samples before ingestion.
-                              It defines ``-section of Prometheus
-                              configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
                            properties:
                              action:
                                default: replace
-                                description: Action to perform based on regex matching.
-                                  Default is 'replace'. uppercase and lowercase actions
-                                  require Prometheus >= 2.36.
                                enum:
                                - replace
                                - Replace
@@ -4925,40 +2456,20 @@ spec:
                                - DropEqual
                                type: string
                              modulus:
-                                description: Modulus to take of the hash of the source
-                                  label values.
                                format: int64
                                type: integer
                              regex:
-                                description: Regular expression against which the
-                                  extracted value is matched. Default is '(.*)'
                                type: string
                              replacement:
-                                description: Replacement value against which a regex
-                                  replace is performed if the regular expression matches.
-                                  Regex capture groups are available. Default is '$1'
                                type: string
                              separator:
-                                description: Separator placed between concatenated
-                                  source label values. default is ';'.
                                type: string
                              sourceLabels:
-                                description: The source labels select values from
-                                  existing labels. Their content is concatenated using
-                                  the configured separator and matched against the
-                                  configured regular expression for the replace, keep,
-                                  and drop actions.
                                items:
-                                  description: LabelName is a valid Prometheus label
-                                    name which may only contain ASCII letters, numbers,
-                                    as well as underscores.
                                  pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
                                  type: string
                                type: array
                              targetLabel:
-                                description: Label to which the resulting value is
-                                  written in a replace action. It is mandatory for
-                                  replace actions. Regex capture groups are available.
                                type: string
                            type: object
                          type: array
@@ -4967,96 +2478,47 @@ spec:
                      type: object
                    type: array
                  replicaExternalLabelName:
-                    description: ReplicaExternalLabelName is the name of the metrics
-                      external label used to denote the replica name. Defaults to
-                      __replica__. The external label is _not_ added when the value
-                      is set to the empty string.
                    type: string
                  replicas:
-                    description: Replicas of each shard to deploy for metrics pods.
-                      Number of replicas multiplied by the number of shards is the
-                      total number of pods created.
                    format: int32
                    type: integer
                  scrapeInterval:
-                    description: ScrapeInterval is the time between consecutive scrapes.
                    type: string
                  scrapeTimeout:
-                    description: ScrapeTimeout is the time to wait for a target to
-                      respond before marking a scrape as failed.
                    type: string
                  shards:
-                    description: Shards to distribute targets onto. Number of replicas
-                      multiplied by the number of shards is the total number of pods
-                      created. Note that scaling down shards does not reshard data
-                      onto remaining instances; it must be manually moved. Increasing
-                      shards does not reshard data either, but it will continue to
-                      be available from the same instances. Sharding is performed
-                      on the content of the __address__ target meta-label.
                    format: int32
                    type: integer
                type: object
              nodeSelector:
                additionalProperties:
                  type: string
-                description: NodeSelector defines which nodes pods should be scheduling
-                  on.
                type: object
              paused:
-                description: Paused prevents actions except for deletion to be performed
-                  on the underlying managed objects.
                type: boolean
              podMetadata:
-                description: PodMetadata configures Labels and Annotations which are
-                  propagated to created Grafana Agent pods.
                properties:
                  annotations:
                    additionalProperties:
                      type: string
-                    description: 'Annotations is an unstructured key value map stored
-                      with a resource that may be set by external tools to store and
-                      retrieve arbitrary metadata. They are not queryable and should
-                      be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
                    type: object
                  labels:
                    additionalProperties:
                      type: string
-                    description: 'Map of string keys and values that can be used to
-                      organize and categorize (scope and select) objects. May match
-                      selectors of replication controllers and services. More info:
-                      http://kubernetes.io/docs/user-guide/labels'
                    type: object
                  name:
-                    description: 'Name must be unique within a namespace. Is required
-                      when creating resources, although some resources may allow a
-                      client to request the generation of an appropriate name automatically.
-                      Name is primarily intended for creation idempotence and configuration
-                      definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
                    type: string
                type: object
              portName:
-                description: Port name used for the pods and governing service. This
-                  defaults to agent-metrics.
                type: string
              priorityClassName:
-                description: PriorityClassName is the priority class assigned to pods.
                type: string
              resources:
-                description: Resources holds requests and limits for individual pods.
                properties:
                  claims:
-                    description: "Claims lists the names of resources, defined in
-                      spec.resourceClaims, that are used by this container. \n This
-                      is an alpha field and requires enabling the DynamicResourceAllocation
-                      feature gate. \n This field is immutable. It can only be set
-                      for containers."
                    items:
-                      description: ResourceClaim references one entry in PodSpec.ResourceClaims.
                      properties:
                        name:
-                          description: Name must match the name of one entry in pod.spec.resourceClaims
-                            of the Pod where this field is used. It makes that resource
-                            available inside a container.
                          type: string
                      required:
                      - name
@@ -5072,8 +2534,6 @@ spec:
                      - type: string
                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                      x-kubernetes-int-or-string: true
-                    description: 'Limits describes the maximum amount of compute resources
-                      allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
                    type: object
                  requests:
                    additionalProperties:
@@ -5082,151 +2542,60 @@ spec:
                      - type: string
                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                      x-kubernetes-int-or-string: true
-                    description: 'Requests describes the minimum amount of compute
-                      resources required. If Requests is omitted for a container,
-                      it defaults to Limits if that is explicitly specified, otherwise
-                      to an implementation-defined value. Requests cannot exceed Limits.
-                      More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
                    type: object
                type: object
              runtimeClassName:
-                description: RuntimeClassName is the runtime class assigned to pods.
                type: string
              secrets:
-                description: Secrets is a list of secrets in the same namespace as
-                  the GrafanaAgent object which will be mounted into each running
-                  Grafana Agent pod. The secrets are mounted into /var/lib/grafana-agent/extra-secrets/.
                items:
                  type: string
                type: array
              securityContext:
-                description: SecurityContext holds pod-level security attributes and
-                  common container settings. When unspecified, defaults to the default
-                  PodSecurityContext.
                properties:
                  fsGroup:
-                    description: "A special supplemental group that applies to all
-                      containers in a pod. Some volume types allow the Kubelet to
-                      change the ownership of that volume to be owned by the pod:
-                      \n 1. The owning GID will be the FSGroup 2. The setgid bit is
-                      set (new files created in the volume will be owned by FSGroup)
-                      3. The permission bits are OR'd with rw-rw---- \n If unset,
-                      the Kubelet will not modify the ownership and permissions of
-                      any volume. Note that this field cannot be set when spec.os.name
-                      is windows."
                    format: int64
                    type: integer
                  fsGroupChangePolicy:
-                    description: 'fsGroupChangePolicy defines behavior of changing
-                      ownership and permission of the volume before being exposed
-                      inside Pod. This field will only apply to volume types which
-                      support fsGroup based ownership(and permissions). It will have
-                      no effect on ephemeral volume types such as: secret, configmaps
-                      and emptydir. Valid values are "OnRootMismatch" and "Always".
-                      If not specified, "Always" is used. Note that this field cannot
-                      be set when spec.os.name is windows.'
                    type: string
                  runAsGroup:
-                    description: The GID to run the entrypoint of the container process.
-                      Uses runtime default if unset. May also be set in SecurityContext. If
-                      set in both SecurityContext and PodSecurityContext, the value
-                      specified in SecurityContext takes precedence for that container.
-                      Note that this field cannot be set when spec.os.name is windows.
                    format: int64
                    type: integer
                  runAsNonRoot:
-                    description: Indicates that the container must run as a non-root
-                      user. If true, the Kubelet will validate the image at runtime
-                      to ensure that it does not run as UID 0 (root) and fail to start
-                      the container if it does. If unset or false, no such validation
-                      will be performed. May also be set in SecurityContext. If set
-                      in both SecurityContext and PodSecurityContext, the value specified
-                      in SecurityContext takes precedence.
                    type: boolean
                  runAsUser:
-                    description: The UID to run the entrypoint of the container process.
-                      Defaults to user specified in image metadata if unspecified.
-                      May also be set in SecurityContext. If set in both SecurityContext
-                      and PodSecurityContext, the value specified in SecurityContext
-                      takes precedence for that container. Note that this field cannot
-                      be set when spec.os.name is windows.
                    format: int64
                    type: integer
                  seLinuxOptions:
-                    description: The SELinux context to be applied to all containers.
-                      If unspecified, the container runtime will allocate a random
-                      SELinux context for each container. May also be set in SecurityContext. If
-                      set in both SecurityContext and PodSecurityContext, the value
-                      specified in SecurityContext takes precedence for that container.
-                      Note that this field cannot be set when spec.os.name is windows.
                    properties:
                      level:
-                        description: Level is SELinux level label that applies to
-                          the container.
                        type: string
                      role:
-                        description: Role is a SELinux role label that applies to
-                          the container.
                        type: string
                      type:
-                        description: Type is a SELinux type label that applies to
-                          the container.
                        type: string
                      user:
-                        description: User is a SELinux user label that applies to
-                          the container.
                        type: string
                    type: object
                  seccompProfile:
-                    description: The seccomp options to use by the containers in this
-                      pod. Note that this field cannot be set when spec.os.name is
-                      windows.
                    properties:
                      localhostProfile:
-                        description: localhostProfile indicates a profile defined
-                          in a file on the node should be used. The profile must be
-                          preconfigured on the node to work. Must be a descending
-                          path, relative to the kubelet's configured seccomp profile
-                          location. Must be set if type is "Localhost". Must NOT be
-                          set for any other type.
                        type: string
                      type:
-                        description: "type indicates which kind of seccomp profile
-                          will be applied. Valid options are: \n Localhost - a profile
-                          defined in a file on the node should be used. RuntimeDefault
-                          - the container runtime default profile should be used.
-                          Unconfined - no profile should be applied."
                        type: string
                    required:
                    - type
                    type: object
                  supplementalGroups:
-                    description: A list of groups applied to the first process run
-                      in each container, in addition to the container's primary GID,
-                      the fsGroup (if specified), and group memberships defined in
-                      the container image for the uid of the container process. If
-                      unspecified, no additional groups are added to any container.
-                      Note that group memberships defined in the container image for
-                      the uid of the container process are still effective, even if
-                      they are not included in this list. Note that this field cannot
-                      be set when spec.os.name is windows.
                    items:
                      format: int64
                      type: integer
                    type: array
                  sysctls:
-                    description: Sysctls hold a list of namespaced sysctls used for
-                      the pod. Pods with unsupported sysctls (by the container runtime)
-                      might fail to launch. Note that this field cannot be set when
-                      spec.os.name is windows.
                    items:
-                      description: Sysctl defines a kernel parameter to be set
                      properties:
                        name:
-                          description: Name of a property to set
                          type: string
                        value:
-                          description: Value of a property to set
                          type: string
                      required:
                      - name
@@ -5234,146 +2603,53 @@ spec:
                      type: object
                    type: array
                  windowsOptions:
-                    description: The Windows specific settings applied to all containers.
-                      If unspecified, the options within a container's SecurityContext
-                      will be used. If set in both SecurityContext and PodSecurityContext,
-                      the value specified in SecurityContext takes precedence. Note
-                      that this field cannot be set when spec.os.name is linux.
                    properties:
                      gmsaCredentialSpec:
-                        description: GMSACredentialSpec is where the GMSA admission
-                          webhook (https://github.com/kubernetes-sigs/windows-gmsa)
-                          inlines the contents of the GMSA credential spec named by
-                          the GMSACredentialSpecName field.
                        type: string
                      gmsaCredentialSpecName:
-                        description: GMSACredentialSpecName is the name of the GMSA
-                          credential spec to use.
                        type: string
                      hostProcess:
-                        description: HostProcess determines if a container should
-                          be run as a 'Host Process' container. All of a Pod's containers
-                          must have the same effective HostProcess value (it is not
-                          allowed to have a mix of HostProcess containers and non-HostProcess
-                          containers). In addition, if HostProcess is true then HostNetwork
-                          must also be set to true.
                        type: boolean
                      runAsUserName:
-                        description: The UserName in Windows to run the entrypoint
-                          of the container process. Defaults to the user specified
-                          in image metadata if unspecified. May also be set in PodSecurityContext.
-                          If set in both SecurityContext and PodSecurityContext, the
-                          value specified in SecurityContext takes precedence.
                        type: string
                    type: object
                type: object
              serviceAccountName:
-                description: ServiceAccountName is the name of the ServiceAccount
-                  to use for running Grafana Agent pods.
                type: string
              storage:
-                description: Storage spec to specify how storage will be used.
                properties:
                  disableMountSubPath:
-                    description: '*Deprecated: subPath usage will be removed in a
-                      future release.*'
                    type: boolean
                  emptyDir:
-                    description: 'EmptyDirVolumeSource to be used by the StatefulSet.
-                      If specified, it takes precedence over `ephemeral` and `volumeClaimTemplate`.
-                      More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir'
                    properties:
                      medium:
-                        description: 'medium represents what type of storage medium
-                          should back this directory. The default is "" which means
-                          to use the node''s default medium. Must be an empty string
-                          (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
                        type: string
                      sizeLimit:
                        anyOf:
                        - type: integer
                        - type: string
-                        description: 'sizeLimit is the total amount of local storage
-                          required for this EmptyDir volume. The size limit is also
-                          applicable for memory medium. The maximum usage on memory
-                          medium EmptyDir would be the minimum value between the SizeLimit
-                          specified here and the sum of memory limits of all containers
-                          in a pod. The default is nil which means that the limit
-                          is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
                        pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                        x-kubernetes-int-or-string: true
                    type: object
                  ephemeral:
-                    description: 'EphemeralVolumeSource to be used by the StatefulSet.
-                      This is a beta field in k8s 1.21 and GA in 1.15. For lower versions,
-                      starting with k8s 1.19, it requires enabling the GenericEphemeralVolume
-                      feature gate. More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes'
                    properties:
                      volumeClaimTemplate:
-                        description: "Will be used to create a stand-alone PVC to
-                          provision the volume. The pod in which this EphemeralVolumeSource
-                          is embedded will be the owner of the PVC, i.e. the PVC will
-                          be deleted together with the pod. The name of the PVC will
-                          be `-` where `` is the
-                          name from the `PodSpec.Volumes` array entry. Pod validation
-                          will reject the pod if the concatenated name is not valid
-                          for a PVC (for example, too long). \n An existing PVC with
-                          that name that is not owned by the pod will *not* be used
-                          for the pod to avoid using an unrelated volume by mistake.
-                          Starting the pod is then blocked until the unrelated PVC
-                          is removed. If such a pre-created PVC is meant to be used
-                          by the pod, the PVC has to updated with an owner reference
-                          to the pod once the pod exists. Normally this should not
-                          be necessary, but it may be useful when manually reconstructing
-                          a broken cluster. \n This field is read-only and no changes
-                          will be made by Kubernetes to the PVC after it has been
-                          created. \n Required, must not be nil."
                        properties:
                          metadata:
-                            description: May contain labels and annotations that will
-                              be copied into the PVC when creating it. No other fields
-                              are allowed and will be rejected during validation.
                            type: object
                          spec:
-                            description: The specification for the PersistentVolumeClaim.
-                              The entire content is copied unchanged into the PVC
-                              that gets created from this template. The same fields
-                              as in a PersistentVolumeClaim are also valid here.
                            properties:
                              accessModes:
-                                description: 'accessModes contains the desired access
-                                  modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
                                items:
                                  type: string
                                type: array
                              dataSource:
-                                description: 'dataSource field can be used to specify
-                                  either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
-                                  * An existing PVC (PersistentVolumeClaim) If the
-                                  provisioner or an external controller can support
-                                  the specified data source, it will create a new
-                                  volume based on the contents of the specified data
-                                  source. When the AnyVolumeDataSource feature gate
-                        When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
                       properties:
                         apiGroup:
-                          description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
                           type: string
                         kind:
-                          description: Kind is the type of resource being referenced
                           type: string
                         name:
-                          description: Name is the name of resource being referenced
                           type: string
                       required:
                       - kind
@@ -5381,90 +2657,25 @@ spec:
                       type: object
                       x-kubernetes-map-type: atomic
                     dataSourceRef:
-                      description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
                       properties:
                         apiGroup:
-                          description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
                           type: string
                         kind:
-                          description: Kind is the type of resource being referenced
                           type: string
                         name:
-                          description: Name is the name of resource being referenced
                           type: string
                         namespace:
-                          description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
                           type: string
                       required:
                       - kind
                       - name
                       type: object
                     resources:
-                      description: 'resources represents the minimum resources the volume should have.
-                        If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
                       properties:
                         claims:
-                          description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
                           items:
-                            description: ResourceClaim references one entry in PodSpec.ResourceClaims.
                             properties:
                               name:
-                                description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
                                 type: string
                             required:
                             - name
@@ -5480,8 +2691,6 @@ spec:
                             - type: string
                             pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                             x-kubernetes-int-or-string: true
-                          description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
                           type: object
                         requests:
                           additionalProperties:
@@ -5490,43 +2699,18 @@ spec:
                             - type: string
                             pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                             x-kubernetes-int-or-string: true
-                          description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
                           type: object
                       type: object
                     selector:
-                      description: selector is a label query over volumes to consider for binding.
                       properties:
                         matchExpressions:
-                          description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
                           items:
-                            description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
                             properties:
                               key:
-                                description: key is the label key that the selector applies to.
                                 type: string
                               operator:
-                                description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
                                 type: string
                               values:
-                                description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
                                 items:
                                   type: string
                                 type: array
@@ -5538,27 +2722,14 @@ spec:
                         matchLabels:
                           additionalProperties:
                             type: string
-                          description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
                           type: object
                       type: object
                       x-kubernetes-map-type: atomic
                     storageClassName:
-                      description: 'storageClassName is the name of the StorageClass required by the claim.
-                        More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
                       type: string
                     volumeMode:
-                      description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.
                       type: string
                     volumeName:
-                      description: volumeName is the binding reference to the PersistentVolume backing this claim.
                       type: string
                   type: object
               required:
@@ -5566,87 +2737,37 @@ spec:
               type: object
           type: object
         volumeClaimTemplate:
-          description: Defines the PVC spec to be used by the Prometheus StatefulSets. The easiest way to use a volume that cannot be automatically provisioned is to use a label selector alongside manually created PersistentVolumes.
           properties:
             apiVersion:
-              description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
               type: string
             kind:
-              description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
               type: string
             metadata:
-              description: EmbeddedMetadata contains metadata relevant to an EmbeddedResource.
               properties:
                 annotations:
                   additionalProperties:
                     type: string
-                  description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
                   type: object
                 labels:
                   additionalProperties:
                     type: string
-                  description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels'
                   type: object
                 name:
-                  description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
                   type: string
               type: object
             spec:
-              description: 'Defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
               properties:
                 accessModes:
-                  description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
                   items:
                     type: string
                   type: array
                 dataSource:
-                  description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.
-                    When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
                   properties:
                     apiGroup:
-                      description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
                       type: string
                     kind:
-                      description: Kind is the type of resource being referenced
                       type: string
                     name:
-                      description: Name is the name of resource being referenced
                       type: string
                   required:
                   - kind
@@ -5654,85 +2775,25 @@ spec:
                   type: object
                   x-kubernetes-map-type: atomic
                 dataSourceRef:
-                  description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
                   properties:
                     apiGroup:
-                      description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
                       type: string
                     kind:
-                      description: Kind is the type of resource being referenced
                       type: string
                     name:
-                      description: Name is the name of resource being referenced
                       type: string
                     namespace:
-                      description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
                       type: string
                   required:
                   - kind
                   - name
                   type: object
                 resources:
-                  description: 'resources represents the minimum resources the volume should have.
-                    If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
                   properties:
                     claims:
-                      description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
                       items:
-                        description: ResourceClaim references one entry in PodSpec.ResourceClaims.
                         properties:
                           name:
-                            description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
                             type: string
                         required:
                         - name
@@ -5748,8 +2809,6 @@ spec:
                         - type: string
                         pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                         x-kubernetes-int-or-string: true
-                      description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
                       type: object
                     requests:
                       additionalProperties:
@@ -5758,42 +2817,18 @@ spec:
                         - type: string
                         pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                         x-kubernetes-int-or-string: true
-                      description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
                       type: object
                   type: object
                 selector:
-                  description: selector is a label query over volumes to consider for binding.
                   properties:
                     matchExpressions:
-                      description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
                       items:
-                        description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
                         properties:
                           key:
-                            description: key is the label key that the selector applies to.
                             type: string
                           operator:
-                            description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
                             type: string
                           values:
-                            description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
                             items:
                               type: string
                             type: array
@@ -5805,80 +2840,25 @@ spec:
                     matchLabels:
                       additionalProperties:
                         type: string
-                      description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
                       type: object
                   type: object
                   x-kubernetes-map-type: atomic
                 storageClassName:
-                  description: 'storageClassName is the name of the StorageClass required by the claim.
-                    More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
                   type: string
                 volumeMode:
-                  description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.
                   type: string
                 volumeName:
-                  description: volumeName is the binding reference to the PersistentVolume backing this claim.
                   type: string
               type: object
             status:
-              description: '*Deprecated: this field is never set.*'
              properties:
                 accessModes:
-                  description: 'accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
                   items:
                     type: string
                   type: array
                 allocatedResourceStatuses:
                   additionalProperties:
-                    description: When a controller receives persistentvolume claim update with ClaimResourceStatus for a resource that it does not recognizes, then it should ignore that update and let other controllers handle it.
                     type: string
-                  description: "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either: * Un-prefixed keys: - storage - the capacity of the volume. * Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\" Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used. \n ClaimResourceStatus can be in any of following states: - ControllerResizeInProgress: State set when resize controller starts resizing the volume in control-plane. - ControllerResizeFailed: State set when resize has failed in resize controller with a terminal error. - NodeResizePending: State set when resize controller has finished resizing the volume but further resizing of volume is needed on the node. - NodeResizeInProgress: State set when kubelet starts resizing the volume. - NodeResizeFailed: State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed. For example: if expanding a PVC for more capacity - this field can be one of the following states: - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\" - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\" - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\" - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\" - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\" When this field is not set, it means that no resize operation is in progress for the given PVC. \n A controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC. \n This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature."
                   type: object
                   x-kubernetes-map-type: granular
                 allocatedResources:
@@ -5888,31 +2868,6 @@ spec:
                     - type: string
                     pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                     x-kubernetes-int-or-string: true
-                  description: "allocatedResources tracks the resources allocated to a PVC including its capacity.
-                    Key names follow standard Kubernetes label syntax. Valid values are either: * Un-prefixed keys: - storage - the capacity of the volume. * Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\" Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used. \n Capacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. \n A controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC. \n This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature."
                   type: object
                 capacity:
                   additionalProperties:
@@ -5921,43 +2876,23 @@ spec:
                     - type: string
                     pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                     x-kubernetes-int-or-string: true
-                  description: capacity represents the actual resources of the underlying volume.
                   type: object
                 conditions:
-                  description: conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.
                   items:
-                    description: PersistentVolumeClaimCondition contains details about state of pvc
                     properties:
                       lastProbeTime:
-                        description: lastProbeTime is the time we probed the condition.
                         format: date-time
                         type: string
                       lastTransitionTime:
-                        description: lastTransitionTime is the time the condition transitioned from one status to another.
                         format: date-time
                         type: string
                       message:
-                        description: message is the human-readable message indicating details about last transition.
                         type: string
                       reason:
-                        description: reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized.
                         type: string
                       status:
                         type: string
                       type:
-                        description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
                         type: string
                     required:
                     - status
@@ -5965,86 +2900,39 @@ spec:
                     type: object
                   type: array
                 phase:
-                  description: phase represents the current phase of PersistentVolumeClaim.
                   type: string
               type: object
           type: object
       type: object
     tolerations:
-      description: Tolerations, if specified, controls the pod's tolerations.
       items:
-        description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
         properties:
           effect:
-            description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
             type: string
           key:
-            description: Key is the taint key that the toleration applies to. Empty means match all taint keys.
-              If the key is empty, operator must be Exists; this combination means to match all values and all keys.
             type: string
           operator:
-            description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
             type: string
           tolerationSeconds:
-            description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
             format: int64
             type: integer
           value:
-            description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
             type: string
         type: object
       type: array
     topologySpreadConstraints:
-      description: TopologySpreadConstraints, if specified, controls the pod's topology spread constraints.
       items:
-        description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
         properties:
           labelSelector:
-            description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
             properties:
               matchExpressions:
-                description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
                 items:
-                  description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
                   properties:
                     key:
-                      description: key is the label key that the selector applies to.
                       type: string
                     operator:
-                      description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
                       type: string
                     values:
-                      description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
                       items:
                         type: string
                       type: array
@@ -6056,126 +2944,27 @@ spec:
               matchLabels:
                 additionalProperties:
                   type: string
-                description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
                 type: object
             type: object
             x-kubernetes-map-type: atomic
           matchLabelKeys:
-            description: "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. \n This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default)."
             items:
               type: string
             type: array
             x-kubernetes-list-type: atomic
           maxSkew:
-            description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
             format: int32
             type: integer
           minDomains:
-            description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
             format: int32
             type: integer
           nodeAffinityPolicy:
-            description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
             type: string
           nodeTaintsPolicy:
-            description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy.
-              This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
             type: string
           topologyKey:
-            description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
             type: string
           whenUnsatisfiable:
-            description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
             type: string
         required:
         - maxSkew
@@ -6184,44 +2973,21 @@ spec:
         type: object
       type: array
     version:
-      description: Version of Grafana Agent to be deployed.
       type: string
     volumeMounts:
-      description: VolumeMounts lets you configure additional VolumeMounts on the output StatefulSet definition. Specified VolumeMounts are appended to other VolumeMounts generated as a result of StorageSpec objects in the Grafana Agent container.
       items:
-        description: VolumeMount describes a mounting of a Volume within a container.
         properties:
           mountPath:
-            description: Path within the container at which the volume should be mounted. Must not contain ':'.
             type: string
           mountPropagation:
-            description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
             type: string
           name:
-            description: This must match the Name of a Volume.
             type: string
           readOnly:
-            description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
             type: boolean
           subPath:
-            description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
             type: string
           subPathExpr:
-            description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
             type: string
         required:
         - mountPath
@@ -6229,224 +2995,106 @@ spec:
         type: object
       type: array
     volumes:
-      description: Volumes allows configuration of additional volumes on the output StatefulSet definition. The volumes specified are appended to other volumes that are generated as a result of StorageSpec objects.
       items:
-        description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
         properties:
           awsElasticBlockStore:
-            description: 'awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
             properties:
               fsType:
-                description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine'
                 type: string
               partition:
-                description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).'
                 format: int32
                 type: integer
               readOnly:
-                description: 'readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
                 type: boolean
               volumeID:
-                description: 'volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
                 type: string
             required:
             - volumeID
             type: object
           azureDisk:
-            description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
             properties:
               cachingMode:
-                description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.'
                 type: string
               diskName:
-                description: diskName is the Name of the data disk in the blob storage
                 type: string
               diskURI:
-                description: diskURI is the URI of data disk in the blob storage
                 type: string
               fsType:
-                description: fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
                 type: string
               kind:
-                description: 'kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared'
                 type: string
               readOnly:
-                description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
                 type: boolean
             required:
             - diskName
             - diskURI
             type: object
           azureFile:
-            description: azureFile represents an Azure File Service mount on the host and bind mount to the pod.
             properties:
               readOnly:
-                description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
                 type: boolean
               secretName:
-                description: secretName is the name of secret that contains Azure Storage Account Name and Key
                 type: string
               shareName:
-                description: shareName is the azure share Name
                 type: string
             required:
             - secretName
             - shareName
             type: object
           cephfs:
-            description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
             properties:
               monitors:
-                description: 'monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
                 items:
                   type: string
                 type: array
               path:
-                description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /'
                 type: string
               readOnly:
-                description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
                 type: boolean
               secretFile:
-                description: 'secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
                 type: string
               secretRef:
-                description: 'secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
                 properties:
                   name:
-                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
                     type: string
                 type: object
                 x-kubernetes-map-type: atomic
               user:
-                description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
                 type: string
             required:
             - monitors
             type: object
           cinder:
-            description: 'cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
             properties:
               fsType:
-                description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
                 type: string
               readOnly:
-                description: 'readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
                 type: boolean
               secretRef:
-                description: 'secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.'
                 properties:
                   name:
-                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
                     type: string
                 type: object
                 x-kubernetes-map-type: atomic
               volumeID:
-                description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
                 type: string
             required:
             - volumeID
             type: object
           configMap:
-            description: configMap represents a configMap that should populate this volume
             properties:
               defaultMode:
-                description: 'defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644.
-                  Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.'
                 format: int32
                 type: integer
               items:
-                description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
                 items:
-                  description: Maps a string key to a path within a volume.
                   properties:
                     key:
-                      description: key is the key to project.
                       type: string
                     mode:
-                      description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.'
                       format: int32
                       type: integer
                     path:
-                      description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
                       type: string
                   required:
                   - key
@@ -6454,139 +3102,66 @@ spec:
                   type: object
                 type: array
               name:
-                description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
                 type: string
               optional:
-                description: optional specify whether the ConfigMap or its keys must be defined
                 type: boolean
             type: object
             x-kubernetes-map-type: atomic
           csi:
-            description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
             properties:
               driver:
-                description: driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.
                 type: string
               fsType:
-                description: fsType to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.
                 type: string
               nodePublishSecretRef:
-                description: nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.
                 properties:
                   name:
-                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
                     type: string
                 type: object
                 x-kubernetes-map-type: atomic
               readOnly:
-                description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).
                 type: boolean
               volumeAttributes:
                 additionalProperties:
                   type: string
-                description: volumeAttributes stores driver-specific properties that are passed to the CSI driver.
-                  Consult your driver's documentation for supported values.
                 type: object
             required:
             - driver
             type: object
           downwardAPI:
-            description: downwardAPI represents downward API about the pod that should populate this volume
             properties:
               defaultMode:
-                description: 'Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.'
                 format: int32
                 type: integer
               items:
-                description: Items is a list of downward API volume file
                 items:
-                  description: DownwardAPIVolumeFile represents information to create the file containing the pod field
                   properties:
                     fieldRef:
-                      description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.'
                      properties:
                         apiVersion:
-                          description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
                           type: string
                         fieldPath:
-                          description: Path of the field to select in the specified API version.
                           type: string
                       required:
                       - fieldPath
                       type: object
                       x-kubernetes-map-type: atomic
                     mode:
-                      description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.'
                       format: int32
                       type: integer
                     path:
-                      description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..'''
                       type: string
                     resourceFieldRef:
-                      description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.'
                       properties:
                         containerName:
-                          description: 'Container name: required for volumes, optional for env vars'
                           type: string
                         divisor:
                           anyOf:
                           - type: integer
                           - type: string
-                          description: Specifies the output format of the exposed resources, defaults to "1"
                           pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                           x-kubernetes-int-or-string: true
                         resource:
-                          description: 'Required: resource to select'
                           type: string
                       required:
                       - resource
@@ -6598,114 +3173,35 @@ spec:
                 type: array
             type: object
           emptyDir:
-            description: 'emptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
             properties:
               medium:
-                description: 'medium represents what type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory.
-                  More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
                 type: string
               sizeLimit:
                 anyOf:
                 - type: integer
                 - type: string
-                description: 'sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
                 pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                 x-kubernetes-int-or-string: true
             type: object
           ephemeral:
-            description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. \n Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). \n Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. \n Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. \n A pod can use both types of ephemeral volumes and persistent volumes at the same time."
             properties:
               volumeClaimTemplate:
-                description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). \n An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. \n This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. \n Required, must not be nil."
                 properties:
                   metadata:
-                    description: May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.
                     type: object
                   spec:
-                    description: The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.
                     properties:
                       accessModes:
-                        description: 'accessModes contains the desired access modes the volume should have.
-                          More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
                         items:
                           type: string
                         type: array
                       dataSource:
-                        description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.'
                         properties:
                           apiGroup:
-                            description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
                             type: string
                           kind:
-                            description: Kind is the type of resource being referenced
                             type: string
                           name:
-                            description: Name is the name of resource being referenced
                             type: string
                         required:
                         - kind
@@ -6713,94 +3209,25 @@ spec:
                         type: object
                         x-kubernetes-map-type: atomic
                       dataSourceRef:
-                        description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.'
                         properties:
                           apiGroup:
-                            description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.
                             type: string
                           kind:
-                            description: Kind is the type of resource being referenced
                             type: string
                           name:
-                            description: Name is the name of resource being referenced
                             type: string
                           namespace:
-                            description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
                             type: string
                         required:
                         - kind
                         - name
                         type: object
                       resources:
-                        description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
                         properties:
                           claims:
-                            description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
                             items:
-                              description: ResourceClaim references one entry in PodSpec.ResourceClaims.
                               properties:
                                 name:
-                                  description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
                                   type: string
                               required:
                               - name
@@ -6816,8 +3243,6 @@ spec:
                              - type: string
                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                              x-kubernetes-int-or-string: true
-                            description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
                             type: object
                           requests:
                             additionalProperties:
@@ -6826,46 +3251,18 @@ spec:
                              - type: string
                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                              x-kubernetes-int-or-string: true
-                            description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
                             type: object
                         type: object
                       selector:
-                        description: selector is a label query over volumes to consider for binding.
                         properties:
                           matchExpressions:
-                            description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
                             items:
-                              description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
                               properties:
                                 key:
-                                  description: key is the label key that the selector applies to.
                                   type: string
                                 operator:
-                                  description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
                                   type: string
                                 values:
-                                  description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.
This - array is replaced during a strategic - merge patch. items: type: string type: array @@ -6877,28 +3274,14 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of the - StorageClass required by the claim. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' type: string volumeMode: - description: volumeMode defines what type of volume - is required by the claim. Value of Filesystem - is implied when not included in claim spec. type: string volumeName: - description: volumeName is the binding reference - to the PersistentVolume backing this claim. type: string type: object required: @@ -6906,74 +3289,38 @@ spec: type: object type: object fc: - description: fc represents a Fibre Channel resource that is - attached to a kubelet's host machine and then exposed to the - pod. properties: fsType: - description: 'fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. TODO: how do we prevent errors in the - filesystem from compromising the machine' type: string lun: - description: 'lun is Optional: FC target lun number' format: int32 type: integer readOnly: - description: 'readOnly is Optional: Defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts.' type: boolean targetWWNs: - description: 'targetWWNs is Optional: FC target worldwide - names (WWNs)' items: type: string type: array wwids: - description: 'wwids Optional: FC volume world wide identifiers - (wwids) Either wwids or combination of targetWWNs and - lun must be set, but not both simultaneously.' items: type: string type: array type: object flexVolume: - description: flexVolume represents a generic volume resource - that is provisioned/attached using an exec based plugin. properties: driver: - description: driver is the name of the driver to use for - this volume. type: string fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". The default filesystem depends - on FlexVolume script. type: string options: additionalProperties: type: string - description: 'options is Optional: this field holds extra - command options if any.' type: object readOnly: - description: 'readOnly is Optional: defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts.' type: boolean secretRef: - description: 'secretRef is Optional: secretRef is reference - to the secret object containing sensitive information - to pass to the plugin scripts. This may be empty if no - secret object is specified. If the secret object contains - more than one secret, all secrets are passed to the plugin - scripts.' properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string type: object x-kubernetes-map-type: atomic @@ -6981,184 +3328,88 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running properties: datasetName: - description: datasetName is Name of the dataset stored as - metadata -> name on the dataset for Flocker should be - considered as deprecated type: string datasetUUID: - description: datasetUUID is the UUID of the dataset. This - is unique identifier of a Flocker dataset type: string type: object gcePersistentDisk: - description: 'gcePersistentDisk represents a GCE Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' properties: fsType: - description: 'fsType is filesystem type of the volume that - you want to mount. Tip: Ensure that the filesystem type - is supported by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from - compromising the machine' type: string partition: - description: 'partition is the partition in the volume that - you want to mount. If omitted, the default is to mount - by volume name. Examples: For volume /dev/sda1, you specify - the partition as "1". Similarly, the volume partition - for /dev/sda is "0" (or you can leave the property empty). - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' format: int32 type: integer pdName: - description: 'pdName is unique name of the PD resource in - GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' type: string readOnly: - description: 'readOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' type: boolean required: - pdName type: object gitRepo: - description: 'gitRepo represents a git repository at a particular - revision. DEPRECATED: GitRepo is deprecated. To provision - a container with a git repo, mount an EmptyDir into an InitContainer - that clones the repo using git, then mount the EmptyDir into - the Pod''s container.' properties: directory: - description: directory is the target directory name. Must - not contain or start with '..'. If '.' is supplied, the - volume directory will be the git repository. Otherwise, - if specified, the volume will contain the git repository - in the subdirectory with the given name. type: string repository: - description: repository is the URL type: string revision: - description: revision is the commit hash for the specified - revision. type: string required: - repository type: object glusterfs: - description: 'glusterfs represents a Glusterfs mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' properties: endpoints: - description: 'endpoints is the endpoint name that details - Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: string path: - description: 'path is the Glusterfs volume path. 
More info: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: string readOnly: - description: 'readOnly here will force the Glusterfs volume - to be mounted with read-only permissions. Defaults to - false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: boolean required: - endpoints - path type: object hostPath: - description: 'hostPath represents a pre-existing file or directory - on the host machine that is directly exposed to the container. - This is generally used for system agents or other privileged - things that are allowed to see the host machine. Most containers - will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- TODO(jonesdl) We need to restrict who can use host directory - mounts and who can/can not mount host directories as read/write.' properties: path: - description: 'path of the directory on the host. If the - path is a symlink, it will follow the link to the real - path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' type: string type: - description: 'type for HostPath Volume Defaults to "" More - info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' type: string required: - path type: object iscsi: - description: 'iscsi represents an ISCSI Disk resource that is - attached to a kubelet''s host machine and then exposed to - the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' properties: chapAuthDiscovery: - description: chapAuthDiscovery defines whether support iSCSI - Discovery CHAP authentication type: boolean chapAuthSession: - description: chapAuthSession defines whether support iSCSI - Session CHAP authentication type: boolean fsType: - description: 'fsType is the filesystem type of the volume - that you want to mount. Tip: Ensure that the filesystem - type is supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from - compromising the machine' type: string initiatorName: - description: initiatorName is the custom iSCSI Initiator - Name. If initiatorName is specified with iscsiInterface - simultaneously, new iSCSI interface : will be created for the connection. type: string iqn: - description: iqn is the target iSCSI Qualified Name. type: string iscsiInterface: - description: iscsiInterface is the interface Name that uses - an iSCSI transport. Defaults to 'default' (tcp). type: string lun: - description: lun represents iSCSI Target Lun number. format: int32 type: integer portals: - description: portals is the iSCSI Target Portal List. The - portal is either an IP or ip_addr:port if the port is - other than default (typically TCP ports 860 and 3260). items: type: string type: array readOnly: - description: readOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. type: boolean secretRef: - description: secretRef is the CHAP Secret for iSCSI target - and initiator authentication properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object x-kubernetes-map-type: atomic targetPortal: - description: targetPortal is iSCSI Target Portal. 
The Portal - is either an IP or ip_addr:port if the port is other than - default (typically TCP ports 860 and 3260). type: string required: - iqn @@ -7166,148 +3417,67 @@ spec: - targetPortal type: object name: - description: 'name of the volume. Must be a DNS_LABEL and unique - within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string nfs: - description: 'nfs represents an NFS mount on the host that shares - a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' properties: path: - description: 'path that is exported by the NFS server. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: string readOnly: - description: 'readOnly here will force the NFS export to - be mounted with read-only permissions. Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: boolean server: - description: 'server is the hostname or IP address of the - NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: string required: - path - server type: object persistentVolumeClaim: - description: 'persistentVolumeClaimVolumeSource represents a - reference to a PersistentVolumeClaim in the same namespace. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: claimName: - description: 'claimName is the name of a PersistentVolumeClaim - in the same namespace as the pod using this volume. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' type: string readOnly: - description: readOnly Will force the ReadOnly setting in - VolumeMounts. Default false. type: boolean required: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine properties: fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. type: string pdID: - description: pdID is the ID that identifies Photon Controller - persistent disk type: string required: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine properties: fsType: - description: fSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating - system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" - if unspecified. type: string readOnly: - description: readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. type: boolean volumeID: - description: volumeID uniquely identifies a Portworx volume type: string required: - volumeID type: object projected: - description: projected items for all in one resources secrets, - configmaps, and downward API properties: defaultMode: - description: defaultMode are the mode bits used to set permissions - on created files by default. Must be an octal value between - 0000 and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires decimal - values for mode bits. Directories within the path are - not affected by this setting. This might be in conflict - with other options that affect the file mode, like fsGroup, - and the result can be other mode bits set. 
format: int32 type: integer sources: - description: sources is the list of volume projections items: - description: Projection that may be projected along with - other supported volume types properties: configMap: - description: configMap information about the configMap - data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified which - is not present in the ConfigMap, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain the - '..' path or start with '..'. items: - description: Maps a string key to a path within - a volume. properties: key: - description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 and - 0777 or a decimal value between 0 and - 511. YAML accepts both octal and decimal - values, JSON requires decimal values for - mode bits. If not specified, the volume - defaultMode will be used. This might be - in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' format: int32 type: integer path: - description: path is the relative path of - the file to map the key to. May not be - an absolute path. May not contain the - path element '..'. May not start with - the string '..'. type: string required: - key @@ -7315,91 +3485,42 @@ spec: type: object type: array name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: optional specify whether the ConfigMap - or its keys must be defined type: boolean type: object x-kubernetes-map-type: atomic downwardAPI: - description: downwardAPI information about the downwardAPI - data to project properties: items: - description: Items is a list of DownwardAPIVolume - file items: - description: DownwardAPIVolumeFile represents - information to create the file containing - the pod field properties: fieldRef: - description: 'Required: Selects a field - of the pod: only annotations, labels, - name and namespace are supported.' properties: apiVersion: - description: Version of the schema the - FieldPath is written in terms of, - defaults to "v1". type: string fieldPath: - description: Path of the field to select - in the specified API version. type: string required: - fieldPath type: object x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used to - set permissions on this file, must be - an octal value between 0000 and 0777 or - a decimal value between 0 and 511. YAML - accepts both octal and decimal values, - JSON requires decimal values for mode - bits. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can - be other mode bits set.' format: int32 type: integer path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' - path. Must be utf-8 encoded. 
The first - item of the relative path must not start - with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu - and requests.memory) are currently supported.' properties: containerName: - description: 'Container name: required - for volumes, optional for env vars' type: string divisor: anyOf: - type: integer - type: string - description: Specifies the output format - of the exposed resources, defaults - to "1" pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true resource: - description: 'Required: resource to - select' type: string required: - resource @@ -7411,48 +3532,16 @@ spec: type: array type: object secret: - description: secret information about the secret data - to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified which - is not present in the Secret, the volume setup - will error unless it is marked optional. Paths - must be relative and may not contain the '..' - path or start with '..'. items: - description: Maps a string key to a path within - a volume. properties: key: - description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 and - 0777 or a decimal value between 0 and - 511. YAML accepts both octal and decimal - values, JSON requires decimal values for - mode bits. If not specified, the volume - defaultMode will be used. This might be - in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' format: int32 type: integer path: - description: path is the relative path of - the file to map the key to. May not be - an absolute path. May not contain the - path element '..'. May not start with - the string '..'. type: string required: - key @@ -7460,45 +3549,19 @@ spec: type: object type: array name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: optional field specify whether the - Secret or its key must be defined type: boolean type: object x-kubernetes-map-type: atomic serviceAccountToken: - description: serviceAccountToken is information about - the serviceAccountToken data to project properties: audience: - description: audience is the intended audience - of the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account - token. As the token approaches expiration, the - kubelet volume plugin will proactively rotate - the service account token. 
The kubelet will - start trying to rotate the token if the token - is older than 80 percent of its time to live - or if the token is older than 24 hours.Defaults - to 1 hour and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the - mount point of the file to project the token - into. type: string required: - path @@ -7507,148 +3570,76 @@ spec: type: array type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime properties: group: - description: group to map volume access to Default is no - group type: string readOnly: - description: readOnly here will force the Quobyte volume - to be mounted with read-only permissions. Defaults to - false. type: boolean registry: - description: registry represents a single or multiple Quobyte - Registry services specified as a string as host:port pair - (multiple entries are separated with commas) which acts - as the central registry for volumes type: string tenant: - description: tenant owning the given Quobyte volume in the - Backend Used with dynamically provisioned Quobyte volumes, - value is set by the plugin type: string user: - description: user to map volume access to Defaults to serivceaccount - user type: string volume: - description: volume is a string that references an already - created Quobyte volume by name. type: string required: - registry - volume type: object rbd: - description: 'rbd represents a Rados Block Device mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' properties: fsType: - description: 'fsType is the filesystem type of the volume - that you want to mount. Tip: Ensure that the filesystem - type is supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from - compromising the machine' type: string image: - description: 'image is the rados image name. More info: - https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string keyring: - description: 'keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string monitors: - description: 'monitors is a collection of Ceph monitors. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' items: type: string type: array pool: - description: 'pool is the rados pool name. Default is rbd. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string readOnly: - description: 'readOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: boolean secretRef: - description: 'secretRef is name of the authentication secret - for RBDUser. If provided overrides keyring. Default is - nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object x-kubernetes-map-type: atomic user: - description: 'user is the rados user name. Default is admin. 
- More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string required: - image - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. properties: fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Default is "xfs". type: string gateway: - description: gateway is the host address of the ScaleIO - API Gateway. type: string protectionDomain: - description: protectionDomain is the name of the ScaleIO - Protection Domain for the configured storage. type: string readOnly: - description: readOnly Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: secretRef references to the secret for ScaleIO - user and other sensitive information. If this is not provided, - Login operation will fail. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object x-kubernetes-map-type: atomic sslEnabled: - description: sslEnabled Flag enable/disable SSL communication - with Gateway, default false type: boolean storageMode: - description: storageMode indicates whether the storage for - a volume should be ThickProvisioned or ThinProvisioned. - Default is ThinProvisioned. type: string storagePool: - description: storagePool is the ScaleIO Storage Pool associated - with the protection domain. type: string system: - description: system is the name of the storage system as - configured in ScaleIO. type: string volumeName: - description: volumeName is the name of a volume already - created in the ScaleIO system that is associated with - this volume source. type: string required: - gateway @@ -7656,54 +3647,19 @@ spec: - system type: object secret: - description: 'secret represents a secret that should populate - this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' properties: defaultMode: - description: 'defaultMode is Optional: mode bits used to - set permissions on created files by default. Must be an - octal value between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. Defaults to - 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' format: int32 type: integer items: - description: items If unspecified, each key-value pair in - the Data field of the referenced Secret will be projected - into the volume as a file whose name is the key and content - is the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in - the Secret, the volume setup will error unless it is marked - optional. Paths must be relative and may not contain the - '..' path or start with '..'. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used to - set permissions on this file. Must be an octal value - between 0000 and 0777 or a decimal value between - 0 and 511. 
YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. If not - specified, the volume defaultMode will be used. - This might be in conflict with other options that - affect the file mode, like fsGroup, and the result - can be other mode bits set.' format: int32 type: integer path: - description: path is the relative path of the file - to map the key to. May not be an absolute path. - May not contain the path element '..'. May not start - with the string '..'. type: string required: - key @@ -7711,76 +3667,36 @@ spec: type: object type: array optional: - description: optional field specify whether the Secret or - its keys must be defined type: boolean secretName: - description: 'secretName is the name of the secret in the - pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. properties: fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. type: string readOnly: - description: readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: secretRef specifies the secret to use for obtaining - the StorageOS API credentials. If not specified, default - values will be attempted. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object x-kubernetes-map-type: atomic volumeName: - description: volumeName is the human-readable name of the - StorageOS volume. Volume names are only unique within - a namespace. type: string volumeNamespace: - description: volumeNamespace specifies the scope of the - volume within StorageOS. If no namespace is specified - then the Pod's namespace will be used. This allows the - Kubernetes name scoping to be mirrored within StorageOS - for tighter integration. Set VolumeName to any name to - override the default behaviour. Set to "default" if you - are not using namespaces within StorageOS. Namespaces - that do not pre-exist within StorageOS will be created. type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine properties: fsType: - description: fsType is filesystem type to mount. Must be - a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. type: string storagePolicyID: - description: storagePolicyID is the storage Policy Based - Management (SPBM) profile ID associated with the StoragePolicyName. type: string storagePolicyName: - description: storagePolicyName is the storage Policy Based - Management (SPBM) profile name. 
type: string volumePath: - description: volumePath is the path that identifies vSphere - volume vmdk type: string required: - volumePath diff --git a/operations/agent-static-operator/crds/monitoring.grafana.com_integrations.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_integrations.yaml index e786166447fd..960b2f73ac12 100644 --- a/operations/agent-static-operator/crds/monitoring.grafana.com_integrations.yaml +++ b/operations/agent-static-operator/crds/monitoring.grafana.com_integrations.yaml @@ -20,55 +20,26 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: "Integration runs a single Grafana Agent integration. Integrations - that generate telemetry must be configured to send that telemetry somewhere, - such as autoscrape for exporter-based integrations. \n Integrations have - access to the LogsInstances and MetricsInstances in the same GrafanaAgent - resource set, referenced by the / of the Instance resource. - \n For example, if there is a default/production MetricsInstance, you can - configure a supported integration's autoscrape block with: \n autoscrape: - enable: true metrics_instance: default/production \n There is currently - no way for telemetry created by an Operator-managed integration to be collected - from outside of the integration itself." properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: Specifies the desired behavior of the Integration. properties: config: - description: "The configuration for the named integration. Note that - Integrations are deployed with the integrations-next feature flag, - which has different common settings: \n https://grafana.com/docs/agent/latest/configuration/integrations/integrations-next/" type: object x-kubernetes-preserve-unknown-fields: true configMaps: - description: "An extra list of keys from ConfigMaps in the same namespace - as the Integration which will be mounted into the Grafana Agent - pod running this Integration. \n ConfigMaps are mounted at /etc/grafana-agent/integrations/configMaps///." items: - description: Selects a key from a ConfigMap. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the ConfigMap or its key must be - defined type: boolean required: - key @@ -76,26 +47,15 @@ spec: x-kubernetes-map-type: atomic type: array name: - description: Name of the integration to run (e.g., "node_exporter", - "mysqld_exporter"). type: string secrets: - description: "An extra list of keys from Secrets in the same namespace - as the Integration which will be mounted into the Grafana Agent - pod running this Integration. 
\n Secrets will be mounted at /etc/grafana-agent/integrations/secrets///." items: - description: SecretKeySelector selects a key of a Secret. properties: key: - description: The key of the secret to select from. Must be - a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must be defined type: boolean required: - key @@ -103,60 +63,26 @@ spec: x-kubernetes-map-type: atomic type: array type: - description: Type informs Grafana Agent Operator about how to manage - the integration being configured. properties: allNodes: - description: When true, the configured integration should be run - on every Node in the cluster. This is required for Integrations - that generate Node-specific metrics like node_exporter, otherwise - it must be false to avoid generating duplicate metrics. type: boolean unique: - description: Whether this integration can only be defined once - for a Grafana Agent process, such as statsd_exporter. It is - invalid for a GrafanaAgent to discover multiple unique Integrations - with the same Integration name (i.e., a single GrafanaAgent - cannot deploy two statsd_exporters). type: boolean type: object volumeMounts: - description: "An extra list of VolumeMounts to be associated with - the Grafana Agent pods running this integration. VolumeMount names - are mutated to be unique across all used IntegrationSpecs. \n Mount - paths should include the namespace/name of the Integration CR to - avoid potentially colliding with other resources." items: - description: VolumeMount describes a mounting of a Volume within - a container. properties: mountPath: - description: Path within the container at which the volume should - be mounted. Must not contain ':'. type: string mountPropagation: - description: mountPropagation determines how mounts are propagated - from the host to container and the other way around. When - not set, MountPropagationNone is used. This field is beta - in 1.10. type: string name: - description: This must match the Name of a Volume. type: string readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. type: boolean subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). type: string subPathExpr: - description: Expanded path within the volume from which the - container's volume should be mounted. Behaves similarly to - SubPath but environment variable references $(VAR_NAME) are - expanded using the container's environment. Defaults to "" - (volume's root). SubPathExpr and SubPath are mutually exclusive. type: string required: - mountPath @@ -164,228 +90,106 @@ spec: type: object type: array volumes: - description: "An extra list of Volumes to be associated with the Grafana - Agent pods running this integration. Volume names are mutated to - be unique across all Integrations. Note that the specified volumes - should be able to tolerate existing on multiple pods at once when - type is daemonset. \n Don't use volumes for loading Secrets or ConfigMaps - from the same namespace as the Integration; use the Secrets and - ConfigMaps fields instead." items: - description: Volume represents a named volume in a pod that may - be accessed by any container in the pod. 
properties: awsElasticBlockStore: - description: 'awsElasticBlockStore represents an AWS Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' properties: fsType: - description: 'fsType is the filesystem type of the volume - that you want to mount. Tip: Ensure that the filesystem - type is supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from - compromising the machine' type: string partition: - description: 'partition is the partition in the volume that - you want to mount. If omitted, the default is to mount - by volume name. Examples: For volume /dev/sda1, you specify - the partition as "1". Similarly, the volume partition - for /dev/sda is "0" (or you can leave the property empty).' format: int32 type: integer readOnly: - description: 'readOnly value true will force the readOnly - setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' type: boolean volumeID: - description: 'volumeID is unique ID of the persistent disk - resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' type: string required: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. properties: cachingMode: - description: 'cachingMode is the Host Caching mode: None, - Read Only, Read Write.' type: string diskName: - description: diskName is the Name of the data disk in the - blob storage type: string diskURI: - description: diskURI is the URI of data disk in the blob - storage type: string fsType: - description: fsType is Filesystem type to mount. Must be - a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. type: string kind: - description: 'kind expected values are Shared: multiple - blob disks per storage account Dedicated: single blob - disk per storage account Managed: azure managed data - disk (only in managed availability set). defaults to shared' type: string readOnly: - description: readOnly Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. type: boolean required: - diskName - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. properties: readOnly: - description: readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. 
type: boolean secretName: - description: secretName is the name of secret that contains - Azure Storage Account Name and Key type: string shareName: - description: shareName is the azure share Name type: string required: - secretName - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime properties: monitors: - description: 'monitors is Required: Monitors is a collection - of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' items: type: string type: array path: - description: 'path is Optional: Used as the mounted root, - rather than the full Ceph tree, default is /' type: string readOnly: - description: 'readOnly is Optional: Defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: boolean secretFile: - description: 'secretFile is Optional: SecretFile is the - path to key ring for User, default is /etc/ceph/user.secret - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: string secretRef: - description: 'secretRef is Optional: SecretRef is reference - to the authentication secret for User, default is empty. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object x-kubernetes-map-type: atomic user: - description: 'user is optional: User is the rados user name, - default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: string required: - monitors type: object cinder: - description: 'cinder represents a cinder volume attached and - mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' properties: fsType: - description: 'fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to - be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' type: string readOnly: - description: 'readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' type: boolean secretRef: - description: 'secretRef is optional: points to a secret - object containing parameters used to connect to OpenStack.' properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object x-kubernetes-map-type: atomic volumeID: - description: 'volumeID used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' type: string required: - volumeID type: object configMap: - description: configMap represents a configMap that should populate - this volume properties: defaultMode: - description: 'defaultMode is optional: mode bits used to - set permissions on created files by default. Must be an - octal value between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. Defaults to - 0644. 
Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' format: int32 type: integer items: - description: items if unspecified, each key-value pair in - the Data field of the referenced ConfigMap will be projected - into the volume as a file whose name is the key and content - is the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in - the ConfigMap, the volume setup will error unless it is - marked optional. Paths must be relative and may not contain - the '..' path or start with '..'. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used to - set permissions on this file. Must be an octal value - between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. If not - specified, the volume defaultMode will be used. - This might be in conflict with other options that - affect the file mode, like fsGroup, and the result - can be other mode bits set.' format: int32 type: integer path: - description: path is the relative path of the file - to map the key to. May not be an absolute path. - May not contain the path element '..'. May not start - with the string '..'. type: string required: - key @@ -393,139 +197,66 @@ spec: type: object type: array name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: optional specify whether the ConfigMap or its - keys must be defined type: boolean type: object x-kubernetes-map-type: atomic csi: - description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). properties: driver: - description: driver is the name of the CSI driver that handles - this volume. Consult with your admin for the correct name - as registered in the cluster. type: string fsType: - description: fsType to mount. Ex. "ext4", "xfs", "ntfs". - If not provided, the empty value is passed to the associated - CSI driver which will determine the default filesystem - to apply. type: string nodePublishSecretRef: - description: nodePublishSecretRef is a reference to the - secret object containing sensitive information to pass - to the CSI driver to complete the CSI NodePublishVolume - and NodeUnpublishVolume calls. This field is optional, - and may be empty if no secret is required. If the secret - object contains more than one secret, all secret references - are passed. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object x-kubernetes-map-type: atomic readOnly: - description: readOnly specifies a read-only configuration - for the volume. Defaults to false (read/write). type: boolean volumeAttributes: additionalProperties: type: string - description: volumeAttributes stores driver-specific properties - that are passed to the CSI driver. Consult your driver's - documentation for supported values. 
type: object required: - driver type: object downwardAPI: - description: downwardAPI represents downward API about the pod - that should populate this volume properties: defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a Optional: mode bits used to set - permissions on created files by default. Must be an octal - value between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. Defaults to - 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' format: int32 type: integer items: - description: Items is a list of downward API volume file items: - description: DownwardAPIVolumeFile represents information - to create the file containing the pod field properties: fieldRef: - description: 'Required: Selects a field of the pod: - only annotations, labels, name and namespace are - supported.' properties: apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". type: string fieldPath: - description: Path of the field to select in the - specified API version. type: string required: - fieldPath type: object x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used to set permissions - on this file, must be an octal value between 0000 - and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This might - be in conflict with other options that affect the - file mode, like fsGroup, and the result can be other - mode bits set.' format: int32 type: integer path: - description: 'Required: Path is the relative path - name of the file to be created. Must not be absolute - or contain the ''..'' path. Must be utf-8 encoded. - The first item of the relative path must not start - with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' properties: containerName: - description: 'Container name: required for volumes, - optional for env vars' type: string divisor: anyOf: - type: integer - type: string - description: Specifies the output format of the - exposed resources, defaults to "1" pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true resource: - description: 'Required: resource to select' type: string required: - resource @@ -537,114 +268,35 @@ spec: type: array type: object emptyDir: - description: 'emptyDir represents a temporary directory that - shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' properties: medium: - description: 'medium represents what type of storage medium - should back this directory. The default is "" which means - to use the node''s default medium. Must be an empty string - (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' type: string sizeLimit: anyOf: - type: integer - type: string - description: 'sizeLimit is the total amount of local storage - required for this EmptyDir volume. The size limit is also - applicable for memory medium. 
The maximum usage on memory - medium EmptyDir would be the minimum value between the - SizeLimit specified here and the sum of memory limits - of all containers in a pod. The default is nil which means - that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object ephemeral: - description: "ephemeral represents a volume that is handled - by a cluster storage driver. The volume's lifecycle is tied - to the pod that defines it - it will be created before the - pod starts, and deleted when the pod is removed. \n Use this - if: a) the volume is only needed while the pod runs, b) features - of normal volumes like restoring from snapshot or capacity - tracking are needed, c) the storage driver is specified through - a storage class, and d) the storage driver supports dynamic - volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource - for more information on the connection between this volume - type and PersistentVolumeClaim). \n Use PersistentVolumeClaim - or one of the vendor-specific APIs for volumes that persist - for longer than the lifecycle of an individual pod. \n Use - CSI for light-weight local ephemeral volumes if the CSI driver - is meant to be used that way - see the documentation of the - driver for more information. \n A pod can use both types of - ephemeral volumes and persistent volumes at the same time." properties: volumeClaimTemplate: - description: "Will be used to create a stand-alone PVC to - provision the volume. The pod in which this EphemeralVolumeSource - is embedded will be the owner of the PVC, i.e. the PVC - will be deleted together with the pod. The name of the - PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. - Pod validation will reject the pod if the concatenated - name is not valid for a PVC (for example, too long). \n - An existing PVC with that name that is not owned by the - pod will *not* be used for the pod to avoid using an unrelated - volume by mistake. Starting the pod is then blocked until - the unrelated PVC is removed. If such a pre-created PVC - is meant to be used by the pod, the PVC has to updated - with an owner reference to the pod once the pod exists. - Normally this should not be necessary, but it may be useful - when manually reconstructing a broken cluster. \n This - field is read-only and no changes will be made by Kubernetes - to the PVC after it has been created. \n Required, must - not be nil." properties: metadata: - description: May contain labels and annotations that - will be copied into the PVC when creating it. No other - fields are allowed and will be rejected during validation. type: object spec: - description: The specification for the PersistentVolumeClaim. - The entire content is copied unchanged into the PVC - that gets created from this template. The same fields - as in a PersistentVolumeClaim are also valid here. properties: accessModes: - description: 'accessModes contains the desired access - modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' items: type: string type: array dataSource: - description: 'dataSource field can be used to specify - either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the - provisioner or an external controller can support - the specified data source, it will create a new - volume based on the contents of the specified - data source. When the AnyVolumeDataSource feature - gate is enabled, dataSource contents will be copied - to dataSourceRef, and dataSourceRef contents will - be copied to dataSource when dataSourceRef.namespace - is not specified. If the namespace is specified, - then dataSourceRef will not be copied to dataSource.' properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API - group. For any other third-party types, APIGroup - is required. type: string kind: - description: Kind is the type of resource being - referenced type: string name: - description: Name is the name of resource being - referenced type: string required: - kind @@ -652,94 +304,25 @@ spec: type: object x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object - from which to populate the volume with data, if - a non-empty volume is desired. This may be any - object from a non-empty API group (non core object) - or a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed - if the type of the specified object matches some - installed volume populator or dynamic provisioner. - This field will replace the functionality of the - dataSource field and as such if both fields are - non-empty, they must have the same value. For - backwards compatibility, when namespace isn''t - specified in dataSourceRef, both fields (dataSource - and dataSourceRef) will be set to the same value - automatically if one of them is empty and the - other is non-empty. When namespace is specified - in dataSourceRef, dataSource isn''t set to the - same value and must be empty. There are three - important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types - of objects, dataSourceRef allows any non-core - object, as well as PersistentVolumeClaim objects. - * While dataSource ignores disallowed values (dropping - them), dataSourceRef preserves all values, and - generates an error if a disallowed value is specified. - * While dataSource only allows local objects, - dataSourceRef allows objects in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource - feature gate to be enabled. (Alpha) Using the - namespace field of dataSourceRef requires the - CrossNamespaceVolumeDataSource feature gate to - be enabled.' properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API - group. For any other third-party types, APIGroup - is required. 
type: string kind: - description: Kind is the type of resource being - referenced type: string name: - description: Name is the name of resource being - referenced type: string namespace: - description: Namespace is the namespace of resource - being referenced Note that when a namespace - is specified, a gateway.networking.k8s.io/ReferenceGrant - object is required in the referent namespace - to allow that namespace's owner to accept - the reference. See the ReferenceGrant documentation - for details. (Alpha) This field requires the - CrossNamespaceVolumeDataSource feature gate - to be enabled. type: string required: - kind - name type: object resources: - description: 'resources represents the minimum resources - the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify - resource requirements that are lower than previous - value but must still be higher than capacity recorded - in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: claims: - description: "Claims lists the names of resources, - defined in spec.resourceClaims, that are used - by this container. \n This is an alpha field - and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. - It can only be set for containers." items: - description: ResourceClaim references one - entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name - of one entry in pod.spec.resourceClaims - of the Pod where this field is used. - It makes that resource available inside - a container. type: string required: - name @@ -755,8 +338,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -765,46 +346,18 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. If Requests - is omitted for a container, it defaults to - Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests - cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: - description: selector is a label query over volumes - to consider for binding. properties: matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. properties: key: - description: key is the label key that - the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. 
This - array is replaced during a strategic - merge patch. items: type: string type: array @@ -816,28 +369,14 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of the - StorageClass required by the claim. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' type: string volumeMode: - description: volumeMode defines what type of volume - is required by the claim. Value of Filesystem - is implied when not included in claim spec. type: string volumeName: - description: volumeName is the binding reference - to the PersistentVolume backing this claim. type: string type: object required: @@ -845,74 +384,38 @@ spec: type: object type: object fc: - description: fc represents a Fibre Channel resource that is - attached to a kubelet's host machine and then exposed to the - pod. properties: fsType: - description: 'fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. TODO: how do we prevent errors in the - filesystem from compromising the machine' type: string lun: - description: 'lun is Optional: FC target lun number' format: int32 type: integer readOnly: - description: 'readOnly is Optional: Defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts.' type: boolean targetWWNs: - description: 'targetWWNs is Optional: FC target worldwide - names (WWNs)' items: type: string type: array wwids: - description: 'wwids Optional: FC volume world wide identifiers - (wwids) Either wwids or combination of targetWWNs and - lun must be set, but not both simultaneously.' items: type: string type: array type: object flexVolume: - description: flexVolume represents a generic volume resource - that is provisioned/attached using an exec based plugin. properties: driver: - description: driver is the name of the driver to use for - this volume. type: string fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". The default filesystem depends - on FlexVolume script. type: string options: additionalProperties: type: string - description: 'options is Optional: this field holds extra - command options if any.' type: object readOnly: - description: 'readOnly is Optional: defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts.' type: boolean secretRef: - description: 'secretRef is Optional: secretRef is reference - to the secret object containing sensitive information - to pass to the plugin scripts. This may be empty if no - secret object is specified. If the secret object contains - more than one secret, all secrets are passed to the plugin - scripts.' properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
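# The dataSourceRef behavior described in the removed text is easiest to see
# in a populated claim. A sketch of a PVC restored from a VolumeSnapshot,
# assuming a CSI driver that supports volume population and the
# AnyVolumeDataSource feature gate; all names are hypothetical:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: restored-data                  # hypothetical
spec:
  dataSourceRef:
    apiGroup: snapshot.storage.k8s.io
    kind: VolumeSnapshot
    name: nightly-snapshot             # hypothetical snapshot name
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 10Gi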
type: string type: object x-kubernetes-map-type: atomic @@ -920,184 +423,88 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running properties: datasetName: - description: datasetName is Name of the dataset stored as - metadata -> name on the dataset for Flocker should be - considered as deprecated type: string datasetUUID: - description: datasetUUID is the UUID of the dataset. This - is unique identifier of a Flocker dataset type: string type: object gcePersistentDisk: - description: 'gcePersistentDisk represents a GCE Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' properties: fsType: - description: 'fsType is filesystem type of the volume that - you want to mount. Tip: Ensure that the filesystem type - is supported by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from - compromising the machine' type: string partition: - description: 'partition is the partition in the volume that - you want to mount. If omitted, the default is to mount - by volume name. Examples: For volume /dev/sda1, you specify - the partition as "1". Similarly, the volume partition - for /dev/sda is "0" (or you can leave the property empty). - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' format: int32 type: integer pdName: - description: 'pdName is unique name of the PD resource in - GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' type: string readOnly: - description: 'readOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' type: boolean required: - pdName type: object gitRepo: - description: 'gitRepo represents a git repository at a particular - revision. DEPRECATED: GitRepo is deprecated. To provision - a container with a git repo, mount an EmptyDir into an InitContainer - that clones the repo using git, then mount the EmptyDir into - the Pod''s container.' properties: directory: - description: directory is the target directory name. Must - not contain or start with '..'. If '.' is supplied, the - volume directory will be the git repository. Otherwise, - if specified, the volume will contain the git repository - in the subdirectory with the given name. type: string repository: - description: repository is the URL type: string revision: - description: revision is the commit hash for the specified - revision. type: string required: - repository type: object glusterfs: - description: 'glusterfs represents a Glusterfs mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' properties: endpoints: - description: 'endpoints is the endpoint name that details - Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: string path: - description: 'path is the Glusterfs volume path. 
More info: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: string readOnly: - description: 'readOnly here will force the Glusterfs volume - to be mounted with read-only permissions. Defaults to - false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: boolean required: - endpoints - path type: object hostPath: - description: 'hostPath represents a pre-existing file or directory - on the host machine that is directly exposed to the container. - This is generally used for system agents or other privileged - things that are allowed to see the host machine. Most containers - will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- TODO(jonesdl) We need to restrict who can use host directory - mounts and who can/can not mount host directories as read/write.' properties: path: - description: 'path of the directory on the host. If the - path is a symlink, it will follow the link to the real - path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' type: string type: - description: 'type for HostPath Volume Defaults to "" More - info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' type: string required: - path type: object iscsi: - description: 'iscsi represents an ISCSI Disk resource that is - attached to a kubelet''s host machine and then exposed to - the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' properties: chapAuthDiscovery: - description: chapAuthDiscovery defines whether support iSCSI - Discovery CHAP authentication type: boolean chapAuthSession: - description: chapAuthSession defines whether support iSCSI - Session CHAP authentication type: boolean fsType: - description: 'fsType is the filesystem type of the volume - that you want to mount. Tip: Ensure that the filesystem - type is supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from - compromising the machine' type: string initiatorName: - description: initiatorName is the custom iSCSI Initiator - Name. If initiatorName is specified with iscsiInterface - simultaneously, new iSCSI interface : will be created for the connection. type: string iqn: - description: iqn is the target iSCSI Qualified Name. type: string iscsiInterface: - description: iscsiInterface is the interface Name that uses - an iSCSI transport. Defaults to 'default' (tcp). type: string lun: - description: lun represents iSCSI Target Lun number. format: int32 type: integer portals: - description: portals is the iSCSI Target Portal List. The - portal is either an IP or ip_addr:port if the port is - other than default (typically TCP ports 860 and 3260). items: type: string type: array readOnly: - description: readOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. type: boolean secretRef: - description: secretRef is the CHAP Secret for iSCSI target - and initiator authentication properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object x-kubernetes-map-type: atomic targetPortal: - description: targetPortal is iSCSI Target Portal. 
The Portal - is either an IP or ip_addr:port if the port is other than - default (typically TCP ports 860 and 3260). type: string required: - iqn @@ -1105,148 +512,67 @@ spec: - targetPortal type: object name: - description: 'name of the volume. Must be a DNS_LABEL and unique - within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string nfs: - description: 'nfs represents an NFS mount on the host that shares - a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' properties: path: - description: 'path that is exported by the NFS server. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: string readOnly: - description: 'readOnly here will force the NFS export to - be mounted with read-only permissions. Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: boolean server: - description: 'server is the hostname or IP address of the - NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: string required: - path - server type: object persistentVolumeClaim: - description: 'persistentVolumeClaimVolumeSource represents a - reference to a PersistentVolumeClaim in the same namespace. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: claimName: - description: 'claimName is the name of a PersistentVolumeClaim - in the same namespace as the pod using this volume. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' type: string readOnly: - description: readOnly Will force the ReadOnly setting in - VolumeMounts. Default false. type: boolean required: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine properties: fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. type: string pdID: - description: pdID is the ID that identifies Photon Controller - persistent disk type: string required: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine properties: fsType: - description: fSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating - system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" - if unspecified. type: string readOnly: - description: readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. type: boolean volumeID: - description: volumeID uniquely identifies a Portworx volume type: string required: - volumeID type: object projected: - description: projected items for all in one resources secrets, - configmaps, and downward API properties: defaultMode: - description: defaultMode are the mode bits used to set permissions - on created files by default. Must be an octal value between - 0000 and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires decimal - values for mode bits. Directories within the path are - not affected by this setting. This might be in conflict - with other options that affect the file mode, like fsGroup, - and the result can be other mode bits set. 
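# The gitRepo description removed above notes that the type is deprecated in
# favor of cloning into an emptyDir from an init container. A minimal sketch of
# that replacement pattern; the image and repository URL are placeholders:
volumes:
  - name: repo
    emptyDir: {}
initContainers:
  - name: clone-repo
    image: alpine/git                  # assumed image that provides git
    args: ["clone", "--depth=1", "https://example.com/org/repo.git", "/repo"]
    volumeMounts:
      - name: repo
        mountPath: /repo
containers:
  - name: app
    image: example/app:latest          # hypothetical application image
    volumeMounts:
      - name: repo
        mountPath: /srv/repo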
format: int32 type: integer sources: - description: sources is the list of volume projections items: - description: Projection that may be projected along with - other supported volume types properties: configMap: - description: configMap information about the configMap - data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified which - is not present in the ConfigMap, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain the - '..' path or start with '..'. items: - description: Maps a string key to a path within - a volume. properties: key: - description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 and - 0777 or a decimal value between 0 and - 511. YAML accepts both octal and decimal - values, JSON requires decimal values for - mode bits. If not specified, the volume - defaultMode will be used. This might be - in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' format: int32 type: integer path: - description: path is the relative path of - the file to map the key to. May not be - an absolute path. May not contain the - path element '..'. May not start with - the string '..'. type: string required: - key @@ -1254,91 +580,42 @@ spec: type: object type: array name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: optional specify whether the ConfigMap - or its keys must be defined type: boolean type: object x-kubernetes-map-type: atomic downwardAPI: - description: downwardAPI information about the downwardAPI - data to project properties: items: - description: Items is a list of DownwardAPIVolume - file items: - description: DownwardAPIVolumeFile represents - information to create the file containing - the pod field properties: fieldRef: - description: 'Required: Selects a field - of the pod: only annotations, labels, - name and namespace are supported.' properties: apiVersion: - description: Version of the schema the - FieldPath is written in terms of, - defaults to "v1". type: string fieldPath: - description: Path of the field to select - in the specified API version. type: string required: - fieldPath type: object x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used to - set permissions on this file, must be - an octal value between 0000 and 0777 or - a decimal value between 0 and 511. YAML - accepts both octal and decimal values, - JSON requires decimal values for mode - bits. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can - be other mode bits set.' format: int32 type: integer path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' - path. Must be utf-8 encoded. 
The first - item of the relative path must not start - with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu - and requests.memory) are currently supported.' properties: containerName: - description: 'Container name: required - for volumes, optional for env vars' type: string divisor: anyOf: - type: integer - type: string - description: Specifies the output format - of the exposed resources, defaults - to "1" pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true resource: - description: 'Required: resource to - select' type: string required: - resource @@ -1350,48 +627,16 @@ spec: type: array type: object secret: - description: secret information about the secret data - to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified which - is not present in the Secret, the volume setup - will error unless it is marked optional. Paths - must be relative and may not contain the '..' - path or start with '..'. items: - description: Maps a string key to a path within - a volume. properties: key: - description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 and - 0777 or a decimal value between 0 and - 511. YAML accepts both octal and decimal - values, JSON requires decimal values for - mode bits. If not specified, the volume - defaultMode will be used. This might be - in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' format: int32 type: integer path: - description: path is the relative path of - the file to map the key to. May not be - an absolute path. May not contain the - path element '..'. May not start with - the string '..'. type: string required: - key @@ -1399,45 +644,19 @@ spec: type: object type: array name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: optional field specify whether the - Secret or its key must be defined type: boolean type: object x-kubernetes-map-type: atomic serviceAccountToken: - description: serviceAccountToken is information about - the serviceAccountToken data to project properties: audience: - description: audience is the intended audience - of the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account - token. As the token approaches expiration, the - kubelet volume plugin will proactively rotate - the service account token. 
The kubelet will - start trying to rotate the token if the token - is older than 80 percent of its time to live - or if the token is older than 24 hours.Defaults - to 1 hour and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the - mount point of the file to project the token - into. type: string required: - path @@ -1446,148 +665,76 @@ spec: type: array type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime properties: group: - description: group to map volume access to Default is no - group type: string readOnly: - description: readOnly here will force the Quobyte volume - to be mounted with read-only permissions. Defaults to - false. type: boolean registry: - description: registry represents a single or multiple Quobyte - Registry services specified as a string as host:port pair - (multiple entries are separated with commas) which acts - as the central registry for volumes type: string tenant: - description: tenant owning the given Quobyte volume in the - Backend Used with dynamically provisioned Quobyte volumes, - value is set by the plugin type: string user: - description: user to map volume access to Defaults to serivceaccount - user type: string volume: - description: volume is a string that references an already - created Quobyte volume by name. type: string required: - registry - volume type: object rbd: - description: 'rbd represents a Rados Block Device mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' properties: fsType: - description: 'fsType is the filesystem type of the volume - that you want to mount. Tip: Ensure that the filesystem - type is supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from - compromising the machine' type: string image: - description: 'image is the rados image name. More info: - https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string keyring: - description: 'keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string monitors: - description: 'monitors is a collection of Ceph monitors. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' items: type: string type: array pool: - description: 'pool is the rados pool name. Default is rbd. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string readOnly: - description: 'readOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: boolean secretRef: - description: 'secretRef is name of the authentication secret - for RBDUser. If provided overrides keyring. Default is - nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object x-kubernetes-map-type: atomic user: - description: 'user is the rados user name. Default is admin. 
- More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string required: - image - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. properties: fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Default is "xfs". type: string gateway: - description: gateway is the host address of the ScaleIO - API Gateway. type: string protectionDomain: - description: protectionDomain is the name of the ScaleIO - Protection Domain for the configured storage. type: string readOnly: - description: readOnly Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: secretRef references to the secret for ScaleIO - user and other sensitive information. If this is not provided, - Login operation will fail. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object x-kubernetes-map-type: atomic sslEnabled: - description: sslEnabled Flag enable/disable SSL communication - with Gateway, default false type: boolean storageMode: - description: storageMode indicates whether the storage for - a volume should be ThickProvisioned or ThinProvisioned. - Default is ThinProvisioned. type: string storagePool: - description: storagePool is the ScaleIO Storage Pool associated - with the protection domain. type: string system: - description: system is the name of the storage system as - configured in ScaleIO. type: string volumeName: - description: volumeName is the name of a volume already - created in the ScaleIO system that is associated with - this volume source. type: string required: - gateway @@ -1595,54 +742,19 @@ spec: - system type: object secret: - description: 'secret represents a secret that should populate - this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' properties: defaultMode: - description: 'defaultMode is Optional: mode bits used to - set permissions on created files by default. Must be an - octal value between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. Defaults to - 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' format: int32 type: integer items: - description: items If unspecified, each key-value pair in - the Data field of the referenced Secret will be projected - into the volume as a file whose name is the key and content - is the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in - the Secret, the volume setup will error unless it is marked - optional. Paths must be relative and may not contain the - '..' path or start with '..'. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used to - set permissions on this file. Must be an octal value - between 0000 and 0777 or a decimal value between - 0 and 511. 
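# The projected volume whose source descriptions are removed above combines
# configMap, secret, downwardAPI, and serviceAccountToken data under a single
# mount. A sketch under assumed object names:
volumes:
  - name: bundle
    projected:
      defaultMode: 0440                # octal in YAML; JSON would need 288
      sources:
        - configMap:
            name: app-config           # assumed ConfigMap
            items:
              - key: settings.yaml
                path: settings.yaml
        - secret:
            name: app-secret           # assumed Secret; all keys projected
        - serviceAccountToken:
            expirationSeconds: 3600    # the documented default of 1 hour
            path: token
        - downwardAPI:
            items:
              - path: labels
                fieldRef:
                  fieldPath: metadata.labels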
YAML accepts both octal and decimal values, - JSON requires decimal values for mode bits. If not - specified, the volume defaultMode will be used. - This might be in conflict with other options that - affect the file mode, like fsGroup, and the result - can be other mode bits set.' format: int32 type: integer path: - description: path is the relative path of the file - to map the key to. May not be an absolute path. - May not contain the path element '..'. May not start - with the string '..'. type: string required: - key @@ -1650,76 +762,36 @@ spec: type: object type: array optional: - description: optional field specify whether the Secret or - its keys must be defined type: boolean secretName: - description: 'secretName is the name of the secret in the - pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. properties: fsType: - description: fsType is the filesystem type to mount. Must - be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. type: string readOnly: - description: readOnly defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: secretRef specifies the secret to use for obtaining - the StorageOS API credentials. If not specified, default - values will be attempted. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object x-kubernetes-map-type: atomic volumeName: - description: volumeName is the human-readable name of the - StorageOS volume. Volume names are only unique within - a namespace. type: string volumeNamespace: - description: volumeNamespace specifies the scope of the - volume within StorageOS. If no namespace is specified - then the Pod's namespace will be used. This allows the - Kubernetes name scoping to be mirrored within StorageOS - for tighter integration. Set VolumeName to any name to - override the default behaviour. Set to "default" if you - are not using namespaces within StorageOS. Namespaces - that do not pre-exist within StorageOS will be created. type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine properties: fsType: - description: fsType is filesystem type to mount. Must be - a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. type: string storagePolicyID: - description: storagePolicyID is the storage Policy Based - Management (SPBM) profile ID associated with the StoragePolicyName. type: string storagePolicyName: - description: storagePolicyName is the storage Policy Based - Management (SPBM) profile name. 
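# The secret volume fields above keep their documented defaults (defaultMode
# 0644, every key projected unless items is set). A short illustrative example;
# the Secret name and key are hypothetical:
volumes:
  - name: creds
    secret:
      secretName: db-credentials       # hypothetical
      defaultMode: 0400
      optional: false
      items:
        - key: password
          path: db/password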
type: string volumePath: - description: volumePath is the path that identifies vSphere - volume vmdk type: string required: - volumePath diff --git a/operations/agent-static-operator/crds/monitoring.grafana.com_logsinstances.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_logsinstances.yaml index f36440ab0cd0..517bb30c2ef2 100644 --- a/operations/agent-static-operator/crds/monitoring.grafana.com_logsinstances.yaml +++ b/operations/agent-static-operator/crds/monitoring.grafana.com_logsinstances.yaml @@ -20,114 +20,60 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: LogsInstance controls an individual logs instance within a Grafana - Agent deployment. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: Spec holds the specification of the desired behavior for - the logs instance. properties: additionalScrapeConfigs: - description: "AdditionalScrapeConfigs allows specifying a key of a - Secret containing additional Grafana Agent logging scrape configurations. - Scrape configurations specified are appended to the configurations - generated by the Grafana Agent Operator. \n Job configurations specified - must have the form as specified in the official Promtail documentation: - \n https://grafana.com/docs/loki/latest/clients/promtail/configuration/#scrape_configs - \n As scrape configs are appended, the user is responsible to make - sure it is valid. Note that using this feature may expose the possibility - to break upgrades of Grafana Agent. It is advised to review both - Grafana Agent and Promtail release notes to ensure that no incompatible - scrape configs are going to break Grafana Agent after the upgrade." properties: key: - description: The key of the secret to select from. Must be a - valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic clients: - description: Clients controls where logs are written to for this instance. items: - description: LogsClientSpec defines the client integration for logs, - indicating which Loki server to send logs to. properties: backoffConfig: - description: Configures how to retry requests to Loki when a - request fails. Defaults to a minPeriod of 500ms, maxPeriod - of 5m, and maxRetries of 10. properties: maxPeriod: - description: Maximum backoff time between retries. type: string maxRetries: - description: Maximum number of retries to perform before - giving up a request. type: integer minPeriod: - description: Initial backoff time between retries. Time - between retries is increased exponentially. 
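# Per the removed backoffConfig description, retries against Loki default to a
# minPeriod of 500ms, a maxPeriod of 5m, and 10 retries. Making those defaults
# explicit in a client entry looks like this; the endpoint URL is a placeholder:
clients:
  - url: https://loki.example.net/loki/api/v1/push   # hypothetical endpoint
    backoffConfig:
      minPeriod: 500ms
      maxPeriod: 5m
      maxRetries: 10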
type: string type: object basicAuth: - description: BasicAuth for the Loki server. properties: password: - description: The secret in the service monitor namespace - that contains the password for authentication. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic username: - description: The secret in the service monitor namespace - that contains the username for authentication. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -135,67 +81,40 @@ spec: x-kubernetes-map-type: atomic type: object batchSize: - description: Maximum batch size (in bytes) of logs to accumulate - before sending the batch to Loki. type: integer batchWait: - description: Maximum amount of time to wait before sending a - batch, even if that batch isn't full. type: string bearerToken: - description: BearerToken used for remote_write. type: string bearerTokenFile: - description: BearerTokenFile used to read bearer token. type: string externalLabels: additionalProperties: type: string - description: ExternalLabels are labels to add to any time series - when sending data to Loki. type: object oauth2: - description: Oauth2 for URL properties: clientId: - description: The secret or configmap containing the OAuth2 - client id properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -203,19 +122,12 @@ spec: x-kubernetes-map-type: atomic type: object clientSecret: - description: The secret containing the OAuth2 client secret properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -224,15 +136,12 @@ spec: endpointParams: additionalProperties: type: string - description: Parameters to append to the token URL type: object scopes: - description: OAuth2 scopes used for the token request items: type: string type: array tokenUrl: - description: The URL to fetch the token from minLength: 1 type: string required: @@ -241,61 +150,34 @@ spec: - tokenUrl type: object proxyUrl: - description: ProxyURL to proxy requests through. Optional. type: string tenantId: - description: Tenant ID used by default to push logs to Loki. - If omitted assumes remote Loki is running in single-tenant - mode or an authentication layer is used to inject an X-Scope-OrgID - header. type: string timeout: - description: Maximum time to wait for a server to respond to - a request. type: string tlsConfig: - description: TLSConfig to use for the client. Only used when - the protocol of the URL is https. properties: ca: - description: Certificate authority used when verifying server - certificates. properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -303,47 +185,28 @@ spec: x-kubernetes-map-type: atomic type: object caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. type: string cert: - description: Client certificate to present when doing client-authentication. properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
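# Pulling the client fields above together: a sketch of a LogsInstance that
# authenticates to Loki with basic auth from a Secret. The example URL comes
# from the removed url description; the Secret and label values are assumed:
apiVersion: monitoring.grafana.com/v1alpha1
kind: LogsInstance
metadata:
  name: primary
  namespace: monitoring
spec:
  clients:
    - url: https://logs-prod-us-central1.grafana.net/loki/api/v1/push
      tenantId: "1"                    # omit when Loki runs single-tenant
      basicAuth:
        username:
          name: loki-credentials       # assumed Secret
          key: username
        password:
          name: loki-credentials
          key: password
      externalLabels:
        cluster: prod                  # assumed label
  podLogsSelector:
    matchLabels:
      instance: primary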
type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -351,76 +214,42 @@ spec: x-kubernetes-map-type: atomic type: object certFile: - description: Path to the client cert file in the Prometheus - container for the targets. type: string insecureSkipVerify: - description: Disable target certificate validation. type: boolean keyFile: - description: Path to the client key file in the Prometheus - container for the targets. type: string keySecret: - description: Secret containing the client key file for the - targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic serverName: - description: Used to verify the hostname for the targets. type: string type: object url: - description: 'URL is the URL where Loki is listening. Must be - a full HTTP URL, including protocol. Required. Example: https://logs-prod-us-central1.grafana.net/loki/api/v1/push.' type: string required: - url type: object type: array podLogsNamespaceSelector: - description: Set of labels to determine which namespaces should be - watched for PodLogs. If not provided, checks only namespace of the - instance. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. properties: key: - description: key is the label key that the selector applies - to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array @@ -432,41 +261,19 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. type: object type: object x-kubernetes-map-type: atomic podLogsSelector: - description: Determines which PodLogs should be selected for including - in this instance. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. properties: key: - description: key is the label key that the selector applies - to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. type: string values: - description: values is an array of string values. 
If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array @@ -478,20 +285,12 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. type: object type: object x-kubernetes-map-type: atomic targetConfig: - description: Configures how tailed targets are watched. properties: syncPeriod: - description: Period to resync directories being watched and files - being tailed to discover new ones or stop watching removed ones. type: string type: object type: object diff --git a/operations/agent-static-operator/crds/monitoring.grafana.com_metricsinstances.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_metricsinstances.yaml index 015c0339ce1a..610193f440f9 100644 --- a/operations/agent-static-operator/crds/monitoring.grafana.com_metricsinstances.yaml +++ b/operations/agent-static-operator/crds/monitoring.grafana.com_metricsinstances.yaml @@ -20,89 +20,41 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: MetricsInstance controls an individual Metrics instance within - a Grafana Agent deployment. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: Spec holds the specification of the desired behavior for - the Metrics instance. properties: additionalScrapeConfigs: - description: 'AdditionalScrapeConfigs lets you specify a key of a - Secret containing additional Grafana Agent Prometheus scrape configurations. - The specified scrape configurations are appended to the configurations - generated by Grafana Agent Operator. Specified job configurations - must have the form specified in the official Prometheus documentation: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. - As scrape configs are appended, you must make sure the configuration - is still valid. Note that it''s possible that this feature will - break future upgrades of Grafana Agent. Review both Grafana Agent - and Prometheus release notes to ensure that no incompatible scrape - configs will break Grafana Agent after the upgrade.' properties: key: - description: The key of the secret to select from. Must be a - valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string optional: - description: Specify whether the Secret or its key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic maxWALTime: - description: MaxWALTime is the maximum amount of time that series - and samples can exist in the WAL before being forcibly deleted. type: string minWALTime: - description: MinWALTime is the minimum amount of time that series - and samples can exist in the WAL before being considered for deletion. type: string podMonitorNamespaceSelector: - description: PodMonitorNamespaceSelector are the set of labels to - determine which namespaces to watch for PodMonitor discovery. If - nil, it only checks its own namespace. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. properties: key: - description: key is the label key that the selector applies - to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array @@ -114,41 +66,19 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. type: object type: object x-kubernetes-map-type: atomic podMonitorSelector: - description: PodMonitorSelector determines which PodMonitors to selected - for target discovery. Experimental. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. properties: key: - description: key is the label key that the selector applies - to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array @@ -160,42 +90,19 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. type: object type: object x-kubernetes-map-type: atomic probeNamespaceSelector: - description: ProbeNamespaceSelector is the set of labels that determines - which namespaces to watch for Probe discovery. If nil, it only checks - own namespace. 
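# A sketch of a MetricsInstance combining the WAL and discovery fields above.
# Leaving podMonitorNamespaceSelector unset restricts discovery to the
# instance's own namespace, as the removed descriptions state; the values here
# are illustrative:
apiVersion: monitoring.grafana.com/v1alpha1
kind: MetricsInstance
metadata:
  name: primary
  namespace: monitoring
spec:
  minWALTime: 5m                       # assumed retention window
  maxWALTime: 4h
  podMonitorSelector:
    matchLabels:
      instance: primary
  probeSelector:
    matchLabels:
      instance: primary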
properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. properties: key: - description: key is the label key that the selector applies - to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array @@ -207,41 +114,19 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. type: object type: object x-kubernetes-map-type: atomic probeSelector: - description: ProbeSelector determines which Probes to select for target - discovery. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. properties: key: - description: key is the label key that the selector applies - to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array @@ -253,62 +138,35 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. type: object type: object x-kubernetes-map-type: atomic remoteFlushDeadline: - description: RemoteFlushDeadline is the deadline for flushing data - when an instance shuts down. type: string remoteWrite: - description: RemoteWrite controls remote_write settings for this instance. items: - description: RemoteWriteSpec defines the remote_write configuration - for Prometheus. properties: basicAuth: - description: BasicAuth for the URL. properties: password: - description: The secret in the service monitor namespace - that contains the password for authentication. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic username: - description: The secret in the service monitor namespace - that contains the username for authentication. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -316,78 +174,45 @@ spec: x-kubernetes-map-type: atomic type: object bearerToken: - description: BearerToken used for remote_write. type: string bearerTokenFile: - description: BearerTokenFile used to read bearer token. type: string headers: additionalProperties: type: string - description: Headers is a set of custom HTTP headers to be sent - along with each remote_write request. Be aware that any headers - set by Grafana Agent itself can't be overwritten. type: object metadataConfig: - description: MetadataConfig configures the sending of series - metadata to remote storage. properties: send: - description: Send enables metric metadata to be sent to - remote storage. type: boolean sendInterval: - description: SendInterval controls how frequently metric - metadata is sent to remote storage. type: string type: object name: - description: Name of the remote_write queue. Must be unique - if specified. The name is used in metrics and logging in order - to differentiate queues. type: string oauth2: - description: Oauth2 for URL properties: clientId: - description: The secret or configmap containing the OAuth2 - client id properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -395,19 +220,12 @@ spec: x-kubernetes-map-type: atomic type: object clientSecret: - description: The secret containing the OAuth2 client secret properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -416,15 +234,12 @@ spec: endpointParams: additionalProperties: type: string - description: Parameters to append to the token URL type: object scopes: - description: OAuth2 scopes used for the token request items: type: string type: array tokenUrl: - description: The URL to fetch the token from minLength: 1 type: string required: @@ -433,106 +248,57 @@ spec: - tokenUrl type: object proxyUrl: - description: ProxyURL to proxy requests through. Optional. type: string queueConfig: - description: QueueConfig allows tuning of the remote_write queue - parameters. properties: batchSendDeadline: - description: BatchSendDeadline is the maximum time a sample - will wait in the buffer. type: string capacity: - description: Capacity is the number of samples to buffer - per shard before samples start being dropped. type: integer maxBackoff: - description: MaxBackoff is the maximum retry delay. type: string maxRetries: - description: MaxRetries is the maximum number of times to - retry a batch on recoverable errors. type: integer maxSamplesPerSend: - description: MaxSamplesPerSend is the maximum number of - samples per send. type: integer maxShards: - description: MaxShards is the maximum number of shards, - i.e., the amount of concurrency. type: integer minBackoff: - description: MinBackoff is the initial retry delay. MinBackoff - is doubled for every retry. type: string minShards: - description: MinShards is the minimum number of shards, - i.e., the amount of concurrency. type: integer retryOnRateLimit: - description: RetryOnRateLimit retries requests when encountering - rate limits. type: boolean type: object remoteTimeout: - description: RemoteTimeout is the timeout for requests to the - remote_write endpoint. type: string sigv4: - description: SigV4 configures SigV4-based authentication to - the remote_write endpoint. SigV4-based authentication is used - if SigV4 is defined, even with an empty object. properties: accessKey: - description: AccessKey holds the secret of the AWS API access - key to use for signing. If not provided, the environment - variable AWS_ACCESS_KEY_ID is used. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic profile: - description: Profile is the named AWS profile to use for - authentication. type: string region: - description: Region of the AWS endpoint. If blank, the region - from the default credentials chain is used. type: string roleARN: - description: RoleARN is the AWS Role ARN to use for authentication, - as an alternative for using the AWS API keys. type: string secretKey: - description: SecretKey of the AWS API to use for signing. - If blank, the environment variable AWS_SECRET_ACCESS_KEY - is used. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key @@ -540,47 +306,28 @@ spec: x-kubernetes-map-type: atomic type: object tlsConfig: - description: TLSConfig to use for remote_write. properties: ca: - description: Certificate authority used when verifying server - certificates. properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -588,47 +335,28 @@ spec: x-kubernetes-map-type: atomic type: object caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. type: string cert: - description: Client certificate to present when doing client-authentication. properties: configMap: - description: ConfigMap containing data to use for the - targets. properties: key: - description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic secret: - description: Secret containing data to use for the targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined type: boolean required: - key @@ -636,57 +364,33 @@ spec: x-kubernetes-map-type: atomic type: object certFile: - description: Path to the client cert file in the Prometheus - container for the targets. type: string insecureSkipVerify: - description: Disable target certificate validation. type: boolean keyFile: - description: Path to the client key file in the Prometheus - container for the targets. type: string keySecret: - description: Secret containing the client key file for the - targets. properties: key: - description: The key of the secret to select from. Must - be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string optional: - description: Specify whether the Secret or its key must - be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic serverName: - description: Used to verify the hostname for the targets. type: string type: object url: - description: URL of the endpoint to send samples to. type: string writeRelabelConfigs: - description: WriteRelabelConfigs holds relabel_configs to relabel - samples before they are sent to the remote_write endpoint. items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It - defines ``-section of Prometheus - configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: action: default: replace - description: Action to perform based on regex matching. - Default is 'replace'. uppercase and lowercase actions - require Prometheus >= 2.36. enum: - replace - Replace @@ -712,39 +416,20 @@ spec: - DropEqual type: string modulus: - description: Modulus to take of the hash of the source - label values. format: int64 type: integer regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' type: string replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' type: string separator: - description: Separator placed between concatenated source - label values. default is ';'. type: string sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular - expression for the replace, keep, and drop actions. items: - description: LabelName is a valid Prometheus label name - which may only contain ASCII letters, numbers, as - well as underscores. pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ type: string type: array targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. type: string type: object type: array @@ -753,33 +438,15 @@ spec: type: object type: array serviceMonitorNamespaceSelector: - description: ServiceMonitorNamespaceSelector is the set of labels - that determine which namespaces to watch for ServiceMonitor discovery. - If nil, it only checks its own namespace. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. properties: key: - description: key is the label key that the selector applies - to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array @@ -791,41 +458,19 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. 
A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. type: object type: object x-kubernetes-map-type: atomic serviceMonitorSelector: - description: ServiceMonitorSelector determines which ServiceMonitors - to select for target discovery. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. properties: key: - description: key is the label key that the selector applies - to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array @@ -837,23 +482,12 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. type: object type: object x-kubernetes-map-type: atomic walTruncateFrequency: - description: WALTruncateFrequency specifies how frequently to run - the WAL truncation process. Higher values cause the WAL to increase - and for old series to stay in the WAL longer, but reduces the chance - of data loss when remote_write fails for longer than the given frequency. type: string writeStaleOnShutdown: - description: WriteStaleOnShutdown writes staleness markers on shutdown - for all series. type: boolean type: object type: object diff --git a/operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml index ff6531f61e88..f22d051b5131 100644 --- a/operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml +++ b/operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml @@ -20,325 +20,146 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: PodLogs defines how to collect logs for a pod. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: Spec holds the specification of the desired behavior for - the PodLogs. properties: jobLabel: - description: The label to use to retrieve the job name from. 
type: string namespaceSelector: - description: Selector to select which namespaces the Pod objects are - discovered from. properties: any: - description: Boolean describing whether all namespaces are selected - in contrast to a list restricting them. type: boolean matchNames: - description: List of namespace names to select from. items: type: string type: array type: object pipelineStages: - description: Pipeline stages for this pod. Pipeline stages support - transforming and filtering log lines. items: - description: "PipelineStageSpec defines an individual pipeline stage. - Each stage type is mutually exclusive and no more than one may - be set per stage. \n More information on pipelines can be found - in the Promtail documentation: https://grafana.com/docs/loki/latest/clients/promtail/pipelines/" properties: cri: - description: 'CRI is a parsing stage that reads log lines using - the standard CRI logging format. Supply cri: {} to enable.' type: object docker: - description: 'Docker is a parsing stage that reads log lines - using the standard Docker logging format. Supply docker: {} - to enable.' type: object drop: - description: Drop is a filtering stage that lets you drop certain - logs. properties: dropCounterReason: - description: Every time a log line is dropped, the metric - logentry_dropped_lines_total is incremented. A "reason" - label is added, and can be customized by providing a custom - value here. Defaults to "drop_stage". type: string expression: - description: "RE2 regular expression. \n If source is provided, - the regex attempts to match the source. \n If no source - is provided, then the regex attempts to attach the log - line. \n If the provided regex matches the log line or - a provided source, the line is dropped." type: string longerThan: - description: LongerThan will drop a log line if it its content - is longer than this value (in bytes). Can be expressed - as an integer (8192) or a number with a suffix (8kb). type: string olderThan: - description: OlderThan will be parsed as a Go duration. - If the log line's timestamp is older than the current - time minus the provided duration, it will be dropped. type: string source: - description: Name from the extract data to parse. If empty, - uses the log message. type: string value: - description: "Value can only be specified when source is - specified. If the value provided is an exact match for - the given source then the line will be dropped. \n Mutually - exclusive with expression." type: string type: object json: - description: "JSON is a parsing stage that reads the log line - as JSON and accepts JMESPath expressions to extract data. - \n Information on JMESPath: http://jmespath.org/" properties: expressions: additionalProperties: type: string - description: "Set of the key/value pairs of JMESPath expressions. - The key will be the key in the extracted data while the - expression will be the value, evaluated as a JMESPath - from the source data. \n Literal JMESPath expressions - can be used by wrapping a key in double quotes, which - then must be wrapped again in single quotes in YAML so - they get passed to the JMESPath parser." type: object source: - description: Name from the extracted data to parse as JSON. - If empty, uses entire log message. type: string type: object labelAllow: - description: LabelAllow is an action stage that only allows - the provided labels to be included in the label set that is - sent to Loki with the log entry. 
items: type: string type: array labelDrop: - description: LabelDrop is an action stage that drops labels - from the label set that is sent to Loki with the log entry. items: type: string type: array labels: additionalProperties: type: string - description: "Labels is an action stage that takes data from - the extracted map and modifies the label set that is sent - to Loki with the log entry. \n The key is REQUIRED and represents - the name for the label that will be created. Value is optional - and will be the name from extracted data to use for the value - of the label. If the value is not provided, it defaults to - match the key." type: object limit: - description: Limit is a rate-limiting stage that throttles logs - based on several options. properties: burst: - description: The cap in the quantity of burst lines that - Promtail will push to Loki. type: integer drop: - description: "When drop is true, log lines that exceed the - current rate limit are discarded. When drop is false, - log lines that exceed the current rate limit wait to enter - the back pressure mode. \n Defaults to false." type: boolean rate: - description: The rate limit in lines per second that Promtail - will push to Loki. type: integer type: object match: - description: Match is a filtering stage that conditionally applies - a set of stages or drop entries when a log entry matches a - configurable LogQL stream selector and filter expressions. properties: action: - description: Determines what action is taken when the selector - matches the log line. Can be keep or drop. Defaults to - keep. When set to drop, entries are dropped and no later - metrics are recorded. Stages must be empty when dropping - metrics. type: string dropCounterReason: - description: Every time a log line is dropped, the metric - logentry_dropped_lines_total is incremented. A "reason" - label is added, and can be customized by providing a custom - value here. Defaults to "match_stage." type: string pipelineName: - description: Names the pipeline. When defined, creates an - additional label in the pipeline_duration_seconds histogram, - where the value is concatenated with job_name using an - underscore. type: string selector: - description: LogQL stream selector and filter expressions. - Required. type: string stages: - description: "Nested set of pipeline stages to execute when - action is keep and the log line matches selector. \n An - example value for stages may be: \n stages: | - json: - {} - labelAllow: [foo, bar] \n Note that stages is a string - because SIG API Machinery does not support recursive types, - and so it cannot be validated for correctness. Be careful - not to mistype anything." type: string required: - selector type: object metrics: additionalProperties: - description: MetricsStageSpec is an action stage that allows - for defining and updating metrics based on data from the - extracted map. Created metrics are not pushed to Loki or - Prometheus and are instead exposed via the /metrics endpoint - of the Grafana Agent pod. The Grafana Agent Operator should - be configured with a MetricsInstance that discovers the - logging DaemonSet to collect metrics created by this stage. properties: action: - description: "The action to take against the metric. Required. - \n Must be either \"inc\" or \"add\" for type: counter - or type: histogram. When type: gauge, must be one of - \"set\", \"inc\", \"dec\", \"add\", or \"sub\". \n \"add\", - \"set\", or \"sub\" requires the extracted value to - be convertible to a positive float." 
type: string buckets: - description: 'Buckets to create. Bucket values must be - convertible to float64s. Extremely large or small numbers - are subject to some loss of precision. Only valid for - type: histogram.' items: type: string type: array countEntryBytes: - description: "If true all log line bytes are counted. - Can only be set with matchAll: true and action: add. - \n Only valid for type: counter." type: boolean description: - description: Sets the description for the created metric. type: string matchAll: - description: "If true, all log lines are counted without - attempting to match the source to the extracted map. - Mutually exclusive with value. \n Only valid for type: - counter." type: boolean maxIdleDuration: - description: "Label values on metrics are dynamic which - can cause exported metrics to go stale. To prevent unbounded - cardinality, any metrics not updated within MaxIdleDuration - are removed. \n Must be greater or equal to 1s. Defaults - to 5m." type: string prefix: - description: Sets the custom prefix name for the metric. - Defaults to "promtail_custom_". type: string source: - description: Key from the extracted data map to use for - the metric. Defaults to the metrics name if not present. type: string type: - description: The metric type to create. Must be one of - counter, gauge, histogram. Required. type: string value: - description: Filters down source data and only changes - the metric if the targeted value matches the provided - string exactly. If not present, all data matches. type: string required: - action - type type: object - description: Metrics is an action stage that supports defining - and updating metrics based on data from the extracted map. - Created metrics are not pushed to Loki or Prometheus and are - instead exposed via the /metrics endpoint of the Grafana Agent - pod. The Grafana Agent Operator should be configured with - a MetricsInstance that discovers the logging DaemonSet to - collect metrics created by this stage. type: object multiline: - description: Multiline stage merges multiple lines into a multiline - block before passing it on to the next stage in the pipeline. properties: firstLine: - description: RE2 regular expression. Creates a new multiline - block when matched. Required. type: string maxLines: - description: Maximum number of lines a block can have. A - new block is started if the number of lines surpasses - this value. Defaults to 128. type: integer maxWaitTime: - description: Maximum time to wait before passing on the - multiline block to the next stage if no new lines are - received. Defaults to 3s. type: string required: - firstLine type: object output: - description: Output stage is an action stage that takes data - from the extracted map and changes the log line that will - be sent to Loki. properties: source: - description: Name from extract data to use for the log entry. - Required. type: string required: - source type: object pack: - description: Pack is a transform stage that lets you embed extracted - values and labels into the log line by packing the log line - and labels inside of a JSON object. properties: ingestTimestamp: - description: If the resulting log line should use any existing - timestamp or use time.Now() when the line was created. - Set to true when combining several log streams from different - containers to avoid out of order errors. type: boolean labels: - description: Name from extracted data or line labels. Required. - Labels provided here are automatically removed from output - labels. 
items: type: string type: array @@ -346,107 +167,57 @@ spec: - labels type: object regex: - description: Regex is a parsing stage that parses a log line - using a regular expression. Named capture groups in the regex - allows for adding data into the extracted map. properties: expression: - description: RE2 regular expression. Each capture group - MUST be named. Required. type: string source: - description: Name from extracted data to parse. If empty, - defaults to using the log message. type: string required: - expression type: object replace: - description: Replace is a parsing stage that parses a log line - using a regular expression and replaces the log line. Named - capture groups in the regex allows for adding data into the - extracted map. properties: expression: - description: RE2 regular expression. Each capture group - MUST be named. Required. type: string replace: - description: Value to replace the captured group with. type: string source: - description: Name from extracted data to parse. If empty, - defaults to using the log message. type: string required: - expression type: object template: - description: Template is a transform stage that manipulates - the values in the extracted map using Go's template syntax. properties: source: - description: Name from extracted data to parse. Required. - If empty, defaults to using the log message. type: string template: - description: Go template string to use. Required. In addition - to normal template functions, ToLower, ToUpper, Replace, - Trim, TrimLeft, TrimRight, TrimPrefix, and TrimSpace are - also available. type: string required: - source - template type: object tenant: - description: Tenant is an action stage that sets the tenant - ID for the log entry picking it from a field in the extracted - data map. If the field is missing, the default LogsClientSpec.tenantId - will be used. properties: label: - description: Name from labels whose value should be set - as tenant ID. Mutually exclusive with source and value. type: string source: - description: Name from extracted data to use as the tenant - ID. Mutually exclusive with label and value. type: string value: - description: Value to use for the template ID. Useful when - this stage is used within a conditional pipeline such - as match. Mutually exclusive with label and source. type: string type: object timestamp: - description: Timestamp is an action stage that can change the - timestamp of a log line before it is sent to Loki. If not - present, the timestamp of a log line defaults to the time - when the log line was read. properties: actionOnFailure: - description: Action to take when the timestamp can't be - extracted or parsed. Can be skip or fudge. Defaults to - fudge. type: string fallbackFormats: - description: Fallback formats to try if format fails. items: type: string type: array format: - description: 'Determines format of the time string. Required. - Can be one of: ANSIC, UnixDate, RubyDate, RFC822, RFC822Z, - RFC850, RFC1123, RFC1123Z, RFC3339, RFC3339Nano, Unix, - UnixMs, UnixUs, UnixNs.' type: string location: - description: IANA Timezone Database string. type: string source: - description: Name from extracted data to use as the timestamp. - Required. type: string required: - format @@ -455,26 +226,14 @@ spec: type: object type: array podTargetLabels: - description: PodTargetLabels transfers labels on the Kubernetes Pod - onto the target. items: type: string type: array relabelings: - description: "RelabelConfigs to apply to logs before delivering. 
Grafana - Agent Operator automatically adds relabelings for a few standard - Kubernetes fields and replaces original scrape job name with __tmp_logs_job_name. - \n More info: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#relabel_configs" items: - description: 'RelabelConfig allows dynamic rewriting of the label - set, being applied to samples before ingestion. It defines ``-section - of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: action: default: replace - description: Action to perform based on regex matching. Default - is 'replace'. uppercase and lowercase actions require Prometheus - >= 2.36. enum: - replace - Replace @@ -500,67 +259,33 @@ spec: - DropEqual type: string modulus: - description: Modulus to take of the hash of the source label - values. format: int64 type: integer regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' type: string replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex capture - groups are available. Default is '$1' type: string separator: - description: Separator placed between concatenated source label - values. default is ';'. type: string sourceLabels: - description: The source labels select values from existing labels. - Their content is concatenated using the configured separator - and matched against the configured regular expression for - the replace, keep, and drop actions. items: - description: LabelName is a valid Prometheus label name which - may only contain ASCII letters, numbers, as well as underscores. pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ type: string type: array targetLabel: - description: Label to which the resulting value is written in - a replace action. It is mandatory for replace actions. Regex - capture groups are available. type: string type: object type: array selector: - description: Selector to select Pod objects. Required. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. properties: key: - description: key is the label key that the selector applies - to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array @@ -572,11 +297,6 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. 
type: object type: object x-kubernetes-map-type: atomic diff --git a/operations/helm/charts/grafana-agent/CHANGELOG.md b/operations/helm/charts/grafana-agent/CHANGELOG.md index f281cc937317..f6b9caf9aedb 100644 --- a/operations/helm/charts/grafana-agent/CHANGELOG.md +++ b/operations/helm/charts/grafana-agent/CHANGELOG.md @@ -10,6 +10,23 @@ internal API changes are not present. Unreleased ---------- +0.31.1 (2024-01-19) +------------------- + +### Enhancements + +- Add `kubectl.kubernetes.io/default-container: grafana-agent` annotation to allow various tools to choose the `grafana-agent` container as the default target. (@aerfio) + +- Add support for topology spread constraints in the Helm chart. (@etiennep) + +- Update Grafana Agent version to v0.39.1. (@marctc) + +### Bugfixes + +- Fix a bug preventing the `.Values.configReloader.image.digest` Helm value from being correctly retrieved. (@claudioscalzo) + +- Fix a bug preventing digests from being used as labels because of their length. Label values [must be 63 characters or less](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set). (@claudioscalzo) + 0.31.0 (2024-01-10) ------------------- diff --git a/operations/helm/charts/grafana-agent/Chart.yaml b/operations/helm/charts/grafana-agent/Chart.yaml index 992503befc2a..76675dcf65a7 100644 --- a/operations/helm/charts/grafana-agent/Chart.yaml +++ b/operations/helm/charts/grafana-agent/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: grafana-agent description: 'Grafana Agent' type: application -version: 0.31.0 -appVersion: 'v0.39.0' +version: 0.31.1 +appVersion: 'v0.39.1' dependencies: - name: crds diff --git a/operations/helm/charts/grafana-agent/README.md b/operations/helm/charts/grafana-agent/README.md index 281fc208cf23..4904736239cc 100644 --- a/operations/helm/charts/grafana-agent/README.md +++ b/operations/helm/charts/grafana-agent/README.md @@ -1,6 +1,6 @@ # Grafana Agent Helm chart -![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.31.0](https://img.shields.io/badge/Version-0.31.0-informational?style=flat-square) ![AppVersion: v0.39.0](https://img.shields.io/badge/AppVersion-v0.39.0-informational?style=flat-square) +![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.31.1](https://img.shields.io/badge/Version-0.31.1-informational?style=flat-square) ![AppVersion: v0.39.1](https://img.shields.io/badge/AppVersion-v0.39.1-informational?style=flat-square) Helm chart for deploying [Grafana Agent][] to Kubernetes. @@ -89,6 +89,7 @@ use the older mode (called "static mode"), set the `agent.mode` value to | controller.priorityClassName | string | `""` | priorityClassName to apply to Grafana Agent pods. | | controller.replicas | int | `1` | Number of pods to deploy. Ignored when controller.type is 'daemonset'. | | controller.tolerations | list | `[]` | Tolerations to apply to Grafana Agent pods. | +| controller.topologySpreadConstraints | list | `[]` | Topology Spread Constraints to apply to Grafana Agent pods. | | controller.type | string | `"daemonset"` | Type of controller to use for deploying Grafana Agent in the cluster. Must be one of 'daemonset', 'deployment', or 'statefulset'. | | controller.updateStrategy | object | `{}` | Update strategy for updating deployed Pods. | | controller.volumeClaimTemplates | list | `[]` | volumeClaimTemplates to add when controller.type is 'statefulset'.
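The `_helpers.tpl` hunk just below changes how the `app.kubernetes.io/version` label is computed: instead of only trimming the leading delimiter from `grafana-agent.imageId`, it crops digests to a 7-character short sha so the label respects the 63-character limit cited in the CHANGELOG above; the companion one-liner fixes the reference from `.Values.configReloader.digest` to `.Values.configReloader.image.digest`. A sketch of how the new pipeline evaluates, using the digest from the new `with-digests-values.yaml` CI fixture as an example:

{{/* Digest case: imageId renders "@sha256:<64 hex chars>".
     "@sha256:82575a7be3e4770e..." | trunc 15             => "@sha256:82575a7"
                                    | trimPrefix "@sha256" => ":82575a7"
                                    | trimPrefix ":"       => "82575a7"  (7-char short sha)
     Tag case: imageId renders ":<tag>"; trunc 15 leaves typical tags intact.
     ":v0.39.1" | trunc 15 | trimPrefix "@sha256" | trimPrefix ":"       => "v0.39.1" */}}

Cropping to the short sha keeps the version label human-readable while staying well under the label-length ceiling.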
| diff --git a/operations/helm/charts/grafana-agent/ci/pod_annotations-values.yaml b/operations/helm/charts/grafana-agent/ci/pod_annotations-values.yaml new file mode 100644 index 000000000000..6af944e7eebc --- /dev/null +++ b/operations/helm/charts/grafana-agent/ci/pod_annotations-values.yaml @@ -0,0 +1,4 @@ +# Test correct rendering of the pod annotations +controller: + podAnnotations: + testAnnotationKey: testAnnotationValue diff --git a/operations/helm/charts/grafana-agent/ci/topologyspreadconstraints-values.yaml b/operations/helm/charts/grafana-agent/ci/topologyspreadconstraints-values.yaml new file mode 100644 index 000000000000..d69b5662c255 --- /dev/null +++ b/operations/helm/charts/grafana-agent/ci/topologyspreadconstraints-values.yaml @@ -0,0 +1,10 @@ +controller: + type: deployment + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent diff --git a/operations/helm/charts/grafana-agent/ci/with-digests-values.yaml b/operations/helm/charts/grafana-agent/ci/with-digests-values.yaml new file mode 100644 index 000000000000..d742dd2947f3 --- /dev/null +++ b/operations/helm/charts/grafana-agent/ci/with-digests-values.yaml @@ -0,0 +1,10 @@ +image: + registry: "docker.io" + repository: "grafana/agent" + digest: "sha256:82575a7be3e4770e53f620298e58bcc4cdb0fd0338e01c4b206cae9e3ca46ebf" + +configReloader: + image: + registry: "docker.io" + repository: "jimmidyson/configmap-reload" + digest: "sha256:5af9d3041d12a3e63f115125f89b66d2ba981fe82e64302ac370c5496055059c" diff --git a/operations/helm/charts/grafana-agent/templates/_helpers.tpl b/operations/helm/charts/grafana-agent/templates/_helpers.tpl index 342a330a8aa6..7b2e825e8879 100644 --- a/operations/helm/charts/grafana-agent/templates/_helpers.tpl +++ b/operations/helm/charts/grafana-agent/templates/_helpers.tpl @@ -56,8 +56,9 @@ app.kubernetes.io/version: "vX.Y.Z" app.kubernetes.io/managed-by: {{ .Release.Service }} {{- else }} {{/* substr trims delimeter prefix char from grafana-agent.imageId output - e.g. ':' for tags and '@' for digests. */}} -app.kubernetes.io/version: {{ substr 1 -1 (include "grafana-agent.imageId" .) }} + e.g. ':' for tags and '@' for digests. + For digests, we crop the string to a 7-char (short) sha. */}} +app.kubernetes.io/version: {{ (include "grafana-agent.imageId" .) | trunc 15 | trimPrefix "@sha256" | trimPrefix ":" | quote }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} {{- end }} @@ -103,7 +104,7 @@ Calculate name of image ID to use for "config-reloader". */}} {{- define "config-reloader.imageId" -}} {{- if .Values.configReloader.image.digest }} -{{- $digest := .Values.configReloader.digest }} +{{- $digest := .Values.configReloader.image.digest }} {{- if not (hasPrefix "sha256:" $digest) }} {{- $digest = printf "sha256:%s" $digest }} {{- end }} diff --git a/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml b/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml index 907b5eb6bfc1..235bc279ec38 100644 --- a/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml +++ b/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml @@ -1,9 +1,10 @@ {{- define "grafana-agent.pod-template" -}} metadata: - {{- with .Values.controller.podAnnotations }} annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} + kubectl.kubernetes.io/default-container: grafana-agent + {{- with .Values.controller.podAnnotations }} + {{- toYaml . | nindent 4 }} + {{- end }} labels: {{- include "grafana-agent.selectorLabels" . | nindent 4 }} {{- with .Values.controller.podLabels }} @@ -57,6 +58,10 @@ spec: tolerations: {{- toYaml . | nindent 4 }} {{- end }} + {{- with .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 4 }} + {{- end }} volumes: - name: config configMap: diff --git a/operations/helm/charts/grafana-agent/values.yaml b/operations/helm/charts/grafana-agent/values.yaml index 0143cefe1962..9050976a43b9 100644 --- a/operations/helm/charts/grafana-agent/values.yaml +++ b/operations/helm/charts/grafana-agent/values.yaml @@ -179,6 +179,9 @@ controller: # -- Tolerations to apply to Grafana Agent pods. tolerations: [] + # -- Topology Spread Constraints to apply to Grafana Agent pods. + topologySpreadConstraints: [] + # -- priorityClassName to apply to Grafana Agent pods. priorityClassName: '' diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml index a99f2fd4cbf6..ac5ccd4389b9 100644 --- a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml index 340ec8ca04fd..fb64c8abfc9b 100644 --- a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml @@ -21,6 +21,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -28,7 +30,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml index 01a82312a35c..2b36fc32980a 100644 --- a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent 
app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml index 0f6c1dc493bd..252d5e276878 100644 --- a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml index a99f2fd4cbf6..ac5ccd4389b9 100644 --- a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml index b8d1f559d7a4..98397b6c00f4 100644 --- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml index 7de807bd5238..5ccee146ffef 100644 --- a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml @@ -19,6 +19,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + 
annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -26,7 +28,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml index 6a127404aecb..132210c7c2e3 100644 --- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml @@ -20,6 +20,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -27,7 +29,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml index c0b85e38db1d..6f7d11f76fa9 100644 --- a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml @@ -21,6 +21,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -28,7 +30,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml index a99f2fd4cbf6..ac5ccd4389b9 100644 --- a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml index a99f2fd4cbf6..ac5ccd4389b9 100644 --- a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: 
app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml index a99f2fd4cbf6..ac5ccd4389b9 100644 --- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml index e9b849f6ee3f..85fb01959587 100644 --- a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml index fbdb2752b3ed..b6a93df6f9f8 100644 --- a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml index c2ee2305fce5..4f45d0fc40ed 100644 --- a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent 
template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml index 8e51da6a86f5..c07733057e80 100644 --- a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml index d366cde166d3..c955003a200e 100644 --- a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml index 808c1056ba2b..1f05afd30c17 100644 --- a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -30,7 +32,7 @@ spec: - name: global-cred containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml index 014f4b84925d..0048873d187e 100644 --- a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: 
grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: quay.io/grafana/agent:v0.39.0 + image: quay.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml index 70c184364e58..0d58403356bb 100644 --- a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -43,7 +45,7 @@ spec: name: geoip containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml index 319e1cf07ad2..2ce6b7ad7a21 100644 --- a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -27,7 +29,7 @@ spec: - name: local-cred containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml index 014f4b84925d..0048873d187e 100644 --- a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: quay.io/grafana/agent:v0.39.0 + image: quay.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml index 21338cf3979f..b4c896139945 100644 --- a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml @@ 
-18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/configmap.yaml b/operations/helm/tests/pod_annotations/grafana-agent/templates/configmap.yaml new file mode 100644 index 000000000000..2fdc6f011777 --- /dev/null +++ b/operations/helm/tests/pod_annotations/grafana-agent/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: grafana-agent/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml new file mode 100644 index 000000000000..8fb72ca6f523 --- /dev/null +++ b/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml @@ -0,0 +1,76 @@ +--- +# Source: grafana-agent/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent + testAnnotationKey: testAnnotationValue + labels: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + spec: + serviceAccountName: grafana-agent + containers: + - name: grafana-agent + image: docker.io/grafana/agent:v0.39.1 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/agent/config.river + - --storage.path=/tmp/agent + - --server.http.listen-addr=0.0.0.0:80 + - --server.http.ui-path-prefix=/ + env: + - name: AGENT_MODE + value: flow + - name: AGENT_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 80 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/agent + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/agent + - --webhook-url=http://localhost:80/-/reload + volumeMounts: + - name: config + mountPath: /etc/agent + resources: + 
requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + volumes: + - name: config + configMap: + name: grafana-agent diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/rbac.yaml b/operations/helm/tests/pod_annotations/grafana-agent/templates/rbac.yaml new file mode 100644 index 000000000000..3765583fb64f --- /dev/null +++ b/operations/helm/tests/pod_annotations/grafana-agent/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: grafana-agent/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: grafana-agent/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: grafana-agent +subjects: + - kind: ServiceAccount + name: grafana-agent + namespace: default diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/service.yaml b/operations/helm/tests/pod_annotations/grafana-agent/templates/service.yaml new file mode 100644 index 000000000000..04f6eeff3c4d --- /dev/null +++ b/operations/helm/tests/pod_annotations/grafana-agent/templates/service.yaml @@ -0,0 +1,22 @@ +--- +# Source: grafana-agent/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/pod_annotations/grafana-agent/templates/serviceaccount.yaml new file mode 100644 index 000000000000..65d7e0df383f --- /dev/null +++ b/operations/helm/tests/pod_annotations/grafana-agent/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: grafana-agent/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: grafana-agent + namespace: default + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml index b98de68b1306..bb9b6a1ba64d 100644 --- a/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml 
b/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml index 1f9e23a47bf0..f676d9f3e046 100644 --- a/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml @@ -18,6 +18,8 @@ spec: app.kubernetes.io/instance: grafana-agent template: metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent labels: app.kubernetes.io/name: grafana-agent app.kubernetes.io/instance: grafana-agent @@ -25,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - -config.file=/etc/agent/config.yaml diff --git a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/configmap.yaml b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/configmap.yaml new file mode 100644 index 000000000000..2fdc6f011777 --- /dev/null +++ b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: grafana-agent/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml new file mode 100644 index 000000000000..b28114e09e4d --- /dev/null +++ b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml @@ -0,0 +1,84 @@ +--- +# Source: grafana-agent/templates/controllers/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent + labels: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + spec: + serviceAccountName: grafana-agent + containers: + - name: grafana-agent + image: docker.io/grafana/agent:v0.39.1 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/agent/config.river + - --storage.path=/tmp/agent + - --server.http.listen-addr=0.0.0.0:80 + - --server.http.ui-path-prefix=/ + env: + - name: AGENT_MODE + value: flow + - name: AGENT_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 80 + name: http-metrics + 
readinessProbe: + httpGet: + path: /-/ready + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/agent + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/agent + - --webhook-url=http://localhost:80/-/reload + volumeMounts: + - name: config + mountPath: /etc/agent + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + topologySpreadConstraints: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/name: grafana-agent + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + volumes: + - name: config + configMap: + name: grafana-agent diff --git a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/rbac.yaml b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/rbac.yaml new file mode 100644 index 000000000000..3765583fb64f --- /dev/null +++ b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: grafana-agent/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: grafana-agent/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: grafana-agent +subjects: + - kind: ServiceAccount + name: grafana-agent + namespace: default diff --git a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/service.yaml b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/service.yaml new file mode 100644 index 000000000000..04f6eeff3c4d --- /dev/null +++ b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/service.yaml @@ -0,0 +1,22 @@ +--- +# Source: grafana-agent/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/serviceaccount.yaml new file mode 100644 index 000000000000..65d7e0df383f --- /dev/null +++ b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: grafana-agent/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: grafana-agent + namespace: default + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/with-digests/grafana-agent/templates/configmap.yaml b/operations/helm/tests/with-digests/grafana-agent/templates/configmap.yaml new file mode 100644 index 000000000000..2fdc6f011777 --- /dev/null +++ b/operations/helm/tests/with-digests/grafana-agent/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: grafana-agent/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + 
discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/with-digests/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/with-digests/grafana-agent/templates/controllers/daemonset.yaml new file mode 100644 index 000000000000..fbe3f266523b --- /dev/null +++ b/operations/helm/tests/with-digests/grafana-agent/templates/controllers/daemonset.yaml @@ -0,0 +1,75 @@ +--- +# Source: grafana-agent/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: grafana-agent + labels: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + spec: + serviceAccountName: grafana-agent + containers: + - name: grafana-agent + image: docker.io/grafana/agent@sha256:82575a7be3e4770e53f620298e58bcc4cdb0fd0338e01c4b206cae9e3ca46ebf + imagePullPolicy: IfNotPresent + args: + - run + - /etc/agent/config.river + - --storage.path=/tmp/agent + - --server.http.listen-addr=0.0.0.0:80 + - --server.http.ui-path-prefix=/ + env: + - name: AGENT_MODE + value: flow + - name: AGENT_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 80 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/agent + - name: config-reloader + image: docker.io/jimmidyson/configmap-reload@sha256:5af9d3041d12a3e63f115125f89b66d2ba981fe82e64302ac370c5496055059c + args: + - --volume-dir=/etc/agent + - --webhook-url=http://localhost:80/-/reload + volumeMounts: + - name: config + mountPath: /etc/agent + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + volumes: + - name: config + configMap: + name: grafana-agent diff --git a/operations/helm/tests/with-digests/grafana-agent/templates/rbac.yaml b/operations/helm/tests/with-digests/grafana-agent/templates/rbac.yaml new file mode 100644 index 000000000000..3765583fb64f --- /dev/null +++ b/operations/helm/tests/with-digests/grafana-agent/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: grafana-agent/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. 
+ - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: grafana-agent/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: grafana-agent +subjects: + - kind: ServiceAccount + name: grafana-agent + namespace: default diff --git a/operations/helm/tests/with-digests/grafana-agent/templates/service.yaml b/operations/helm/tests/with-digests/grafana-agent/templates/service.yaml new file mode 100644 index 000000000000..04f6eeff3c4d --- /dev/null +++ b/operations/helm/tests/with-digests/grafana-agent/templates/service.yaml @@ -0,0 +1,22 @@ +--- +# Source: grafana-agent/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/with-digests/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/with-digests/grafana-agent/templates/serviceaccount.yaml new file mode 100644 index 000000000000..65d7e0df383f --- /dev/null +++ b/operations/helm/tests/with-digests/grafana-agent/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: grafana-agent/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: grafana-agent + namespace: default + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/packaging/grafana-agent-flow/windows/install_script.nsis b/packaging/grafana-agent-flow/windows/install_script.nsis index a333807c1196..469a2cbd97b0 100644 --- a/packaging/grafana-agent-flow/windows/install_script.nsis +++ b/packaging/grafana-agent-flow/windows/install_script.nsis @@ -152,10 +152,14 @@ Function InitializeRegistry Pop $0 # Ignore return result ${EndIf} - # Define the environment 
key, which holds environment variables to pass to the - # service. - nsExec::ExecToLog 'Reg.exe add "${REGKEY}" /reg:64 /v Environment /t REG_MULTI_SZ /d "$Environment"' - Pop $0 # Ignore return result + nsExec::ExecToLog 'Reg.exe query "${REGKEY}" /reg:64 /v Environment' + Pop $0 + ${If} $0 == 1 + # Define the environment key, which holds environment variables to pass to the + # service. + nsExec::ExecToLog 'Reg.exe add "${REGKEY}" /reg:64 /v Environment /t REG_MULTI_SZ /d "$Environment"' + Pop $0 # Ignore return result + ${EndIf} Return FunctionEnd diff --git a/pkg/flow/declare_test.go b/pkg/flow/declare_test.go new file mode 100644 index 000000000000..6f04bc16b356 --- /dev/null +++ b/pkg/flow/declare_test.go @@ -0,0 +1,513 @@ +package flow_test + +import ( + "context" + "os" + "regexp" + "testing" + "time" + + "github.com/grafana/agent/pkg/flow" + "github.com/grafana/agent/pkg/flow/internal/testcomponents" + "github.com/grafana/agent/pkg/flow/logging" + "github.com/grafana/agent/service" + "github.com/stretchr/testify/require" +) + +type testCase struct { + name string + config string + expected int +} + +func TestDeclare(t *testing.T) { + tt := []testCase{ + { + name: "BasicDeclare", + config: ` + declare "test" { + argument "input" { + optional = false + } + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + + export "output" { + value = testcomponents.passthrough.pt.output + } + } + testcomponents.count "inc" { + frequency = "10ms" + max = 10 + } + + test "myModule" { + input = testcomponents.count.inc.count + } + + testcomponents.summation "sum" { + input = test.myModule.output + } + `, + expected: 10, + }, + { + name: "NestedDeclares", + config: ` + declare "test" { + argument "input" { + optional = false + } + declare "nested" { + argument "input" { + optional = false + } + export "output" { + value = argument.input.value + } + } + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + nested "default" { + input = testcomponents.passthrough.pt.output + } + + export "output" { + value = nested.default.output + } + } + testcomponents.count "inc" { + frequency = "10ms" + max = 10 + } + + test "myModule" { + input = testcomponents.count.inc.count + } + + testcomponents.summation "sum" { + input = test.myModule.output + } + `, + expected: 10, + }, + { + name: "DeclaredInParentDepth1", + config: ` + declare "test" { + argument "input" { + optional = false + } + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + rootDeclare "default" { + input = testcomponents.passthrough.pt.output + } + + export "output" { + value = rootDeclare.default.output + } + } + declare "rootDeclare" { + argument "input" { + optional = false + } + export "output" { + value = argument.input.value + } + } + testcomponents.count "inc" { + frequency = "10ms" + max = 10 + } + + test "myModule" { + input = testcomponents.count.inc.count + } + + testcomponents.summation "sum" { + input = test.myModule.output + } + `, + expected: 10, + }, + { + name: "DeclaredInParentDepth2", + config: ` + declare "test" { + argument "input" { + optional = false + } + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + declare "anotherDeclare" { + argument "input" { + optional = false + } + rootDeclare "default" { + input = argument.input.value + } + export "output" { + value = rootDeclare.default.output + } + } + anotherDeclare "myOtherDeclare" { + input = testcomponents.passthrough.pt.output + } + + 
export "output" {
+		value = anotherDeclare.myOtherDeclare.output
+	}
+}
+declare "rootDeclare" {
+	argument "input" {
+		optional = false
+	}
+	export "output" {
+		value = argument.input.value
+	}
+}
+testcomponents.count "inc" {
+	frequency = "10ms"
+	max = 10
+}
+
+test "myModule" {
+	input = testcomponents.count.inc.count
+}
+
+testcomponents.summation "sum" {
+	input = test.myModule.output
+}
+`,
+			expected: 10,
+		},
+		{
+			name: "ShadowNamespace",
+			config: `
+declare "prometheus" {
+	argument "input" {
+		optional = false
+	}
+
+	testcomponents.passthrough "pt" {
+		input = argument.input.value
+		lag = "1ms"
+	}
+
+	export "output" {
+		value = testcomponents.passthrough.pt.output
+	}
+}
+testcomponents.count "inc" {
+	frequency = "10ms"
+	max = 10
+}
+
+prometheus "myModule" {
+	input = testcomponents.count.inc.count
+}
+
+testcomponents.summation "sum" {
+	input = prometheus.myModule.output
+}
+`,
+			expected: 10,
+		},
+		{
+			name: "ShadowDeclare",
+			config: `
+declare "a" {
+	argument "input" {
+		optional = false
+	}
+	export "output" {
+		value = argument.input.value
+	}
+}
+
+declare "test" {
+	// redeclare "a"
+	declare "a" {
+		export "output" {
+			value = -10
+		}
+	}
+
+	a "default" {}
+
+	export "output" {
+		value = a.default.output
+	}
+}
+test "myModule" {}
+
+testcomponents.summation "sum" {
+	input = test.myModule.output
+}
+`,
+			expected: -10,
+		},
+	}
+
+	for _, tc := range tt {
+		t.Run(tc.name, func(t *testing.T) {
+			ctrl := flow.New(testOptions(t))
+			f, err := flow.ParseSource(t.Name(), []byte(tc.config))
+			require.NoError(t, err)
+			require.NotNil(t, f)
+
+			err = ctrl.LoadSource(f, nil)
+			require.NoError(t, err)
+
+			ctx, cancel := context.WithCancel(context.Background())
+			done := make(chan struct{})
+			go func() {
+				ctrl.Run(ctx)
+				close(done)
+			}()
+			defer func() {
+				cancel()
+				<-done
+			}()
+
+			require.Eventually(t, func() bool {
+				export := getExport[testcomponents.SummationExports](t, ctrl, "", "testcomponents.summation.sum")
+				return export.LastAdded == tc.expected
+			}, 3*time.Second, 10*time.Millisecond)
+		})
+	}
+}
+
+type errorTestCase struct {
+	name          string
+	config        string
+	expectedError *regexp.Regexp
+}
+
+func TestDeclareError(t *testing.T) {
+	tt := []errorTestCase{
+		{
+			name: "CircleDependencyBetweenDeclares",
+			config: `
+declare "a" {
+	b "t1" {}
+}
+declare "b" {
+	a "t2" {}
+}
+a "t3" {}
+`,
+			// using a regex here because the order of the nodes can vary
+			// not ideal because it could technically match "a" and "a"
+			expectedError: regexp.MustCompile(`cycle: declare\.(a|b), declare\.(a|b)`),
+		},
+		{
+			name: "CircleDependencyWithinDeclare",
+			config: `
+declare "a" {
+	declare "b" {
+		c "t1" {}
+	}
+	declare "c" {
+		b "t2" {}
+	}
+	b "t3" {}
+}
+a "t4" {}
+`,
+			expectedError: regexp.MustCompile(`cycle: declare\.(b|c), declare\.(b|c)`),
+		},
+		{
+			name: "CircleDependencyWithItself",
+			config: `
+declare "a" {
+	a "t1" {}
+}
+a "t2" {}
+`,
+			expectedError: regexp.MustCompile(`self reference: declare\.a`),
+		},
+		{
+			name: "OutOfScopeReference",
+			config: `
+testcomponents.count "inc" {
+	frequency = "10ms"
+	max = 10
+}
+
+declare "example_a" {
+	testcomponents.summation "sum" {
+		input = testcomponents.count.inc.count // invalid reference
+	}
+}
+example_a "test" {}
+`,
+			expectedError: regexp.MustCompile(`component "testcomponents.count.inc.count" does not exist or is out of scope`),
+		},
+		{
+			name: "OutOfScopeDefinition",
+			config: `
+declare "a" {
+	b_1 "default" { } // this should error
+}
+declare "b" {
declare "b_1" {} + } + a "example" {} + `, + expectedError: regexp.MustCompile(`cannot retrieve the definition of component name "b_1"`), + }, + { + name: "ForbiddenDeclareLabel", + config: ` + declare "declare" {} + `, + expectedError: regexp.MustCompile(`'declare' is not a valid label for a declare block`), + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + defer verifyNoGoroutineLeaks(t) + s, err := logging.New(os.Stderr, logging.DefaultOptions) + require.NoError(t, err) + ctrl := flow.New(flow.Options{ + Logger: s, + DataPath: t.TempDir(), + Reg: nil, + Services: []service.Service{}, + }) + f, err := flow.ParseSource(t.Name(), []byte(tc.config)) + require.NoError(t, err) + require.NotNil(t, f) + + err = ctrl.LoadSource(f, nil) + if err == nil { + t.Errorf("Expected error to match regex %q, but got: nil", tc.expectedError) + } else if !tc.expectedError.MatchString(err.Error()) { + t.Errorf("Expected error to match regex %q, but got: %v", tc.expectedError, err) + } + + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + go func() { + ctrl.Run(ctx) + close(done) + }() + cancel() + <-done + }) + } +} + +type testCaseUpdateConfig struct { + name string + config string + newConfig string + expected int + newExpected int +} + +func TestDeclareUpdateConfig(t *testing.T) { + tt := []testCaseUpdateConfig{ + { + name: "UpdateDeclare", + config: ` + declare "test" { + argument "input" { + optional = false + } + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + + export "output" { + value = testcomponents.passthrough.pt.output + } + } + testcomponents.count "inc" { + frequency = "10ms" + max = 10 + } + + test "myModule" { + input = testcomponents.count.inc.count + } + + testcomponents.summation "sum" { + input = test.myModule.output + } + `, + newConfig: ` + declare "test" { + export "output" { + value = -10 + } + } + + test "myModule" {} + + testcomponents.summation "sum" { + input = test.myModule.output + } + `, + expected: 10, + newExpected: -10, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + ctrl := flow.New(testOptions(t)) + f, err := flow.ParseSource(t.Name(), []byte(tc.config)) + require.NoError(t, err) + require.NotNil(t, f) + + err = ctrl.LoadSource(f, nil) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + go func() { + ctrl.Run(ctx) + close(done) + }() + defer func() { + cancel() + <-done + }() + + require.Eventually(t, func() bool { + export := getExport[testcomponents.SummationExports](t, ctrl, "", "testcomponents.summation.sum") + return export.LastAdded == tc.expected + }, 3*time.Second, 10*time.Millisecond) + + f, err = flow.ParseSource(t.Name(), []byte(tc.newConfig)) + require.NoError(t, err) + require.NotNil(t, f) + + // Reload the controller with the new config. + err = ctrl.LoadSource(f, nil) + require.NoError(t, err) + + require.Eventually(t, func() bool { + export := getExport[testcomponents.SummationExports](t, ctrl, "", "testcomponents.summation.sum") + return export.LastAdded == tc.newExpected + }, 3*time.Second, 10*time.Millisecond) + }) + } +} diff --git a/pkg/flow/flow.go b/pkg/flow/flow.go index 76d62bdb9cb2..2f1deb73660f 100644 --- a/pkg/flow/flow.go +++ b/pkg/flow/flow.go @@ -27,21 +27,21 @@ // when evaluating the configuration for a component will always be reported as // unhealthy until the next successful evaluation. 
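+//
+// # Custom Components
+//
+// A config may also define a reusable custom component in a declare block and
+// instantiate it like any other component. A minimal sketch, mirroring the
+// declare tests in this change (the names are illustrative):
+//
+//	declare "passthrough" {
+//		argument "input" {
+//			optional = false
+//		}
+//		export "output" {
+//			value = argument.input.value
+//		}
+//	}
+//
+//	passthrough "example" {
+//		input = 5
+//	}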
//
-// # Component Evaluation
+// # Node Evaluation
//
-// The process of converting the River block associated with a component into
-// the appropriate Go struct is called "component evaluation."
+// The process of converting the River block associated with a node into
+// the appropriate Go struct is called "node evaluation."
//
-// Components are only evaluated after all components they reference have been
+// Nodes are only evaluated after all nodes they reference have been
 // evaluated; cyclic dependencies are invalid.
 //
-// If a component updates its Exports at runtime, other components which directly
-// or indirectly reference the updated component will have their Arguments
+// If a node updates its Exports at runtime, other nodes which directly
+// or indirectly reference the updated node will have their Arguments
 // re-evaluated.
 //
-// The arguments and exports for a component will be left in their last valid
-// state if a component shuts down or is given an invalid config. This prevents
-// a domino effect of a single failed component taking down other components
+// The arguments and exports for a node will be left in their last valid
+// state if a node shuts down or is given an invalid config. This prevents
+// a domino effect of a single failed node taking down other nodes
+// which are otherwise healthy.
 package flow

@@ -49,6 +49,7 @@ import (
 	"context"
 	"fmt"
 	"sync"
+	"time"

 	"github.com/grafana/agent/pkg/flow/internal/controller"
 	"github.com/grafana/agent/pkg/flow/internal/worker"
@@ -185,9 +186,9 @@ func newController(o controllerOptions) *Flow {
 		Logger:        log,
 		TraceProvider: tracer,
 		DataPath:      o.DataPath,
-		OnComponentUpdate: func(cn *controller.ComponentNode) {
-			// Changed components should be queued for reevaluation.
-			f.updateQueue.Enqueue(cn)
+		OnBlockNodeUpdate: func(cn controller.BlockNode) {
+			// Changed nodes should be queued for reevaluation.
+			f.updateQueue.Enqueue(&controller.QueuedNode{Node: cn, LastUpdatedTime: time.Now()})
 		},
 		OnExportsChange: o.OnExportsChange,
 		Registerer:      o.Reg,
@@ -236,8 +237,8 @@ func (f *Flow) Run(ctx context.Context) {
 			return

 		case <-f.updateQueue.Chan():
-			// Evaluate all components that have been updated. Sending the entire batch together will improve
-			// throughput - it prevents the situation where two components have the same dependency, and the first time
+			// Evaluate all nodes that have been updated. Sending the entire batch together will improve
+			// throughput - it prevents the situation where two nodes have the same dependency, and the first time
 			// it's picked up by the worker pool and the second time it's enqueued again, resulting in more evaluations.
 			all := f.updateQueue.DequeueAll()
 			f.loader.EvaluateDependants(ctx, all)
@@ -276,11 +277,25 @@
 //
 // The controller will only start running components after Load is called once
 // without any configuration errors.
+// LoadSource uses the default loader configuration.
 func (f *Flow) LoadSource(source *Source, args map[string]any) error {
+	return f.loadSource(source, args, nil)
+}
+
+// loadSource is the same as LoadSource, but with a customComponentRegistry that provides custom component definitions.
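+// A minimal caller-side sketch of driving these entry points (the option
+// wiring and config bytes are assumptions for illustration):
+//
+//	ctrl := flow.New(opts)                            // opts: a flow.Options value
+//	src, err := flow.ParseSource("example", cfgBytes) // cfgBytes: River config text
+//	if err == nil {
+//		err = ctrl.LoadSource(src, nil) // nil module args, default registry
+//	}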
+func (f *Flow) loadSource(source *Source, args map[string]any, customComponentRegistry *controller.CustomComponentRegistry) error { f.loadMut.Lock() defer f.loadMut.Unlock() - diags := f.loader.Apply(args, source.components, source.configBlocks) + applyOptions := controller.ApplyOptions{ + Args: args, + ComponentBlocks: source.components, + ConfigBlocks: source.configBlocks, + DeclareBlocks: source.declareBlocks, + CustomComponentRegistry: customComponentRegistry, + } + + diags := f.loader.Apply(applyOptions) if !f.loadedOnce.Load() && diags.HasErrors() { // The first call to Load should not run any components if there were // errors in the configuration file. diff --git a/pkg/flow/flow_components.go b/pkg/flow/flow_components.go index 0899971339b7..c78542741e85 100644 --- a/pkg/flow/flow_components.go +++ b/pkg/flow/flow_components.go @@ -29,7 +29,7 @@ func (f *Flow) GetComponent(id component.ID, opts component.InfoOptions) (*compo return nil, component.ErrComponentNotFound } - cn, ok := node.(*controller.ComponentNode) + cn, ok := node.(controller.ComponentNode) if !ok { return nil, fmt.Errorf("%q is not a component", id) } @@ -63,11 +63,11 @@ func (f *Flow) ListComponents(moduleID string, opts component.InfoOptions) ([]*c return detail, nil } -func (f *Flow) getComponentDetail(cn *controller.ComponentNode, graph *dag.Graph, opts component.InfoOptions) *component.Info { +func (f *Flow) getComponentDetail(cn controller.ComponentNode, graph *dag.Graph, opts component.InfoOptions) *component.Info { var references, referencedBy []string // Skip over any edge which isn't between two component nodes. This is a - // temporary workaround needed until there's athe concept of configuration + // temporary workaround needed until there's a concept of configuration // blocks in the API. // // Without this change, the graph fails to render when a configuration @@ -75,12 +75,12 @@ func (f *Flow) getComponentDetail(cn *controller.ComponentNode, graph *dag.Graph // // TODO(rfratto): add support for config block nodes in the API and UI. 
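	//
	// As a concrete illustration (the component IDs are hypothetical): an
	// edge between prometheus.scrape.a and a logging config block is dropped
	// below, while an edge between prometheus.scrape.a and
	// prometheus.remote_write.b is kept in References/ReferencedBy.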
for _, dep := range graph.Dependencies(cn) { - if _, ok := dep.(*controller.ComponentNode); ok { + if _, ok := dep.(controller.ComponentNode); ok { references = append(references, dep.NodeID()) } } for _, dep := range graph.Dependants(cn) { - if _, ok := dep.(*controller.ComponentNode); ok { + if _, ok := dep.(controller.ComponentNode); ok { referencedBy = append(referencedBy, dep.NodeID()) } } @@ -90,7 +90,6 @@ func (f *Flow) getComponentDetail(cn *controller.ComponentNode, graph *dag.Graph health component.Health arguments component.Arguments exports component.Exports - debugInfo interface{} ) if opts.GetHealth { @@ -102,14 +101,8 @@ func (f *Flow) getComponentDetail(cn *controller.ComponentNode, graph *dag.Graph if opts.GetExports { exports = cn.Exports() } - if opts.GetDebugInfo { - debugInfo = cn.DebugInfo() - } - - return &component.Info{ - Component: cn.Component(), - ModuleIDs: cn.ModuleIDs(), + componentInfo := &component.Info{ ID: component.ID{ ModuleID: f.opts.ControllerID, LocalID: cn.NodeID(), @@ -119,11 +112,20 @@ func (f *Flow) getComponentDetail(cn *controller.ComponentNode, graph *dag.Graph References: references, ReferencedBy: referencedBy, - Registration: cn.Registration(), - Health: health, + ComponentName: cn.ComponentName(), + Health: health, Arguments: arguments, Exports: exports, - DebugInfo: debugInfo, + + ModuleIDs: cn.ModuleIDs(), + } + + if builtinComponent, ok := cn.(*controller.BuiltinComponentNode); ok { + componentInfo.Component = builtinComponent.Component() + if opts.GetDebugInfo { + componentInfo.DebugInfo = builtinComponent.DebugInfo() + } } + return componentInfo } diff --git a/pkg/flow/flow_test.go b/pkg/flow/flow_test.go index 590f97a424f1..42f5a6077e06 100644 --- a/pkg/flow/flow_test.go +++ b/pkg/flow/flow_test.go @@ -59,7 +59,7 @@ func getFields(t *testing.T, g *dag.Graph, nodeID string) (component.Arguments, n := g.GetByID(nodeID) require.NotNil(t, n, "couldn't find node %q in graph", nodeID) - uc := n.(*controller.ComponentNode) + uc := n.(*controller.BuiltinComponentNode) return uc.Arguments(), uc.Exports() } diff --git a/pkg/flow/internal/controller/component_node.go b/pkg/flow/internal/controller/component_node.go new file mode 100644 index 000000000000..1a6e41605316 --- /dev/null +++ b/pkg/flow/internal/controller/component_node.go @@ -0,0 +1,35 @@ +package controller + +import ( + "github.com/grafana/agent/component" + "github.com/grafana/river/ast" +) + +// ComponentNode is a generic representation of a Flow component. +type ComponentNode interface { + RunnableNode + + // CurrentHealth returns the current health of the component. + CurrentHealth() component.Health + + // Arguments returns the current arguments of the managed component. + Arguments() component.Arguments + + // Exports returns the current set of exports from the managed component. + Exports() component.Exports + + // Label returns the component label. + Label() string + + // ComponentName returns the name of the component. + ComponentName() string + + // ID returns the component ID of the managed component from its River block. + ID() ComponentID + + // UpdateBlock updates the River block used to construct arguments for the managed component. + UpdateBlock(b *ast.BlockStmt) + + // ModuleIDs returns the current list of modules managed by the component. 
+	ModuleIDs() []string
+}
diff --git a/pkg/flow/internal/controller/component_node_manager.go b/pkg/flow/internal/controller/component_node_manager.go
new file mode 100644
index 000000000000..152823da4ca5
--- /dev/null
+++ b/pkg/flow/internal/controller/component_node_manager.go
@@ -0,0 +1,76 @@
+package controller
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/grafana/river/ast"
+)
+
+// ComponentNodeManager is responsible for creating new component nodes and
+// obtaining the necessary information to run them.
+type ComponentNodeManager struct {
+	globals ComponentGlobals
+	// builtinComponentReg returns information to build and run built-in components.
+	builtinComponentReg ComponentRegistry
+
+	mut sync.RWMutex
+	// customComponentReg returns information to build and run custom components.
+	customComponentReg *CustomComponentRegistry
+}
+
+// NewComponentNodeManager creates a new ComponentNodeManager without a custom component registry.
+func NewComponentNodeManager(globals ComponentGlobals, componentReg ComponentRegistry) *ComponentNodeManager {
+	return &ComponentNodeManager{
+		globals:             globals,
+		builtinComponentReg: componentReg,
+	}
+}
+
+// createComponentNode creates a new builtin component node or a new custom component node.
+func (m *ComponentNodeManager) createComponentNode(componentName string, block *ast.BlockStmt) (ComponentNode, error) {
+	if isCustomComponent(m.customComponentReg, block.Name[0]) {
+		return NewCustomComponentNode(m.globals, block, m.getCustomComponentConfig), nil
+	}
+	registration, exists := m.builtinComponentReg.Get(componentName)
+	if !exists {
+		return nil, fmt.Errorf("cannot retrieve the definition of component name %q", componentName)
+	}
+	if block.Label == "" {
+		return nil, fmt.Errorf("component %q must have a label", componentName)
+	}
+	return NewBuiltinComponentNode(m.globals, registration, block), nil
+}
+
+// getCustomComponentConfig is used by the custom component to retrieve its template and the customComponentRegistry associated with it.
+func (m *ComponentNodeManager) getCustomComponentConfig(componentName string) (ast.Body, *CustomComponentRegistry, error) {
+	m.mut.Lock()
+	defer m.mut.Unlock()
+
+	template, customComponentRegistry := findLocalDeclare(m.customComponentReg, componentName)
+
+	if customComponentRegistry == nil || template == nil {
+		return nil, nil, fmt.Errorf("custom component config not found in the registry, componentName: %s", componentName)
+	}
+	return template, customComponentRegistry, nil
+}
+
+// isCustomComponent returns true if the name matches a declare in the provided custom component registry.
+func isCustomComponent(reg *CustomComponentRegistry, name string) bool {
+	if reg == nil {
+		return false
+	}
+	_, declareExists := reg.declares[name]
+	return declareExists || isCustomComponent(reg.parent, name)
+}
+
+// findLocalDeclare recursively searches for a declare definition in the custom component registry.
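+//
+// For example, with a child registry whose parent is the root registry, a
+// lookup first checks the child's own declares and then walks up to the
+// parent (a hypothetical sketch; the names are for illustration only):
+//
+//	child := NewCustomComponentRegistry(root)
+//	body, owner := findLocalDeclare(child, "test")
+//	// owner is child or root depending on where "test" was registered,
+//	// or nil if it is not declared anywhere in the chain.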
+func findLocalDeclare(reg *CustomComponentRegistry, componentName string) (ast.Body, *CustomComponentRegistry) { + if declare, ok := reg.declares[componentName]; ok { + return declare, reg + } + if reg.parent != nil { + return findLocalDeclare(reg.parent, componentName) + } + return nil, nil +} diff --git a/pkg/flow/internal/controller/component_references.go b/pkg/flow/internal/controller/component_references.go index 7adc0639c664..6e9dbb3d9bc5 100644 --- a/pkg/flow/internal/controller/component_references.go +++ b/pkg/flow/internal/controller/component_references.go @@ -157,7 +157,7 @@ func resolveTraversal(t Traversal, g *dag.Graph) (Reference, diag.Diagnostics) { diags = append(diags, diag.Diagnostic{ Severity: diag.SeverityLevelError, - Message: fmt.Sprintf("component %q does not exist", partial), + Message: fmt.Sprintf("component %q does not exist or is out of scope", partial), StartPos: ast.StartPos(t[0]).Position(), EndPos: ast.StartPos(t[len(t)-1]).Position(), }) diff --git a/pkg/flow/internal/controller/custom_component_registry.go b/pkg/flow/internal/controller/custom_component_registry.go new file mode 100644 index 000000000000..dccdb776cf94 --- /dev/null +++ b/pkg/flow/internal/controller/custom_component_registry.go @@ -0,0 +1,25 @@ +package controller + +import ( + "github.com/grafana/river/ast" +) + +// CustomComponentRegistry holds custom component definitions that are available in the context. +type CustomComponentRegistry struct { + parent *CustomComponentRegistry // nil if root config + declares map[string]ast.Body // customComponentName: template +} + +// NewCustomComponentRegistry creates a new CustomComponentRegistry with a parent. +// parent can be nil. +func NewCustomComponentRegistry(parent *CustomComponentRegistry) *CustomComponentRegistry { + return &CustomComponentRegistry{ + parent: parent, + declares: make(map[string]ast.Body), + } +} + +// registerDeclare stores a local declare block. +func (s *CustomComponentRegistry) registerDeclare(declare *ast.BlockStmt) { + s.declares[declare.Label] = declare.Body +} diff --git a/pkg/flow/internal/controller/loader.go b/pkg/flow/internal/controller/loader.go index 10a6f37965ab..c9a62d8ad3b8 100644 --- a/pkg/flow/internal/controller/loader.go +++ b/pkg/flow/internal/controller/loader.go @@ -4,6 +4,8 @@ import ( "context" "errors" "fmt" + "path" + "strings" "sync" "time" @@ -24,23 +26,24 @@ import ( // The Loader builds and evaluates ComponentNodes from River blocks. type Loader struct { - log log.Logger - tracer trace.TracerProvider - globals ComponentGlobals - services []service.Service - host service.Host - componentReg ComponentRegistry - workerPool worker.Pool + log log.Logger + tracer trace.TracerProvider + globals ComponentGlobals + services []service.Service + host service.Host + workerPool worker.Pool // backoffConfig is used to backoff when an updated component's dependencies cannot be submitted to worker // pool for evaluation in EvaluateDependants, because the queue is full. This is an unlikely scenario, but when // it happens we should avoid retrying too often to give other goroutines a chance to progress. Having a backoff // also prevents log spamming with errors. 
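+	// (With the defaults chosen in NewLoader below, a completely stuck
+	// component is retried and logged at most about once every 10 seconds.)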
- backoffConfig backoff.Config + backoffConfig backoff.Config + componentNodeManager *ComponentNodeManager mut sync.RWMutex graph *dag.Graph originalGraph *dag.Graph - componentNodes []*ComponentNode + componentNodes []ComponentNode + declareNodes map[string]*DeclareNode serviceNodes []*ServiceNode cache *valueCache blocks []*ast.BlockStmt // Most recently loaded blocks, used for writing @@ -75,13 +78,14 @@ func NewLoader(opts LoaderOptions) *Loader { } l := &Loader{ - log: log.With(globals.Logger, "controller_id", globals.ControllerID), - tracer: tracing.WrapTracerForLoader(globals.TraceProvider, globals.ControllerID), - globals: globals, - services: services, - host: host, - componentReg: reg, - workerPool: opts.WorkerPool, + log: log.With(globals.Logger, "controller_id", globals.ControllerID), + tracer: tracing.WrapTracerForLoader(globals.TraceProvider, globals.ControllerID), + globals: globals, + services: services, + host: host, + workerPool: opts.WorkerPool, + + componentNodeManager: NewComponentNodeManager(globals, reg), // This is a reasonable default which should work for most cases. If a component is completely stuck, we would // retry and log an error every 10 seconds, at most. @@ -105,6 +109,21 @@ func NewLoader(opts LoaderOptions) *Loader { return l } +// ApplyOptions are options that can be provided when loading a new River config. +type ApplyOptions struct { + Args map[string]any // input values of a module (nil for the root module) + + // TODO: rename ComponentBlocks because it also contains services + ComponentBlocks []*ast.BlockStmt // pieces of config that can be used to instantiate builtin components and services + ConfigBlocks []*ast.BlockStmt // pieces of config that can be used to instantiate config nodes + DeclareBlocks []*ast.BlockStmt // pieces of config that can be used as templates to instantiate custom components + + // CustomComponentRegistry holds custom component templates. + // The definition of a custom component instantiated inside of the loaded config + // should be passed via this field if it's not declared or imported in the config. + CustomComponentRegistry *CustomComponentRegistry +} + // Apply loads a new set of components into the Loader. Apply will drop any // previously loaded component which is not described in the set of River // blocks. @@ -117,26 +136,29 @@ func NewLoader(opts LoaderOptions) *Loader { // The provided parentContext can be used to provide global variables and // functions to components. A child context will be constructed from the parent // to expose values of other components. -func (l *Loader) Apply(args map[string]any, componentBlocks []*ast.BlockStmt, configBlocks []*ast.BlockStmt) diag.Diagnostics { +func (l *Loader) Apply(options ApplyOptions) diag.Diagnostics { start := time.Now() l.mut.Lock() defer l.mut.Unlock() l.cm.controllerEvaluation.Set(1) defer l.cm.controllerEvaluation.Set(0) - for key, value := range args { + for key, value := range options.Args { l.cache.CacheModuleArgument(key, value) } - l.cache.SyncModuleArgs(args) + l.cache.SyncModuleArgs(options.Args) - newGraph, diags := l.loadNewGraph(args, componentBlocks, configBlocks) + // Create a new CustomComponentRegistry based on the provided one. + // The provided one should be nil for the root config. 
+ l.componentNodeManager.customComponentReg = NewCustomComponentRegistry(options.CustomComponentRegistry) + newGraph, diags := l.loadNewGraph(options.Args, options.ComponentBlocks, options.ConfigBlocks, options.DeclareBlocks) if diags.HasErrors() { return diags } var ( - components = make([]*ComponentNode, 0, len(componentBlocks)) - componentIDs = make([]ComponentID, 0, len(componentBlocks)) + components = make([]ComponentNode, 0) + componentIDs = make([]ComponentID, 0) services = make([]*ServiceNode, 0, len(l.services)) ) @@ -168,7 +190,7 @@ func (l *Loader) Apply(args map[string]any, componentBlocks []*ast.BlockStmt, co var err error switch n := n.(type) { - case *ComponentNode: + case ComponentNode: components = append(components, n) componentIDs = append(componentIDs, n.ID()) @@ -231,7 +253,7 @@ func (l *Loader) Apply(args map[string]any, componentBlocks []*ast.BlockStmt, co l.serviceNodes = services l.graph = &newGraph l.cache.SyncIDs(componentIDs) - l.blocks = componentBlocks + l.blocks = options.ComponentBlocks if l.globals.OnExportsChange != nil && l.cache.ExportChangeIndex() != l.moduleExportIndex { l.moduleExportIndex = l.cache.ExportChangeIndex() l.globals.OnExportsChange(l.cache.CreateModuleExports()) @@ -252,7 +274,7 @@ func (l *Loader) Cleanup(stopWorkerPool bool) { } // loadNewGraph creates a new graph from the provided blocks and validates it. -func (l *Loader) loadNewGraph(args map[string]any, componentBlocks []*ast.BlockStmt, configBlocks []*ast.BlockStmt) (dag.Graph, diag.Diagnostics) { +func (l *Loader) loadNewGraph(args map[string]any, componentBlocks []*ast.BlockStmt, configBlocks []*ast.BlockStmt, declareBlocks []*ast.BlockStmt) (dag.Graph, diag.Diagnostics) { var g dag.Graph // Split component blocks into blocks for components and services. @@ -262,6 +284,10 @@ func (l *Loader) loadNewGraph(args map[string]any, componentBlocks []*ast.BlockS // block. diags := l.populateServiceNodes(&g, serviceBlocks) + // Fill our graph with declare blocks, must be added before componentNodes. + declareDiags := l.populateDeclareNodes(&g, declareBlocks) + diags = append(diags, declareDiags...) + // Fill our graph with config blocks. configBlockDiags := l.populateConfigBlockNodes(args, &g, configBlocks) diags = append(diags, configBlockDiags...) @@ -310,6 +336,36 @@ func (l *Loader) splitComponentBlocks(blocks []*ast.BlockStmt) (componentBlocks, return componentBlocks, serviceBlocks } +func (l *Loader) populateDeclareNodes(g *dag.Graph, declareBlocks []*ast.BlockStmt) diag.Diagnostics { + var diags diag.Diagnostics + l.declareNodes = map[string]*DeclareNode{} + for _, declareBlock := range declareBlocks { + if declareBlock.Label == "declare" { + diags.Add(diag.Diagnostic{ + Severity: diag.SeverityLevelError, + Message: "'declare' is not a valid label for a declare block", + StartPos: ast.StartPos(declareBlock).Position(), + EndPos: ast.EndPos(declareBlock).Position(), + }) + continue + } + // TODO: if node already exists in the graph, update the block + // instead of copying it. + node := NewDeclareNode(declareBlock) + if g.GetByID(node.NodeID()) != nil { + diags.Add(diag.Diagnostic{ + Severity: diag.SeverityLevelError, + Message: fmt.Sprintf("cannot add declare node %q; node with same ID already exists", node.NodeID()), + }) + continue + } + l.componentNodeManager.customComponentReg.registerDeclare(declareBlock) + l.declareNodes[node.label] = node + g.Add(node) + } + return diags +} + // populateServiceNodes adds service nodes to the graph. 
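As a quick illustration of the label check in `populateDeclareNodes` above, the validation can be exercised standalone. A minimal sketch, assuming the grafana/river parser API that `source.go` in this diff already relies on (`parser.ParseFile` returning an `*ast.File` with a `Body`):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/grafana/river/ast"
	"github.com/grafana/river/parser"
)

func main() {
	src := []byte(`declare "declare" { }`)

	file, err := parser.ParseFile("config.river", src)
	if err != nil {
		panic(err)
	}
	for _, stmt := range file.Body {
		block, ok := stmt.(*ast.BlockStmt)
		if !ok {
			continue
		}
		// Mirrors the diagnostic emitted by populateDeclareNodes: the label
		// "declare" is reserved so custom component names stay unambiguous.
		if strings.Join(block.Name, ".") == "declare" && block.Label == "declare" {
			fmt.Println("'declare' is not a valid label for a declare block")
		}
	}
}
```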
 func (l *Loader) populateServiceNodes(g *dag.Graph, serviceBlocks []*ast.BlockStmt) diag.Diagnostics {
 	var diags diag.Diagnostics
@@ -344,6 +400,17 @@ func (l *Loader) populateServiceNodes(g *dag.Graph, serviceBlocks []*ast.BlockSt
 	// Now, assign blocks to services.
 	for _, block := range serviceBlocks {
 		blockID := BlockComponentID(block).String()
+
+		if l.isModule() {
+			diags.Add(diag.Diagnostic{
+				Severity: diag.SeverityLevelError,
+				Message:  fmt.Sprintf("service blocks not allowed inside a module: %q", blockID),
+				StartPos: ast.StartPos(block).Position(),
+				EndPos:   ast.EndPos(block).Position(),
+			})
+			continue
+		}
+
 		node := g.GetByID(blockID).(*ServiceNode)
 
 		// Blocks assigned to services are reset to nil in the previous loop.
@@ -413,6 +480,8 @@ func (l *Loader) populateConfigBlockNodes(args map[string]any, g *dag.Graph, con
 		g.Add(c)
 	}
 
+	// TODO: set import config nodes from the nodeMap to the importConfigNodes field of the loader.
+
 	return diags
 }
 
@@ -423,7 +492,6 @@ func (l *Loader) populateComponentNodes(g *dag.Graph, componentBlocks []*ast.Blo
 		blockMap = make(map[string]*ast.BlockStmt, len(componentBlocks))
 	)
 	for _, block := range componentBlocks {
-		var c *ComponentNode
 		id := BlockComponentID(block).String()
 
 		if orig, redefined := blockMap[id]; redefined {
@@ -437,39 +505,26 @@ func (l *Loader) populateComponentNodes(g *dag.Graph, componentBlocks []*ast.Blo
 		}
 		blockMap[id] = block
 
-		// Check the graph from the previous call to Load to see we can copy an
+		// Check the graph from the previous call to Load to see if we can copy an
 		// existing instance of ComponentNode.
 		if exist := l.graph.GetByID(id); exist != nil {
-			c = exist.(*ComponentNode)
+			c := exist.(ComponentNode)
 			c.UpdateBlock(block)
+			g.Add(c)
 		} else {
 			componentName := block.GetBlockName()
-			registration, exists := l.componentReg.Get(componentName)
-			if !exists {
-				diags.Add(diag.Diagnostic{
-					Severity: diag.SeverityLevelError,
-					Message:  fmt.Sprintf("Unrecognized component name %q", componentName),
-					StartPos: block.NamePos.Position(),
-					EndPos:   block.NamePos.Add(len(componentName) - 1).Position(),
-				})
-				continue
-			}
-
-			if block.Label == "" {
+			c, err := l.componentNodeManager.createComponentNode(componentName, block)
+			if err != nil {
 				diags.Add(diag.Diagnostic{
 					Severity: diag.SeverityLevelError,
-					Message:  fmt.Sprintf("Component %q must have a label", componentName),
+					Message:  err.Error(),
 					StartPos: block.NamePos.Position(),
 					EndPos:   block.NamePos.Add(len(componentName) - 1).Position(),
 				})
 				continue
 			}
-
-			// Create a new component
-			c = NewComponentNode(l.globals, registration, block)
+			g.Add(c)
 		}
-
-		g.Add(c)
 	}
 
 	return diags
@@ -480,7 +535,6 @@ func (l *Loader) wireGraphEdges(g *dag.Graph) diag.Diagnostics {
 	var diags diag.Diagnostics
 
 	for _, n := range g.Nodes() {
-		// First, wire up dependencies on services.
 		switch n := n.(type) {
 		case *ServiceNode: // Service depending on other services.
 			for _, depName := range n.Definition().DependsOn {
@@ -495,6 +549,22 @@ func (l *Loader) wireGraphEdges(g *dag.Graph) diag.Diagnostics {
 				g.AddEdge(dag.Edge{From: n, To: dep})
 			}
 
+		case *DeclareNode:
+			// Although they do nothing on evaluation, DeclareNodes are wired
+			// to detect cyclic dependencies. If a declare "a" block contains an instance
+			// of a declare "b" which contains an instance of the declare "a", both DeclareNodes
+			// will depend on each other, creating a cycle in the graph which will be detected later.
+			// Example: declare "a"{b "default"{}} declare "b"{a "default"{}}
+			// It also covers self-dependency.
+ // Example: declare "a"{a "default"{}} + refs := l.findCustomComponentReferences(n.Block()) + for ref := range refs { + g.AddEdge(dag.Edge{From: n, To: ref}) + } + // skip here because for now Declare nodes can't reference component nodes. + continue + case *CustomComponentNode: + l.wireCustomComponentNode(g, n) } // Finally, wire component references. @@ -508,6 +578,17 @@ func (l *Loader) wireGraphEdges(g *dag.Graph) diag.Diagnostics { return diags } +// wireCustomComponentNode wires a custom component to the import/declare nodes that it depends on. +func (l *Loader) wireCustomComponentNode(g *dag.Graph, cc *CustomComponentNode) { + if declare, ok := l.declareNodes[cc.componentName]; ok { + refs := l.findCustomComponentReferences(declare.Block()) + for ref := range refs { + // add edges between the custom component and declare/import nodes. + g.AddEdge(dag.Edge{From: cc, To: ref}) + } + } +} + // Variables returns the Variables the Loader exposes for other Flow components // to reference. func (l *Loader) Variables() map[string]interface{} { @@ -515,7 +596,7 @@ func (l *Loader) Variables() map[string]interface{} { } // Components returns the current set of loaded components. -func (l *Loader) Components() []*ComponentNode { +func (l *Loader) Components() []ComponentNode { l.mut.RLock() defer l.mut.RUnlock() return l.componentNodes @@ -543,13 +624,13 @@ func (l *Loader) OriginalGraph() *dag.Graph { return l.originalGraph.Clone() } -// EvaluateDependants sends components which depend directly on components in updatedNodes for evaluation to the -// workerPool. It should be called whenever components update their exports. -// It is beneficial to call EvaluateDependants with a batch of components, as it will enqueue the entire batch before +// EvaluateDependants sends nodes which depend directly on nodes in updatedNodes for evaluation to the +// workerPool. It should be called whenever nodes update their exports. +// It is beneficial to call EvaluateDependants with a batch of nodes, as it will enqueue the entire batch before // the worker pool starts to evaluate them, resulting in smaller number of total evaluations when // node updates are frequent. If the worker pool's queue is full, EvaluateDependants will retry with a backoff until // it succeeds or until the ctx is cancelled. -func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*ComponentNode) { +func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*QueuedNode) { if len(updatedNodes) == 0 { return } @@ -565,12 +646,15 @@ func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*Compone l.mut.RLock() defer l.mut.RUnlock() - dependenciesToParentsMap := make(map[dag.Node]*ComponentNode) + dependenciesToParentsMap := make(map[dag.Node]*QueuedNode) for _, parent := range updatedNodes { - // Make sure we're in-sync with the current exports of parent. - l.cache.CacheExports(parent.ID(), parent.Exports()) + switch parentNode := parent.Node.(type) { + case ComponentNode: + // Make sure we're in-sync with the current exports of parent. + l.cache.CacheExports(parentNode.ID(), parentNode.Exports()) + } // We collect all nodes directly incoming to parent. 
- _ = dag.WalkIncomingNodes(l.graph, parent, func(n dag.Node) error { + _ = dag.WalkIncomingNodes(l.graph, parent.Node, func(n dag.Node) error { dependenciesToParentsMap[n] = parent return nil }) @@ -583,7 +667,7 @@ func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*Compone for n, parent := range dependenciesToParentsMap { dependantCtx, span := tracer.Start(spanCtx, "SubmitForEvaluation", trace.WithSpanKind(trace.SpanKindInternal)) span.SetAttributes(attribute.String("node_id", n.NodeID())) - span.SetAttributes(attribute.String("originator_id", parent.NodeID())) + span.SetAttributes(attribute.String("originator_id", parent.Node.NodeID())) // Submit for asynchronous evaluation with retries and backoff. Don't use range variables in the closure. var ( @@ -592,7 +676,8 @@ func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*Compone err error ) for retryBackoff.Ongoing() { - err = l.workerPool.SubmitWithKey(nodeRef.NodeID(), func() { + globalUniqueKey := path.Join(l.globals.ControllerID, nodeRef.NodeID()) + err = l.workerPool.SubmitWithKey(globalUniqueKey, func() { l.concurrentEvalFn(nodeRef, dependantCtx, tracer, parentRef) }) if err != nil { @@ -601,7 +686,7 @@ func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*Compone "and cannot keep up with evaluating components - will retry", "err", err, "node_id", n.NodeID(), - "originator_id", parent.NodeID(), + "originator_id", parent.Node.NodeID(), "retries", retryBackoff.NumRetries(), ) retryBackoff.Wait() @@ -624,9 +709,9 @@ func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*Compone // concurrentEvalFn returns a function that evaluates a node and updates the cache. This function can be submitted to // a worker pool for asynchronous evaluation. -func (l *Loader) concurrentEvalFn(n dag.Node, spanCtx context.Context, tracer trace.Tracer, parent *ComponentNode) { +func (l *Loader) concurrentEvalFn(n dag.Node, spanCtx context.Context, tracer trace.Tracer, parent *QueuedNode) { start := time.Now() - l.cm.dependenciesWaitTime.Observe(time.Since(parent.lastUpdateTime.Load()).Seconds()) + l.cm.dependenciesWaitTime.Observe(time.Since(parent.LastUpdatedTime).Seconds()) _, span := tracer.Start(spanCtx, "EvaluateNode", trace.WithSpanKind(trace.SpanKindInternal)) span.SetAttributes(attribute.String("node_id", n.NodeID())) defer span.End() @@ -687,7 +772,7 @@ func (l *Loader) evaluate(logger log.Logger, bn BlockNode) error { // mut must be held when calling postEvaluate. func (l *Loader) postEvaluate(logger log.Logger, bn BlockNode, err error) error { switch c := bn.(type) { - case *ComponentNode: + case ComponentNode: // Always update the cache both the arguments and exports, since both might // change when a component gets re-evaluated. We also want to cache the arguments and exports in case of an error l.cache.CacheArguments(c.ID(), c.Arguments()) @@ -728,3 +813,33 @@ func (l *Loader) isModule() bool { // Either 1 of these checks is technically sufficient but let's be extra careful. return l.globals.OnExportsChange != nil && l.globals.ControllerID != "" } + +// findCustomComponentReferences returns references to import/declare nodes in a declare block. 
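The globally unique key above is the crux of the dedup fix: previously `SubmitWithKey` used the bare node ID, so identically named components in two different modules could collide in the worker pool. A minimal sketch of the new key construction, with IDs borrowed from the `TestUpdates_TwoModules_SameCompNames` test later in this diff:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// Two modules run components with identical node IDs; prefixing with the
	// controller ID keeps their worker-pool keys distinct.
	nodeID := "testcomponents.passthrough.pt"
	fmt.Println(path.Join("module.string.test_1", nodeID)) // module.string.test_1/testcomponents.passthrough.pt
	fmt.Println(path.Join("module.string.test_2", nodeID)) // module.string.test_2/testcomponents.passthrough.pt

	// The root controller has an empty ID, so its keys are unchanged.
	fmt.Println(path.Join("", nodeID)) // testcomponents.passthrough.pt
}
```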
+func (l *Loader) findCustomComponentReferences(declare *ast.BlockStmt) map[BlockNode]struct{} {
+	uniqueReferences := make(map[BlockNode]struct{})
+	l.collectCustomComponentReferences(declare.Body, uniqueReferences)
+	return uniqueReferences
+}
+
+// collectCustomComponentReferences recursively collects references to declare nodes through an AST body.
+func (l *Loader) collectCustomComponentReferences(stmts ast.Body, uniqueReferences map[BlockNode]struct{}) {
+	for _, stmt := range stmts {
+		blockStmt, ok := stmt.(*ast.BlockStmt)
+		if !ok {
+			continue
+		}
+
+		var (
+			componentName = strings.Join(blockStmt.Name, ".")
+
+			declareNode, foundDeclare = l.declareNodes[blockStmt.Name[0]]
+		)
+
+		switch {
+		case componentName == "declare":
+			l.collectCustomComponentReferences(blockStmt.Body, uniqueReferences)
+		case foundDeclare:
+			uniqueReferences[declareNode] = struct{}{}
+		}
+	}
+}
diff --git a/pkg/flow/internal/controller/loader_test.go b/pkg/flow/internal/controller/loader_test.go
index e93f757b1a2f..703ad1480e65 100644
--- a/pkg/flow/internal/controller/loader_test.go
+++ b/pkg/flow/internal/controller/loader_test.go
@@ -73,7 +73,7 @@ func TestLoader(t *testing.T) {
 		Logger:            l,
 		TraceProvider:     noop.NewTracerProvider(),
 		DataPath:          t.TempDir(),
-		OnComponentUpdate: func(cn *controller.ComponentNode) { /* no-op */ },
+		OnBlockNodeUpdate: func(cn controller.BlockNode) { /* no-op */ },
 		Registerer:        prometheus.NewRegistry(),
 		NewModuleController: func(id string) controller.ModuleController {
 			return nil
@@ -129,7 +129,18 @@ func TestLoader(t *testing.T) {
 	`
 		l := controller.NewLoader(newLoaderOptions())
 		diags := applyFromContent(t, l, []byte(invalidFile), nil)
-		require.ErrorContains(t, diags.ErrorOrNil(), `Unrecognized component name "doesnotexist`)
+		require.ErrorContains(t, diags.ErrorOrNil(), `cannot retrieve the definition of component name "doesnotexist`)
+	})
+
+	t.Run("Load with component with empty label", func(t *testing.T) {
+		invalidFile := `
+		testcomponents.tick "" {
+			frequency = "1s"
+		}
+	`
+		l := controller.NewLoader(newLoaderOptions())
+		diags := applyFromContent(t, l, []byte(invalidFile), nil)
+		require.ErrorContains(t, diags.ErrorOrNil(), `component "testcomponents.tick" must have a label`)
 	})
 
 	t.Run("Partial load with invalid reference", func(t *testing.T) {
@@ -207,7 +218,7 @@ func TestScopeWithFailingComponent(t *testing.T) {
 		Logger:            l,
 		TraceProvider:     noop.NewTracerProvider(),
 		DataPath:          t.TempDir(),
-		OnComponentUpdate: func(cn *controller.ComponentNode) { /* no-op */ },
+		OnBlockNodeUpdate: func(cn controller.BlockNode) { /* no-op */ },
 		Registerer:        prometheus.NewRegistry(),
 		NewModuleController: func(id string) controller.ModuleController {
 			return fakeModuleController{}
@@ -230,6 +241,7 @@ func applyFromContent(t *testing.T, l *controller.Loader, componentBytes []byte,
 		diags           diag.Diagnostics
 		componentBlocks []*ast.BlockStmt
 		configBlocks    []*ast.BlockStmt = nil
+		declareBlocks   []*ast.BlockStmt = nil
 	)
 
 	componentBlocks, diags = fileToBlock(t, componentBytes)
@@ -244,7 +256,13 @@ func applyFromContent(t *testing.T, l *controller.Loader, componentBytes []byte,
 		}
 	}
 
-	applyDiags := l.Apply(nil, componentBlocks, configBlocks)
+	applyOptions := controller.ApplyOptions{
+		ComponentBlocks: componentBlocks,
+		ConfigBlocks:    configBlocks,
+		DeclareBlocks:   declareBlocks,
+	}
+
+	applyDiags := l.Apply(applyOptions)
 	diags = append(diags, applyDiags...)
return diags @@ -318,3 +336,7 @@ func (f fakeModuleController) ModuleIDs() []string { func (f fakeModuleController) ClearModuleIDs() { } + +func (f fakeModuleController) NewCustomComponent(id string, export component.ExportFunc) (controller.CustomComponent, error) { + return nil, nil +} diff --git a/pkg/flow/internal/controller/metrics.go b/pkg/flow/internal/controller/metrics.go index 1c5a558ccc1b..40698529b16d 100644 --- a/pkg/flow/internal/controller/metrics.go +++ b/pkg/flow/internal/controller/metrics.go @@ -112,7 +112,9 @@ func (cc *controllerCollector) Collect(ch chan<- prometheus.Metric) { for _, component := range cc.l.Components() { health := component.CurrentHealth().Health.String() componentsByHealth[health]++ - component.registry.Collect(ch) + if builtinComponent, ok := component.(*BuiltinComponentNode); ok { + builtinComponent.registry.Collect(ch) + } } for health, count := range componentsByHealth { diff --git a/pkg/flow/internal/controller/module.go b/pkg/flow/internal/controller/module.go index a672763cf79a..72454dc10cd1 100644 --- a/pkg/flow/internal/controller/module.go +++ b/pkg/flow/internal/controller/module.go @@ -1,6 +1,11 @@ package controller -import "github.com/grafana/agent/component" +import ( + "context" + + "github.com/grafana/agent/component" + "github.com/grafana/river/ast" +) // ModuleController is a lower-level interface for module controllers which // allows probing for the list of managed modules. @@ -9,4 +14,22 @@ type ModuleController interface { // ModuleIDs returns the list of managed modules in unspecified order. ModuleIDs() []string + + // Creates a new custom component. + NewCustomComponent(id string, export component.ExportFunc) (CustomComponent, error) +} + +// CustomComponent is a controller for running components within a CustomComponent. +type CustomComponent interface { + // LoadBody loads a River AST body into the CustomComponent. LoadBody can be called + // multiple times, and called prior to [CustomComponent.Run]. + // customComponentRegistry provides custom component definitions for the loaded config. + LoadBody(body ast.Body, args map[string]any, customComponentRegistry *CustomComponentRegistry) error + + // Run starts the CustomComponent. No components within the CustomComponent + // will be run until Run is called. + // + // Run blocks until the provided context is canceled. The ID of a CustomComponent as defined in + // ModuleController.NewCustomComponent will not be released until Run returns. + Run(context.Context) error } diff --git a/pkg/flow/internal/controller/node_component.go b/pkg/flow/internal/controller/node_builtin_component.go similarity index 82% rename from pkg/flow/internal/controller/node_component.go rename to pkg/flow/internal/controller/node_builtin_component.go index b99597809d4b..6de912418436 100644 --- a/pkg/flow/internal/controller/node_component.go +++ b/pkg/flow/internal/controller/node_builtin_component.go @@ -21,7 +21,6 @@ import ( "github.com/grafana/river/vm" "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/trace" - "go.uber.org/atomic" ) // ComponentID is a fully-qualified name of a component. Each element in @@ -60,13 +59,13 @@ func (id ComponentID) Equals(other ComponentID) bool { // DialFunc is a function to establish a network connection. type DialFunc func(ctx context.Context, network, address string) (net.Conn, error) -// ComponentGlobals are used by ComponentNodes to build managed components. All -// ComponentNodes should use the same ComponentGlobals. 
+// ComponentGlobals are used by BuiltinComponentNodes to build managed components. All +// BuiltinComponentNodes should use the same ComponentGlobals. type ComponentGlobals struct { Logger *logging.Logger // Logger shared between all managed components. TraceProvider trace.TracerProvider // Tracer shared between all managed components. DataPath string // Shared directory where component data may be stored - OnComponentUpdate func(cn *ComponentNode) // Informs controller that we need to reevaluate + OnBlockNodeUpdate func(cn BlockNode) // Informs controller that we need to reevaluate OnExportsChange func(exports map[string]any) // Invoked when the managed component updated its exports Registerer prometheus.Registerer // Registerer for serving agent and component metrics ControllerID string // ID of controller. @@ -74,12 +73,12 @@ type ComponentGlobals struct { GetServiceData func(name string) (interface{}, error) // Get data for a service. } -// ComponentNode is a controller node which manages a user-defined component. +// BuiltinComponentNode is a controller node which manages a builtin component. // -// ComponentNode manages the underlying component and caches its current -// arguments and exports. ComponentNode manages the arguments for the component +// BuiltinComponentNode manages the underlying component and caches its current +// arguments and exports. BuiltinComponentNode manages the arguments for the component // from a River block. -type ComponentNode struct { +type BuiltinComponentNode struct { id ComponentID globalID string label string @@ -90,8 +89,7 @@ type ComponentNode struct { registry *prometheus.Registry exportsType reflect.Type moduleController ModuleController - OnComponentUpdate func(cn *ComponentNode) // Informs controller that we need to reevaluate - lastUpdateTime atomic.Time + OnBlockNodeUpdate func(cn BlockNode) // Informs controller that we need to reevaluate mut sync.RWMutex block *ast.BlockStmt // Current River block to derive args from @@ -111,11 +109,11 @@ type ComponentNode struct { exports component.Exports // Evaluated exports for the managed component } -var _ BlockNode = (*ComponentNode)(nil) +var _ ComponentNode = (*BuiltinComponentNode)(nil) -// NewComponentNode creates a new ComponentNode from an initial ast.BlockStmt. +// NewBuiltinComponentNode creates a new BuiltinComponentNode from an initial ast.BlockStmt. // The underlying managed component isn't created until Evaluate is called. 
-func NewComponentNode(globals ComponentGlobals, reg component.Registration, b *ast.BlockStmt) *ComponentNode { +func NewBuiltinComponentNode(globals ComponentGlobals, reg component.Registration, b *ast.BlockStmt) *BuiltinComponentNode { var ( id = BlockComponentID(b) nodeID = id.String() @@ -137,7 +135,7 @@ func NewComponentNode(globals ComponentGlobals, reg component.Registration, b *a globalID = path.Join(globals.ControllerID, nodeID) } - cn := &ComponentNode{ + cn := &BuiltinComponentNode{ id: id, globalID: globalID, label: b.Label, @@ -146,7 +144,7 @@ func NewComponentNode(globals ComponentGlobals, reg component.Registration, b *a reg: reg, exportsType: getExportsType(reg), moduleController: globals.NewModuleController(globalID), - OnComponentUpdate: globals.OnComponentUpdate, + OnBlockNodeUpdate: globals.OnBlockNodeUpdate, block: b, eval: vm.New(b.Body), @@ -163,7 +161,7 @@ func NewComponentNode(globals ComponentGlobals, reg component.Registration, b *a return cn } -func getManagedOptions(globals ComponentGlobals, cn *ComponentNode) component.Options { +func getManagedOptions(globals ComponentGlobals, cn *BuiltinComponentNode) component.Options { cn.registry = prometheus.NewRegistry() return component.Options{ ID: cn.globalID, @@ -192,37 +190,37 @@ func getExportsType(reg component.Registration) reflect.Type { } // Registration returns the original registration of the component. -func (cn *ComponentNode) Registration() component.Registration { return cn.reg } +func (cn *BuiltinComponentNode) Registration() component.Registration { return cn.reg } // Component returns the instance of the managed component. Component may be -// nil if the ComponentNode has not been successfully evaluated yet. -func (cn *ComponentNode) Component() component.Component { +// nil if the BuiltinComponentNode has not been successfully evaluated yet. +func (cn *BuiltinComponentNode) Component() component.Component { cn.mut.RLock() defer cn.mut.RUnlock() return cn.managed } // ID returns the component ID of the managed component from its River block. -func (cn *ComponentNode) ID() ComponentID { return cn.id } +func (cn *BuiltinComponentNode) ID() ComponentID { return cn.id } // Label returns the label for the block or "" if none was specified. -func (cn *ComponentNode) Label() string { return cn.label } +func (cn *BuiltinComponentNode) Label() string { return cn.label } // ComponentName returns the component's type, i.e. `local.file.test` returns `local.file`. -func (cn *ComponentNode) ComponentName() string { return cn.componentName } +func (cn *BuiltinComponentNode) ComponentName() string { return cn.componentName } // NodeID implements dag.Node and returns the unique ID for this node. The // NodeID is the string representation of the component's ID from its River // block. -func (cn *ComponentNode) NodeID() string { return cn.nodeID } +func (cn *BuiltinComponentNode) NodeID() string { return cn.nodeID } // UpdateBlock updates the River block used to construct arguments for the // managed component. The new block isn't used until the next time Evaluate is // invoked. // // UpdateBlock will panic if the block does not match the component ID of the -// ComponentNode. -func (cn *ComponentNode) UpdateBlock(b *ast.BlockStmt) { +// BuiltinComponentNode. 
+func (cn *BuiltinComponentNode) UpdateBlock(b *ast.BlockStmt) { if !BlockComponentID(b).Equals(cn.id) { panic("UpdateBlock called with an River block with a different component ID") } @@ -239,7 +237,7 @@ func (cn *ComponentNode) UpdateBlock(b *ast.BlockStmt) { // // Evaluate will return an error if the River block cannot be evaluated or if // decoding to arguments fails. -func (cn *ComponentNode) Evaluate(scope *vm.Scope) error { +func (cn *BuiltinComponentNode) Evaluate(scope *vm.Scope) error { err := cn.evaluate(scope) switch err { @@ -252,7 +250,7 @@ func (cn *ComponentNode) Evaluate(scope *vm.Scope) error { return err } -func (cn *ComponentNode) evaluate(scope *vm.Scope) error { +func (cn *BuiltinComponentNode) evaluate(scope *vm.Scope) error { cn.mut.Lock() defer cn.mut.Unlock() @@ -299,7 +297,7 @@ func (cn *ComponentNode) evaluate(scope *vm.Scope) error { // // Run will immediately return ErrUnevaluated if Evaluate has never been called // successfully. Otherwise, Run will return nil. -func (cn *ComponentNode) Run(ctx context.Context) error { +func (cn *BuiltinComponentNode) Run(ctx context.Context) error { cn.mut.RLock() managed := cn.managed cn.mut.RUnlock() @@ -325,19 +323,19 @@ func (cn *ComponentNode) Run(ctx context.Context) error { return err } -// ErrUnevaluated is returned if ComponentNode.Run is called before a managed +// ErrUnevaluated is returned if BuiltinComponentNode.Run is called before a managed // component is built. var ErrUnevaluated = errors.New("managed component not built") // Arguments returns the current arguments of the managed component. -func (cn *ComponentNode) Arguments() component.Arguments { +func (cn *BuiltinComponentNode) Arguments() component.Arguments { cn.mut.RLock() defer cn.mut.RUnlock() return cn.args } // Block implements BlockNode and returns the current block of the managed component. -func (cn *ComponentNode) Block() *ast.BlockStmt { +func (cn *BuiltinComponentNode) Block() *ast.BlockStmt { cn.mut.RLock() defer cn.mut.RUnlock() return cn.block @@ -345,7 +343,7 @@ func (cn *ComponentNode) Block() *ast.BlockStmt { // Exports returns the current set of exports from the managed component. // Exports returns nil if the managed component does not have exports. -func (cn *ComponentNode) Exports() component.Exports { +func (cn *BuiltinComponentNode) Exports() component.Exports { cn.exportsMut.RLock() defer cn.exportsMut.RUnlock() return cn.exports @@ -353,7 +351,7 @@ func (cn *ComponentNode) Exports() component.Exports { // setExports is called whenever the managed component updates. e must be the // same type as the registered exports type of the managed component. -func (cn *ComponentNode) setExports(e component.Exports) { +func (cn *BuiltinComponentNode) setExports(e component.Exports) { if cn.exportsType == nil { panic(fmt.Sprintf("Component %s called OnStateChange but never registered an Exports type", cn.nodeID)) } @@ -379,19 +377,18 @@ func (cn *ComponentNode) setExports(e component.Exports) { if changed { // Inform the controller that we have new exports. - cn.lastUpdateTime.Store(time.Now()) - cn.OnComponentUpdate(cn) + cn.OnBlockNodeUpdate(cn) } } -// CurrentHealth returns the current health of the ComponentNode. +// CurrentHealth returns the current health of the BuiltinComponentNode. // -// The health of a ComponentNode is determined by combining: +// The health of a BuiltinComponentNode is determined by combining: // // 1. Health from the call to Run(). // 2. Health from the last call to Evaluate(). // 3. 
Health reported from the component. -func (cn *ComponentNode) CurrentHealth() component.Health { +func (cn *BuiltinComponentNode) CurrentHealth() component.Health { cn.healthMut.RLock() defer cn.healthMut.RUnlock() @@ -409,7 +406,7 @@ func (cn *ComponentNode) CurrentHealth() component.Health { } // DebugInfo returns debugging information from the managed component (if any). -func (cn *ComponentNode) DebugInfo() interface{} { +func (cn *BuiltinComponentNode) DebugInfo() interface{} { cn.mut.RLock() defer cn.mut.RUnlock() @@ -421,7 +418,7 @@ func (cn *ComponentNode) DebugInfo() interface{} { // setEvalHealth sets the internal health from a call to Evaluate. See Health // for information on how overall health is calculated. -func (cn *ComponentNode) setEvalHealth(t component.HealthType, msg string) { +func (cn *BuiltinComponentNode) setEvalHealth(t component.HealthType, msg string) { cn.healthMut.Lock() defer cn.healthMut.Unlock() @@ -434,7 +431,7 @@ func (cn *ComponentNode) setEvalHealth(t component.HealthType, msg string) { // setRunHealth sets the internal health from a call to Run. See Health for // information on how overall health is calculated. -func (cn *ComponentNode) setRunHealth(t component.HealthType, msg string) { +func (cn *BuiltinComponentNode) setRunHealth(t component.HealthType, msg string) { cn.healthMut.Lock() defer cn.healthMut.Unlock() @@ -447,6 +444,6 @@ func (cn *ComponentNode) setRunHealth(t component.HealthType, msg string) { // ModuleIDs returns the current list of modules that this component is // managing. -func (cn *ComponentNode) ModuleIDs() []string { +func (cn *BuiltinComponentNode) ModuleIDs() []string { return cn.moduleController.ModuleIDs() } diff --git a/pkg/flow/internal/controller/node_component_test.go b/pkg/flow/internal/controller/node_builtin_component_test.go similarity index 93% rename from pkg/flow/internal/controller/node_component_test.go rename to pkg/flow/internal/controller/node_builtin_component_test.go index 6eb46f004601..6a1165b2cc6d 100644 --- a/pkg/flow/internal/controller/node_component_test.go +++ b/pkg/flow/internal/controller/node_builtin_component_test.go @@ -14,7 +14,7 @@ func TestGlobalID(t *testing.T) { NewModuleController: func(id string) ModuleController { return nil }, - }, &ComponentNode{ + }, &BuiltinComponentNode{ nodeID: "local.id", globalID: "module.file/local.id", }) @@ -28,7 +28,7 @@ func TestLocalID(t *testing.T) { NewModuleController: func(id string) ModuleController { return nil }, - }, &ComponentNode{ + }, &BuiltinComponentNode{ nodeID: "local.id", globalID: "local.id", }) diff --git a/pkg/flow/internal/controller/node_custom_component.go b/pkg/flow/internal/controller/node_custom_component.go new file mode 100644 index 000000000000..f770459451a9 --- /dev/null +++ b/pkg/flow/internal/controller/node_custom_component.go @@ -0,0 +1,298 @@ +package controller + +import ( + "context" + "fmt" + "path" + "reflect" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/grafana/agent/component" + "github.com/grafana/agent/pkg/flow/logging/level" + "github.com/grafana/river/ast" + "github.com/grafana/river/vm" +) + +// getCustomComponentConfig is used by the custom component to retrieve its template and the customComponentRegistry associated with it. +type getCustomComponentConfig func(componentName string) (ast.Body, *CustomComponentRegistry, error) + +// CustomComponentNode is a controller node which manages a custom component. 
+// +// CustomComponentNode manages the underlying custom component and caches its current +// arguments and exports. +type CustomComponentNode struct { + id ComponentID + globalID string + label string + componentName string + nodeID string // Cached from id.String() to avoid allocating new strings every time NodeID is called. + moduleController ModuleController + OnBlockNodeUpdate func(cn BlockNode) // Informs controller that we need to reevaluate + logger log.Logger + + getConfig getCustomComponentConfig // Retrieve the custom component config. + + mut sync.RWMutex + block *ast.BlockStmt // Current River block to derive args from + eval *vm.Evaluator + managed CustomComponent // Inner managed custom component + args component.Arguments // Evaluated arguments for the managed component + + // NOTE(rfratto): health and exports have their own mutex because they may be + // set asynchronously while mut is still being held (i.e., when calling Evaluate + // and the managed custom component immediately creates new exports) + + healthMut sync.RWMutex + evalHealth component.Health // Health of the last evaluate + runHealth component.Health // Health of running the component + + exportsMut sync.RWMutex + exports component.Exports // Evaluated exports for the managed custom component +} + +var _ ComponentNode = (*CustomComponentNode)(nil) + +// NewCustomComponentNode creates a new CustomComponentNode from an initial ast.BlockStmt. +// The underlying managed custom component isn't created until Evaluate is called. +func NewCustomComponentNode(globals ComponentGlobals, b *ast.BlockStmt, getConfig getCustomComponentConfig) *CustomComponentNode { + var ( + id = BlockComponentID(b) + nodeID = id.String() + ) + + initHealth := component.Health{ + Health: component.HealthTypeUnknown, + Message: "node custom component created", + UpdateTime: time.Now(), + } + + // We need to generate a globally unique component ID to give to the + // component and for use with telemetry data which doesn't support + // reconstructing the global ID. For everything else (HTTP, data), we can + // just use the controller-local ID as those values are guaranteed to be + // globally unique. + globalID := nodeID + if globals.ControllerID != "" { + globalID = path.Join(globals.ControllerID, nodeID) + } + + componentName := b.GetBlockName() + + cn := &CustomComponentNode{ + id: id, + globalID: globalID, + label: b.Label, + nodeID: nodeID, + componentName: componentName, + moduleController: globals.NewModuleController(globalID), + OnBlockNodeUpdate: globals.OnBlockNodeUpdate, + logger: log.With(globals.Logger, "component", globalID), + getConfig: getConfig, + + block: b, + eval: vm.New(b.Body), + + evalHealth: initHealth, + runHealth: initHealth, + } + + return cn +} + +// ID returns the component ID of the managed component from its River block. +func (cn *CustomComponentNode) ID() ComponentID { return cn.id } + +// Label returns the label for the block or "" if none was specified. +func (cn *CustomComponentNode) Label() string { return cn.label } + +// NodeID implements dag.Node and returns the unique ID for this node. The +// NodeID is the string representation of the component's ID from its River +// block. +func (cn *CustomComponentNode) NodeID() string { return cn.nodeID } + +// UpdateBlock updates the River block used to construct arguments for the +// managed component. The new block isn't used until the next time Evaluate is +// invoked. 
+//
+// UpdateBlock will panic if the block does not match the component ID of the
+// CustomComponentNode.
+func (cn *CustomComponentNode) UpdateBlock(b *ast.BlockStmt) {
+	if !BlockComponentID(b).Equals(cn.id) {
+		panic("UpdateBlock called with a River block with a different component ID")
+	}
+
+	cn.mut.Lock()
+	defer cn.mut.Unlock()
+	cn.block = b
+	cn.eval = vm.New(b.Body)
+}
+
+// Evaluate implements BlockNode. It updates the arguments by re-evaluating the River block
+// with the provided scope, and updates the custom component by retrieving its definition
+// from the corresponding declare node.
+// The managed custom component will be built the first time Evaluate is called.
+//
+// Evaluate will return an error if the River block cannot be evaluated, if
+// decoding to arguments fails or if the custom component definition cannot be retrieved.
+func (cn *CustomComponentNode) Evaluate(evalScope *vm.Scope) error {
+	err := cn.evaluate(evalScope)
+
+	switch err {
+	case nil:
+		cn.setEvalHealth(component.HealthTypeHealthy, "component evaluated")
+	default:
+		msg := fmt.Sprintf("component evaluation failed: %s", err)
+		cn.setEvalHealth(component.HealthTypeUnhealthy, msg)
+	}
+	return err
+}
+
+func (cn *CustomComponentNode) evaluate(evalScope *vm.Scope) error {
+	cn.mut.Lock()
+	defer cn.mut.Unlock()
+
+	var args map[string]any
+	if err := cn.eval.Evaluate(evalScope, &args); err != nil {
+		return fmt.Errorf("decoding River: %w", err)
+	}
+
+	cn.args = args
+
+	if cn.managed == nil {
+		// We haven't built the managed custom component successfully yet.
+		mod, err := cn.moduleController.NewCustomComponent("", func(exports map[string]any) { cn.setExports(exports) })
+		if err != nil {
+			return fmt.Errorf("creating custom component controller: %w", err)
+		}
+		cn.managed = mod
+	}
+
+	template, customComponentRegistry, err := cn.getConfig(cn.componentName)
+	if err != nil {
+		return fmt.Errorf("loading custom component controller: %w", err)
+	}
+
+	// Reload the custom component with the new config.
+	if err := cn.managed.LoadBody(template, args, customComponentRegistry); err != nil {
+		return fmt.Errorf("updating custom component: %w", err)
+	}
+	return nil
+}
+
+func (cn *CustomComponentNode) Run(ctx context.Context) error {
+	cn.mut.RLock()
+	managed := cn.managed
+	logger := cn.logger
+	cn.mut.RUnlock()
+
+	if managed == nil {
+		return ErrUnevaluated
+	}
+
+	cn.setRunHealth(component.HealthTypeHealthy, "started custom component")
+	err := managed.Run(ctx)
+	if err != nil {
+		level.Error(logger).Log("msg", "error running custom component", "id", cn.nodeID, "err", err)
+	}
+
+	level.Info(logger).Log("msg", "custom component exited")
+	cn.setRunHealth(component.HealthTypeExited, "custom component shut down")
+	return err
+}
+
+// Arguments returns the current arguments of the managed custom component.
+func (cn *CustomComponentNode) Arguments() component.Arguments {
+	cn.mut.RLock()
+	defer cn.mut.RUnlock()
+	return cn.args
+}
+
+// Block implements BlockNode and returns the current block of the managed custom component.
+func (cn *CustomComponentNode) Block() *ast.BlockStmt {
+	cn.mut.RLock()
+	defer cn.mut.RUnlock()
+	return cn.block
+}
+
+// Exports returns the current set of exports from the managed custom component.
+// Exports returns nil if the managed custom component does not have exports.
+func (cn *CustomComponentNode) Exports() component.Exports {
+	cn.exportsMut.RLock()
+	defer cn.exportsMut.RUnlock()
+	return cn.exports
+}
+
+// setExports is called whenever the managed custom component updates. e must be the
+// same type as the registered exports type of the managed custom component.
+func (cn *CustomComponentNode) setExports(e component.Exports) {
+	// Some components may aggressively reexport values even though no exposed
+	// state has changed. This may be done for components which always supply
+	// exports whenever their arguments are evaluated without tracking internal
+	// state to see if anything actually changed.
+	//
+	// To avoid needlessly reevaluating components we'll ignore unchanged
+	// exports.
+	var changed bool
+
+	cn.exportsMut.Lock()
+	if !reflect.DeepEqual(cn.exports, e) {
+		changed = true
+		cn.exports = e
+	}
+	cn.exportsMut.Unlock()
+
+	if changed {
+		// Inform the controller that we have new exports.
+		cn.OnBlockNodeUpdate(cn)
+	}
+}
+
+// CurrentHealth returns the current health of the CustomComponentNode.
+//
+// The health of a CustomComponentNode is determined by combining:
+//
+// 1. Health from the call to Run().
+// 2. Health from the last call to Evaluate().
+func (cn *CustomComponentNode) CurrentHealth() component.Health {
+	cn.healthMut.RLock()
+	defer cn.healthMut.RUnlock()
+	return component.LeastHealthy(cn.runHealth, cn.evalHealth)
+}
+
+// setEvalHealth sets the internal health from a call to Evaluate. See Health
+// for information on how overall health is calculated.
+func (cn *CustomComponentNode) setEvalHealth(t component.HealthType, msg string) {
+	cn.healthMut.Lock()
+	defer cn.healthMut.Unlock()
+
+	cn.evalHealth = component.Health{
+		Health:     t,
+		Message:    msg,
+		UpdateTime: time.Now(),
+	}
+}
+
+// setRunHealth sets the internal health from a call to Run. See Health for
+// information on how overall health is calculated.
+func (cn *CustomComponentNode) setRunHealth(t component.HealthType, msg string) {
+	cn.healthMut.Lock()
+	defer cn.healthMut.Unlock()
+
+	cn.runHealth = component.Health{
+		Health:     t,
+		Message:    msg,
+		UpdateTime: time.Now(),
+	}
+}
+
+// ComponentName returns the name of the component.
+func (cn *CustomComponentNode) ComponentName() string {
+	return cn.componentName
+}
+
+// TODO: currently used by the component provider to access the components running within
+// the custom components. Change it when getting rid of old modules.
+func (cn *CustomComponentNode) ModuleIDs() []string {
+	return cn.moduleController.ModuleIDs()
+}
diff --git a/pkg/flow/internal/controller/node_declare.go b/pkg/flow/internal/controller/node_declare.go
new file mode 100644
index 000000000000..7baf6ce113b8
--- /dev/null
+++ b/pkg/flow/internal/controller/node_declare.go
@@ -0,0 +1,42 @@
+package controller
+
+import (
+	"github.com/grafana/river/ast"
+	"github.com/grafana/river/vm"
+)
+
+// DeclareNode represents a declare block in the DAG.
+type DeclareNode struct {
+	label         string
+	nodeID        string
+	componentName string
+	block         *ast.BlockStmt
+}
+
+var _ BlockNode = (*DeclareNode)(nil)
+
+// NewDeclareNode creates a new declare node whose content will be loaded by custom components.
+func NewDeclareNode(block *ast.BlockStmt) *DeclareNode {
+	return &DeclareNode{
+		label:         block.Label,
+		nodeID:        BlockComponentID(block).String(),
+		componentName: block.GetBlockName(),
+		block:         block,
+	}
+}
+
+// Evaluate does nothing for this node.
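Stepping back to `setExports` above: the `reflect.DeepEqual` guard is what keeps dependants from being re-evaluated on no-op exports. A tiny self-contained sketch of that change detection:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	current := map[string]any{"output": 42}

	unchanged := map[string]any{"output": 42}
	changed := map[string]any{"output": 43}

	// Only a real change should trigger OnBlockNodeUpdate.
	fmt.Println(!reflect.DeepEqual(current, unchanged)) // false: no reevaluation
	fmt.Println(!reflect.DeepEqual(current, changed))   // true: dependants reevaluated
}
```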
+func (cn *DeclareNode) Evaluate(scope *vm.Scope) error {
+	return nil
+}
+
+// Label returns the label of the block.
+func (cn *DeclareNode) Label() string { return cn.label }
+
+// Block implements BlockNode and returns the current block of the declare node.
+func (cn *DeclareNode) Block() *ast.BlockStmt {
+	return cn.block
+}
+
+// NodeID implements dag.Node and returns the unique ID for the declare node.
+func (cn *DeclareNode) NodeID() string { return cn.nodeID }
diff --git a/pkg/flow/internal/controller/node_service.go b/pkg/flow/internal/controller/node_service.go
index 42bb73cfcc81..8d3a3303ea4e 100644
--- a/pkg/flow/internal/controller/node_service.go
+++ b/pkg/flow/internal/controller/node_service.go
@@ -24,10 +24,7 @@ type ServiceNode struct {
 	args component.Arguments // Evaluated arguments for the managed component
 }
 
-var (
-	_ BlockNode    = (*ServiceNode)(nil)
-	_ RunnableNode = (*ServiceNode)(nil)
-)
+var _ RunnableNode = (*ServiceNode)(nil)
 
 // NewServiceNode creates a new instance of a ServiceNode from an instance of a
 // Service. The provided host is used when running the service.
diff --git a/pkg/flow/internal/controller/queue.go b/pkg/flow/internal/controller/queue.go
index a8cd1b5bae05..65c1448573b9 100644
--- a/pkg/flow/internal/controller/queue.go
+++ b/pkg/flow/internal/controller/queue.go
@@ -2,32 +2,37 @@ package controller
 
 import (
 	"sync"
+	"time"
 )
 
-// Queue is a thread-safe, insertion-ordered set of components.
+// Queue is a thread-safe, insertion-ordered set of nodes.
 //
-// Queue is intended for tracking components that have updated their Exports
-// for later reevaluation.
+// Queue is intended for tracking nodes that have been updated for later reevaluation.
 type Queue struct {
 	mut         sync.Mutex
-	queuedSet   map[*ComponentNode]struct{}
-	queuedOrder []*ComponentNode
+	queuedSet   map[*QueuedNode]struct{}
+	queuedOrder []*QueuedNode
 
 	updateCh chan struct{}
 }
 
+type QueuedNode struct {
+	Node            BlockNode
+	LastUpdatedTime time.Time
+}
+
 // NewQueue returns a new queue.
 func NewQueue() *Queue {
 	return &Queue{
 		updateCh:    make(chan struct{}, 1),
-		queuedSet:   make(map[*ComponentNode]struct{}),
-		queuedOrder: make([]*ComponentNode, 0),
+		queuedSet:   make(map[*QueuedNode]struct{}),
+		queuedOrder: make([]*QueuedNode, 0),
 	}
 }
 
-// Enqueue inserts a new component into the Queue. Enqueue is a no-op if the
-// component is already in the Queue.
-func (q *Queue) Enqueue(c *ComponentNode) {
+// Enqueue inserts a new node into the Queue. Enqueue is a no-op if the
+// node is already in the Queue.
+func (q *Queue) Enqueue(c *QueuedNode) {
 	q.mut.Lock()
 	defer q.mut.Unlock()
@@ -47,14 +52,14 @@ func (q *Queue) Enqueue(c *QueuedNode) {
 
 // Chan returns a channel which is written to when the queue is non-empty.
 func (q *Queue) Chan() <-chan struct{} { return q.updateCh }
 
-// DequeueAll removes all components from the queue and returns them.
-func (q *Queue) DequeueAll() []*ComponentNode {
+// DequeueAll removes all queued nodes from the queue and returns them.
+func (q *Queue) DequeueAll() []*QueuedNode { q.mut.Lock() defer q.mut.Unlock() all := q.queuedOrder - q.queuedOrder = make([]*ComponentNode, 0) - q.queuedSet = make(map[*ComponentNode]struct{}) + q.queuedOrder = make([]*QueuedNode, 0) + q.queuedSet = make(map[*QueuedNode]struct{}) return all } diff --git a/pkg/flow/internal/controller/queue_test.go b/pkg/flow/internal/controller/queue_test.go index c93fb14ef8fc..c0a7cd930675 100644 --- a/pkg/flow/internal/controller/queue_test.go +++ b/pkg/flow/internal/controller/queue_test.go @@ -9,7 +9,7 @@ import ( ) func TestEnqueueDequeue(t *testing.T) { - tn := &ComponentNode{} + tn := &QueuedNode{} q := NewQueue() q.Enqueue(tn) require.Lenf(t, q.queuedSet, 1, "queue should be 1") @@ -26,7 +26,7 @@ func TestDequeue_Empty(t *testing.T) { } func TestDequeue_InOrder(t *testing.T) { - c1, c2, c3 := &ComponentNode{}, &ComponentNode{}, &ComponentNode{} + c1, c2, c3 := &QueuedNode{}, &QueuedNode{}, &QueuedNode{} q := NewQueue() q.Enqueue(c1) q.Enqueue(c2) @@ -41,7 +41,7 @@ func TestDequeue_InOrder(t *testing.T) { } func TestDequeue_NoDuplicates(t *testing.T) { - c1, c2 := &ComponentNode{}, &ComponentNode{} + c1, c2 := &QueuedNode{}, &QueuedNode{} q := NewQueue() q.Enqueue(c1) q.Enqueue(c1) @@ -58,7 +58,7 @@ func TestDequeue_NoDuplicates(t *testing.T) { } func TestEnqueue_ChannelNotification(t *testing.T) { - c1 := &ComponentNode{} + c1 := &QueuedNode{} q := NewQueue() notificationsCount := atomic.Int32{} diff --git a/pkg/flow/internal/controller/scheduler.go b/pkg/flow/internal/controller/scheduler.go index fe0576d49da6..10993aa80194 100644 --- a/pkg/flow/internal/controller/scheduler.go +++ b/pkg/flow/internal/controller/scheduler.go @@ -6,9 +6,9 @@ import ( "sync" ) -// RunnableNode is any dag.Node which can also be run. +// RunnableNode is any BlockNode which can also be run. 
type RunnableNode interface { - NodeID() string + BlockNode Run(ctx context.Context) error } diff --git a/pkg/flow/internal/controller/scheduler_test.go b/pkg/flow/internal/controller/scheduler_test.go index 644423f43699..c965d99c9db7 100644 --- a/pkg/flow/internal/controller/scheduler_test.go +++ b/pkg/flow/internal/controller/scheduler_test.go @@ -7,6 +7,8 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/pkg/flow/internal/controller" + "github.com/grafana/river/ast" + "github.com/grafana/river/vm" "github.com/stretchr/testify/require" ) @@ -93,8 +95,10 @@ type fakeRunnable struct { var _ controller.RunnableNode = fakeRunnable{} -func (fr fakeRunnable) NodeID() string { return fr.ID } -func (fr fakeRunnable) Run(ctx context.Context) error { return fr.Component.Run(ctx) } +func (fr fakeRunnable) NodeID() string { return fr.ID } +func (fr fakeRunnable) Run(ctx context.Context) error { return fr.Component.Run(ctx) } +func (fr fakeRunnable) Block() *ast.BlockStmt { return nil } +func (fr fakeRunnable) Evaluate(scope *vm.Scope) error { return nil } type mockComponent struct { RunFunc func(ctx context.Context) error diff --git a/pkg/flow/module.go b/pkg/flow/module.go index ec97aab093d5..5c5c5609faf6 100644 --- a/pkg/flow/module.go +++ b/pkg/flow/module.go @@ -12,6 +12,7 @@ import ( "github.com/grafana/agent/pkg/flow/logging" "github.com/grafana/agent/pkg/flow/logging/level" "github.com/grafana/agent/pkg/flow/tracing" + "github.com/grafana/river/ast" "github.com/grafana/river/scanner" "github.com/prometheus/client_golang/prometheus" "golang.org/x/exp/maps" @@ -58,6 +59,29 @@ func (m *moduleController) NewModule(id string, export component.ExportFunc) (co return mod, nil } +// NewCustomComponent creates a new, unstarted CustomComponent. +func (m *moduleController) NewCustomComponent(id string, export component.ExportFunc) (controller.CustomComponent, error) { + if id != "" && !scanner.IsValidIdentifier(id) { + return nil, fmt.Errorf("customComponent ID %q is not a valid River identifier", id) + } + + m.mut.Lock() + defer m.mut.Unlock() + fullPath := m.o.ID + if id != "" { + fullPath = path.Join(fullPath, id) + } + + mod := newModule(&moduleOptions{ + ID: fullPath, + export: export, + moduleControllerOptions: m.o, + parent: m, + }) + + return mod, nil +} + func (m *moduleController) removeModule(mod *module) { m.mut.Lock() defer m.mut.Unlock() @@ -136,6 +160,15 @@ func (c *module) LoadConfig(config []byte, args map[string]any) error { return c.f.LoadSource(ff, args) } +// LoadBody loads a pre-parsed River config. +func (c *module) LoadBody(body ast.Body, args map[string]any, customComponentRegistry *controller.CustomComponentRegistry) error { + ff, err := sourceFromBody(body) + if err != nil { + return err + } + return c.f.loadSource(ff, args, customComponentRegistry) +} + // Run starts the Module. No components within the Module // will be run until Run is called. // diff --git a/pkg/flow/module_caching_test.go b/pkg/flow/module_eval_test.go similarity index 72% rename from pkg/flow/module_caching_test.go rename to pkg/flow/module_eval_test.go index e22e0583cbda..8b6d02c7c7d4 100644 --- a/pkg/flow/module_caching_test.go +++ b/pkg/flow/module_eval_test.go @@ -1,7 +1,7 @@ package flow_test -// This file contains tests which verify that the Flow controller correctly updates and caches modules' arguments -// and exports in presence of multiple components. 
+// This file contains tests which verify that the Flow controller correctly evaluates and updates modules, including +// the module's arguments and exports. import ( "context" @@ -141,6 +141,74 @@ func TestUpdates_ThroughModule(t *testing.T) { }, 3*time.Second, 10*time.Millisecond) } +func TestUpdates_TwoModules_SameCompNames(t *testing.T) { + // We use this module in a Flow config below. + module := ` + testcomponents.count "inc" { + frequency = "1ms" + max = 100 + } + + testcomponents.passthrough "pt" { + input = testcomponents.count.inc.count + lag = "1ms" + } + + export "output" { + value = testcomponents.passthrough.pt.output + } +` + + // We run two modules with above body, which will have the same component names, but different module IDs. + config := ` + module.string "test_1" { + content = ` + strconv.Quote(module) + ` + } + + testcomponents.summation "sum_1" { + input = module.string.test_1.exports.output + } + + module.string "test_2" { + content = ` + strconv.Quote(module) + ` + } + + testcomponents.summation "sum_2" { + input = module.string.test_2.exports.output + } +` + + ctrl := flow.New(testOptions(t)) + f, err := flow.ParseSource(t.Name(), []byte(config)) + require.NoError(t, err) + require.NotNil(t, f) + + err = ctrl.LoadSource(f, nil) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + go func() { + ctrl.Run(ctx) + close(done) + }() + defer func() { + cancel() + <-done + }() + + // Verify updates propagated correctly. + require.Eventually(t, func() bool { + export := getExport[testcomponents.SummationExports](t, ctrl, "", "testcomponents.summation.sum_1") + return export.LastAdded == 100 + }, 3*time.Second, 10*time.Millisecond) + + require.Eventually(t, func() bool { + export := getExport[testcomponents.SummationExports](t, ctrl, "", "testcomponents.summation.sum_2") + return export.LastAdded == 100 + }, 3*time.Second, 10*time.Millisecond) +} + func testOptions(t *testing.T) flow.Options { t.Helper() s, err := logging.New(os.Stderr, logging.DefaultOptions) diff --git a/pkg/flow/module_fail_test.go b/pkg/flow/module_fail_test.go index 28fb0923a892..071c36813be3 100644 --- a/pkg/flow/module_fail_test.go +++ b/pkg/flow/module_fail_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/grafana/agent/pkg/flow/componenttest" + "github.com/grafana/agent/pkg/flow/internal/controller" "github.com/stretchr/testify/require" ) @@ -23,7 +24,7 @@ func TestIDRemovalIfFailedToLoad(t *testing.T) { go f.Run(ctx) var t1 *componenttest.TestFailModule require.Eventually(t, func() bool { - t1 = f.loader.Components()[0].Component().(*componenttest.TestFailModule) + t1 = f.loader.Components()[0].(*controller.BuiltinComponentNode).Component().(*componenttest.TestFailModule) return t1 != nil }, 10*time.Second, 100*time.Millisecond) require.Eventually(t, func() bool { diff --git a/pkg/flow/module_test.go b/pkg/flow/module_test.go index 4e4ddb9faaa8..c5f4417c84c3 100644 --- a/pkg/flow/module_test.go +++ b/pkg/flow/module_test.go @@ -7,8 +7,10 @@ import ( "time" "github.com/grafana/agent/component" + "github.com/grafana/agent/pkg/flow/internal/controller" "github.com/grafana/agent/pkg/flow/internal/worker" "github.com/grafana/agent/pkg/flow/logging" + "github.com/grafana/agent/service" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) @@ -43,6 +45,9 @@ const exportDummy = ` value = "bob" }` +const serviceConfig = ` + testservice {}` + func TestModule(t *testing.T) { tt := []struct { name string @@ 
-72,6 +77,12 @@ func TestModule(t *testing.T) { exportModuleContent: exportStringConfig, expectedErrorContains: "tracing block not allowed inside a module", }, + { + name: "Service blocks not allowed in module config", + argumentModuleContent: argumentConfig + serviceConfig, + exportModuleContent: exportStringConfig, + expectedErrorContains: "service blocks not allowed inside a module: \"testservice\"", + }, { name: "Argument not defined in module source", argumentModuleContent: `argument "different_argument" {}`, @@ -245,12 +256,19 @@ func testModuleControllerOptions(t *testing.T) *moduleControllerOptions { s, err := logging.New(os.Stderr, logging.DefaultOptions) require.NoError(t, err) + services := []service.Service{ + &testService{}, + } + + serviceMap := controller.NewServiceMap(services) + return &moduleControllerOptions{ Logger: s, DataPath: t.TempDir(), Reg: prometheus.NewRegistry(), ModuleRegistry: newModuleRegistry(), WorkerPool: worker.NewFixedWorkerPool(1, 100), + ServiceMap: serviceMap, } } @@ -307,3 +325,23 @@ func (t *testModule) Run(ctx context.Context) error { func (t *testModule) Update(_ component.Arguments) error { return nil } + +type testService struct{} + +func (t *testService) Definition() service.Definition { + return service.Definition{ + Name: "testservice", + } +} + +func (t *testService) Run(ctx context.Context, host service.Host) error { + return nil +} + +func (t *testService) Update(newConfig any) error { + return nil +} + +func (t *testService) Data() any { + return nil +} diff --git a/pkg/flow/source.go b/pkg/flow/source.go index acd7d2ce2f58..cb36a26aec4f 100644 --- a/pkg/flow/source.go +++ b/pkg/flow/source.go @@ -19,8 +19,9 @@ type Source struct { // Components holds the list of raw River AST blocks describing components. // The Flow controller can interpret them. - components []*ast.BlockStmt - configBlocks []*ast.BlockStmt + components []*ast.BlockStmt + configBlocks []*ast.BlockStmt + declareBlocks []*ast.BlockStmt } // ParseSource parses the River file specified by bb into a File. name should be @@ -36,7 +37,18 @@ func ParseSource(name string, bb []byte) (*Source, error) { if err != nil { return nil, err } + source, err := sourceFromBody(node.Body) + if err != nil { + return nil, err + } + source.sourceMap = map[string][]byte{name: bb} + source.hash = sha256.Sum256(bb) + return source, nil +} +// sourceFromBody creates a Source from an existing AST. This must only be used +// internally as there will be no sourceMap or hash. +func sourceFromBody(body ast.Body) (*Source, error) { // Look for predefined non-components blocks (i.e., logging), and store // everything else into a list of components. 
diff --git a/pkg/integrations/v2/register.go b/pkg/integrations/v2/register.go
index 0deb356c6cb9..52b26b7794c4 100644
--- a/pkg/integrations/v2/register.go
+++ b/pkg/integrations/v2/register.go
@@ -228,7 +228,7 @@ func MarshalYAML(v interface{}) (interface{}, error) {
 			panic(fmt.Sprintf("config not registered: %T", data))
 		}

-		if _, exists := uniqueSingletons[fieldName]; exists {
+		if _, exists := uniqueSingletons[fieldName]; exists && integrationType == TypeSingleton {
 			return nil, fmt.Errorf("integration %q may not be defined more than once", fieldName)
 		}
 		uniqueSingletons[fieldName] = struct{}{}
diff --git a/pkg/integrations/v2/register_test.go b/pkg/integrations/v2/register_test.go
index 2e883131144e..df99cf50b481 100644
--- a/pkg/integrations/v2/register_test.go
+++ b/pkg/integrations/v2/register_test.go
@@ -186,6 +186,39 @@ func TestIntegrationRegistration_Marshal_MultipleSingleton(t *testing.T) {
 	require.EqualError(t, err, `integration "test" may not be defined more than once`)
 }

+func TestIntegrationRegistration_Marshal_Multiplex(t *testing.T) {
+	setRegistered(t, map[Config]Type{
+		&testIntegrationA{}: TypeMultiplex,
+		&testIntegrationB{}: TypeMultiplex,
+	})
+
+	// Generate a valid config, which has two instances of a Multiplex
+	// integration; unlike Singletons, this must marshal without error.
+	input := testFullConfig{
+		Name:     "John Doe",
+		Duration: 500 * time.Millisecond,
+		Default:  12345,
+		Configs: []Config{
+			&testIntegrationA{Text: "Hello, world!", Truth: true},
+			&testIntegrationA{Text: "Hello again!", Truth: true},
+		},
+	}
+
+	expectedCfg := `name: John Doe
+duration: 500ms
+default: 12345
+test_configs:
+- text: Hello, world!
+  truth: true
+- text: Hello again!
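The register.go fix above narrows the duplicate check: a repeated integration name is only an error for Singleton integrations, which is exactly what the new Multiplex test exercises. A condensed, hypothetical sketch of that logic follows; checkDuplicates and the local types are illustrative, not the real package API.

package main

import "fmt"

// Type mirrors the integration cardinality from pkg/integrations/v2.
type Type int

const (
	TypeSingleton Type = iota // at most one instance allowed
	TypeMultiplex             // any number of instances allowed
)

type integration struct {
	name string
	typ  Type
}

// checkDuplicates condenses the fixed MarshalYAML logic: a repeated name is
// only an error when the integration is a Singleton.
func checkDuplicates(integrations []integration) error {
	seen := map[string]struct{}{}
	for _, in := range integrations {
		if _, exists := seen[in.name]; exists && in.typ == TypeSingleton {
			return fmt.Errorf("integration %q may not be defined more than once", in.name)
		}
		seen[in.name] = struct{}{}
	}
	return nil
}

func main() {
	multiplex := []integration{{"test", TypeMultiplex}, {"test", TypeMultiplex}}
	singleton := []integration{{"node", TypeSingleton}, {"node", TypeSingleton}}

	fmt.Println(checkDuplicates(multiplex)) // <nil>
	fmt.Println(checkDuplicates(singleton)) // integration "node" may not be defined more than once
}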
+  truth: true
+`
+
+	cfg, err := yaml.Marshal(&input)
+	require.NoError(t, err)
+	require.Equal(t, expectedCfg, string(cfg))
+}
+
 type legacyConfig struct {
 	Text string `yaml:"text"`
 }
diff --git a/pkg/metrics/instance/instance_test.go b/pkg/metrics/instance/instance_test.go
index 5b9ee503abb7..e82117e797df 100644
--- a/pkg/metrics/instance/instance_test.go
+++ b/pkg/metrics/instance/instance_test.go
@@ -416,6 +416,5 @@ func runInstance(t *testing.T, i *Instance) {
 	t.Cleanup(func() { cancel() })
 	go require.NotPanics(t, func() {
 		_ = i.Run(ctx)
-		require.NotNil(t, i.WriteHandler())
 	})
 }
diff --git a/pkg/mimir/client/client.go b/pkg/mimir/client/client.go
index 9145e7b56e41..b6ed18068857 100644
--- a/pkg/mimir/client/client.go
+++ b/pkg/mimir/client/client.go
@@ -20,22 +20,18 @@ import (
 	"github.com/prometheus/prometheus/model/rulefmt"
 )

-const (
-	rulerAPIPath  = "/prometheus/config/v1/rules"
-	legacyAPIPath = "/api/v1/rules"
-)
-
 var (
-	ErrNoConfig         = errors.New("No config exists for this user")
+	ErrNoConfig         = errors.New("no config exists for this user")
 	ErrResourceNotFound = errors.New("requested resource not found")
 )

 // Config is used to configure a MimirClient.
 type Config struct {
-	ID               string
-	Address          string
-	UseLegacyRoutes  bool
-	HTTPClientConfig config.HTTPClientConfig
+	ID                   string
+	Address              string
+	UseLegacyRoutes      bool
+	HTTPClientConfig     config.HTTPClientConfig
+	PrometheusHTTPPrefix string
 }

 type Interface interface {
@@ -65,9 +61,12 @@ func New(logger log.Logger, cfg Config, timingHistogram *prometheus.HistogramVec) (*MimirClient, error) {
 		return nil, err
 	}

-	path := rulerAPIPath
+	path, err := url.JoinPath(cfg.PrometheusHTTPPrefix, "/config/v1/rules")
+	if err != nil {
+		return nil, err
+	}
 	if cfg.UseLegacyRoutes {
-		path = legacyAPIPath
+		path = "/api/v1/rules"
 	}

 	collector := instrument.NewHistogramCollector(timingHistogram)
diff --git a/pkg/mimir/client/client_test.go b/pkg/mimir/client/client_test.go
index 5ef8a373519b..262e9918a9bf 100644
--- a/pkg/mimir/client/client_test.go
+++ b/pkg/mimir/client/client_test.go
@@ -79,6 +79,13 @@ func TestBuildURL(t *testing.T) {
 			url:       "http://mimir.local/apathto",
 			resultURL: "http://mimir.local/apathto/prometheus/config/v1/rules/last-char-slash%2F",
 		},
+		{
+			name:      "builds the correct URL with a customized prometheus_http_prefix",
+			path:      "/mimir/config/v1/rules",
+			method:    http.MethodPost,
+			url:       "http://mimir.local/",
+			resultURL: "http://mimir.local/mimir/config/v1/rules",
+		},
 	}

 	for _, tt := range tc {
diff --git a/pkg/mimir/client/rules_test.go b/pkg/mimir/client/rules_test.go
index e2ab18a17839..a4ccdde6e509 100644
--- a/pkg/mimir/client/rules_test.go
+++ b/pkg/mimir/client/rules_test.go
@@ -22,49 +22,63 @@ func TestMimirClient_X(t *testing.T) {
 	}))
 	defer ts.Close()

-	client, err := New(log.NewNopLogger(), Config{
-		Address: ts.URL,
-	}, prometheus.NewHistogramVec(prometheus.HistogramOpts{}, instrument.HistogramCollectorBuckets))
-	require.NoError(t, err)
-
 	for _, tc := range []struct {
-		test       string
-		namespace  string
-		name       string
-		expURLPath string
+		test                 string
+		namespace            string
+		name                 string
+		prometheusHTTPPrefix string
+		expURLPath           string
 	}{
 		{
-			test:       "regular-characters",
-			namespace:  "my-namespace",
-			name:       "my-name",
-			expURLPath: "/prometheus/config/v1/rules/my-namespace/my-name",
+			test:                 "regular-characters",
+			namespace:            "my-namespace",
+			name:                 "my-name",
+			expURLPath:           "/prometheus/config/v1/rules/my-namespace/my-name",
+			prometheusHTTPPrefix: "/prometheus",
+		},
+		{
+			test:                 "special-characters-spaces",
+			namespace:            "My: Namespace",
+			name:                 "My: Name",
+			prometheusHTTPPrefix: "/prometheus",
+			expURLPath:           "/prometheus/config/v1/rules/My:%20Namespace/My:%20Name",
 		},
 		{
-			test:       "special-characters-spaces",
-			namespace:  "My: Namespace",
-			name:       "My: Name",
-			expURLPath: "/prometheus/config/v1/rules/My:%20Namespace/My:%20Name",
+			test:                 "special-characters-slashes",
+			namespace:            "My/Namespace",
+			name:                 "My/Name",
+			prometheusHTTPPrefix: "/prometheus",
+			expURLPath:           "/prometheus/config/v1/rules/My%2FNamespace/My%2FName",
 		},
 		{
-			test:       "special-characters-slashes",
-			namespace:  "My/Namespace",
-			name:       "My/Name",
-			expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/My%2FName",
+			test:                 "special-characters-slash-first",
+			namespace:            "My/Namespace",
+			name:                 "/first-char-slash",
+			prometheusHTTPPrefix: "/prometheus",
+			expURLPath:           "/prometheus/config/v1/rules/My%2FNamespace/%2Ffirst-char-slash",
 		},
 		{
-			test:       "special-characters-slash-first",
-			namespace:  "My/Namespace",
-			name:       "/first-char-slash",
-			expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/%2Ffirst-char-slash",
+			test:                 "special-characters-slash-last",
+			namespace:            "My/Namespace",
+			name:                 "last-char-slash/",
+			prometheusHTTPPrefix: "/prometheus",
+			expURLPath:           "/prometheus/config/v1/rules/My%2FNamespace/last-char-slash%2F",
 		},
 		{
-			test:       "special-characters-slash-last",
-			namespace:  "My/Namespace",
-			name:       "last-char-slash/",
-			expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/last-char-slash%2F",
+			test:                 "regular-characters-with-customized-prometheus-http-prefix",
+			namespace:            "My/Namespace",
+			name:                 "last-char-slash/",
+			prometheusHTTPPrefix: "/mimir",
+			expURLPath:           "/mimir/config/v1/rules/My%2FNamespace/last-char-slash%2F",
 		},
 	} {
 		t.Run(tc.test, func(t *testing.T) {
+			client, err := New(log.NewNopLogger(), Config{
+				Address:              ts.URL,
+				PrometheusHTTPPrefix: tc.prometheusHTTPPrefix,
+			}, prometheus.NewHistogramVec(prometheus.HistogramOpts{}, instrument.HistogramCollectorBuckets))
+			require.NoError(t, err)
+
 			ctx := context.Background()

 			require.NoError(t, client.DeleteRuleGroup(ctx, tc.namespace, tc.name))
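For the pkg/mimir/client change above, the key property is that url.JoinPath (standard library, Go 1.19+) cleans the joined path, so a user-supplied prefix with or without a trailing slash composes safely with the fixed ruler route. A small demonstration:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// url.JoinPath cleans the result, so redundant slashes between the
	// prefix and the fixed suffix collapse to a single separator.
	for _, prefix := range []string{"/prometheus", "/mimir/"} {
		p, err := url.JoinPath(prefix, "/config/v1/rules")
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q -> %q\n", prefix, p)
	}
	// "/prometheus" -> "/prometheus/config/v1/rules"
	// "/mimir/"     -> "/mimir/config/v1/rules"
}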
"My: Name", + prometheusHTTPPrefix: "/prometheus", + expURLPath: "/prometheus/config/v1/rules/My:%20Namespace/My:%20Name", }, { - test: "special-characters-spaces", - namespace: "My: Namespace", - name: "My: Name", - expURLPath: "/prometheus/config/v1/rules/My:%20Namespace/My:%20Name", + test: "special-characters-slashes", + namespace: "My/Namespace", + name: "My/Name", + prometheusHTTPPrefix: "/prometheus", + expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/My%2FName", }, { - test: "special-characters-slashes", - namespace: "My/Namespace", - name: "My/Name", - expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/My%2FName", + test: "special-characters-slash-first", + namespace: "My/Namespace", + name: "/first-char-slash", + prometheusHTTPPrefix: "/prometheus", + expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/%2Ffirst-char-slash", }, { - test: "special-characters-slash-first", - namespace: "My/Namespace", - name: "/first-char-slash", - expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/%2Ffirst-char-slash", + test: "special-characters-slash-last", + namespace: "My/Namespace", + name: "last-char-slash/", + prometheusHTTPPrefix: "/prometheus", + expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/last-char-slash%2F", }, { - test: "special-characters-slash-last", - namespace: "My/Namespace", - name: "last-char-slash/", - expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/last-char-slash%2F", + test: "regular-characters-with-customized-prometheus-http-prefix", + namespace: "My/Namespace", + name: "last-char-slash/", + prometheusHTTPPrefix: "/mimir", + expURLPath: "/mimir/config/v1/rules/My%2FNamespace/last-char-slash%2F", }, } { t.Run(tc.test, func(t *testing.T) { + client, err := New(log.NewNopLogger(), Config{ + Address: ts.URL, + PrometheusHTTPPrefix: tc.prometheusHTTPPrefix, + }, prometheus.NewHistogramVec(prometheus.HistogramOpts{}, instrument.HistogramCollectorBuckets)) + require.NoError(t, err) + ctx := context.Background() require.NoError(t, client.DeleteRuleGroup(ctx, tc.namespace, tc.name)) diff --git a/pkg/operator/defaults.go b/pkg/operator/defaults.go index bc9cff6ab04e..25e4e05e986d 100644 --- a/pkg/operator/defaults.go +++ b/pkg/operator/defaults.go @@ -2,7 +2,7 @@ package operator // Supported versions of the Grafana Agent. 
diff --git a/tools/agentlint/go.mod b/tools/agentlint/go.mod
index caa90ea495ce..88704e137a6e 100644
--- a/tools/agentlint/go.mod
+++ b/tools/agentlint/go.mod
@@ -2,9 +2,6 @@ module github.com/grafana/agent/tools/agentlint

 go 1.19

-require golang.org/x/tools v0.4.0
+require golang.org/x/tools v0.17.0

-require (
-	golang.org/x/mod v0.7.0 // indirect
-	golang.org/x/sys v0.3.0 // indirect
-)
+require golang.org/x/mod v0.14.0 // indirect
diff --git a/tools/agentlint/go.sum b/tools/agentlint/go.sum
index 9957b846b1ec..d9ba5b9c878a 100644
--- a/tools/agentlint/go.sum
+++ b/tools/agentlint/go.sum
@@ -1,9 +1,5 @@
-github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
-golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
-golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
-golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
diff --git a/tools/gen-versioned-files/agent-version.txt b/tools/gen-versioned-files/agent-version.txt
index 999889149ec2..76b2d23e8697 100644
--- a/tools/gen-versioned-files/agent-version.txt
+++ b/tools/gen-versioned-files/agent-version.txt
@@ -1 +1 @@
-v0.39.0
\ No newline at end of file
+v0.39.2
\ No newline at end of file
diff --git a/tools/generate-crds.bash b/tools/generate-crds.bash
index 4a46f884c657..c081dcc8b0e8 100755
--- a/tools/generate-crds.bash
+++ b/tools/generate-crds.bash
@@ -6,14 +6,14 @@ ROOT=$(git rev-parse --show-toplevel)

 # Generate objects and controllers for our CRDs
 cd $ROOT/pkg/operator/apis/monitoring/v1alpha1
 controller-gen object paths=.
-controller-gen crd:crdVersions=v1 paths=. output:crd:dir=$ROOT/operations/agent-static-operator/crds
+controller-gen crd:crdVersions=v1,maxDescLen=0 paths=. output:crd:dir=$ROOT/operations/agent-static-operator/crds

 # Generate CRDs for prometheus-operator.
 PROM_OP_DEP_NAME="github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 PROM_OP_DIR=$(go list -f '{{.Dir}}' $PROM_OP_DEP_NAME)

 cd $PROM_OP_DIR
-controller-gen crd:crdVersions=v1 paths=. output:crd:dir=$ROOT/operations/agent-static-operator/crds
+controller-gen crd:crdVersions=v1,maxDescLen=0 paths=. output:crd:dir=$ROOT/operations/agent-static-operator/crds

 # Remove known Prometheus-Operator CRDS we don't generate. (An allowlist would
 # be better here, but rfratto's bash skills are bad.)
diff --git a/tools/make/build-container.mk b/tools/make/build-container.mk
index 36ed221bb961..be1d4be9ca49 100644
--- a/tools/make/build-container.mk
+++ b/tools/make/build-container.mk
@@ -34,7 +34,7 @@
 # variable names should be passed through to the container.

 USE_CONTAINER       ?= 0
-BUILD_IMAGE_VERSION ?= 0.30.4
+BUILD_IMAGE_VERSION ?= 0.31.0
 BUILD_IMAGE         ?= grafana/agent-build-image:$(BUILD_IMAGE_VERSION)
 DOCKER_OPTS         ?= -it