diff --git a/.drone/drone.yml b/.drone/drone.yml index 982d2f04bc1d..43e4161ef322 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -75,7 +75,7 @@ steps: - commands: - apt-get update -y && apt-get install -y libsystemd-dev - make lint - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Lint trigger: event: @@ -93,7 +93,7 @@ steps: - ERR_MSG="Dashboard definitions are out of date. Please run 'make generate-dashboards' and commit changes!" - if [ ! -z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Regenerate dashboards trigger: event: @@ -111,7 +111,7 @@ steps: - ERR_MSG="Custom Resource Definitions are out of date. Please run 'make generate-crds' and commit changes!" - if [ ! -z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Regenerate crds trigger: event: @@ -129,7 +129,7 @@ steps: - ERR_MSG="The environment manifests are out of date. Please run 'make generate-manifests' and commit changes!" - if [ ! -z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Regenerate environment manifests trigger: event: @@ -144,7 +144,7 @@ platform: steps: - commands: - make GO_TAGS="nodocker" test - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Run Go tests trigger: event: @@ -159,7 +159,7 @@ platform: steps: - commands: - K8S_USE_DOCKER_NETWORK=1 make test - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Run Go tests volumes: - name: docker @@ -181,8 +181,8 @@ platform: version: "1809" steps: - commands: - - go test -tags="nodocker,nonetwork" $(go list ./... | grep -v integration-tests) - image: grafana/agent-build-image:0.30.3-windows + - go test -tags="nodocker,nonetwork" ./... 
+ image: grafana/agent-build-image:0.30.4-windows name: Run Go tests trigger: ref: @@ -197,7 +197,7 @@ platform: steps: - commands: - make agent-image - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build container volumes: - name: docker @@ -222,7 +222,7 @@ platform: steps: - commands: - make agentctl-image - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build container volumes: - name: docker @@ -247,7 +247,7 @@ platform: steps: - commands: - make operator-image - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build container volumes: - name: docker @@ -273,7 +273,7 @@ platform: steps: - commands: - '& "C:/Program Files/git/bin/bash.exe" ./tools/ci/docker-containers-windows agent' - image: grafana/agent-build-image:0.30.3-windows + image: grafana/agent-build-image:0.30.4-windows name: Build container volumes: - name: docker @@ -299,7 +299,7 @@ platform: steps: - commands: - '& "C:/Program Files/git/bin/bash.exe" ./tools/ci/docker-containers-windows agentctl' - image: grafana/agent-build-image:0.30.3-windows + image: grafana/agent-build-image:0.30.4-windows name: Build container volumes: - name: docker @@ -326,7 +326,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -343,7 +343,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make agent - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -360,7 +360,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make agent - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -377,7 +377,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make agent - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -393,7 +393,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -409,7 +409,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agent - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -425,7 +425,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -441,7 +441,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -458,7 +458,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -475,7 +475,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets 
promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make agent-flow - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -492,7 +492,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make agent-flow - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -509,7 +509,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make agent-flow - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -525,7 +525,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -541,7 +541,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agent-flow - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -557,7 +557,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -573,7 +573,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -590,7 +590,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -607,7 +607,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make agentctl - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -624,7 +624,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make agentctl - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -641,7 +641,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make agentctl - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -657,7 +657,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -673,7 +673,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agentctl - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -689,7 +689,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -705,7 +705,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agentctl - 
image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -722,7 +722,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -739,7 +739,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make operator - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -756,7 +756,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make operator - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -773,7 +773,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make operator - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -789,7 +789,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -805,7 +805,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make operator - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -821,7 +821,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -837,7 +837,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -854,7 +854,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= GOEXPERIMENT=boringcrypto make agent-boringcrypto - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -871,7 +871,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= GOEXPERIMENT=boringcrypto make agent-boringcrypto - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Build trigger: event: @@ -887,7 +887,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Configure QEMU volumes: - name: docker @@ -907,7 +907,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Publish container volumes: - name: docker @@ -931,7 +931,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Configure QEMU volumes: - name: docker @@ -951,7 +951,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.30.3 + image: 
grafana/agent-build-image:0.30.4 name: Publish container volumes: - name: docker @@ -975,7 +975,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Configure QEMU volumes: - name: docker @@ -995,7 +995,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Publish container volumes: - name: docker @@ -1019,7 +1019,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Configure QEMU volumes: - name: docker @@ -1039,7 +1039,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Publish container volumes: - name: docker @@ -1063,7 +1063,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Configure QEMU volumes: - name: docker @@ -1083,7 +1083,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Publish container volumes: - name: docker @@ -1107,7 +1107,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Configure QEMU volumes: - name: docker @@ -1127,7 +1127,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Publish container volumes: - name: docker @@ -1156,7 +1156,7 @@ steps: from_secret: docker_login DOCKER_PASSWORD: from_secret: docker_password - image: grafana/agent-build-image:0.30.3-windows + image: grafana/agent-build-image:0.30.4-windows name: Build containers volumes: - name: docker @@ -1185,7 +1185,7 @@ steps: from_secret: docker_login DOCKER_PASSWORD: from_secret: docker_password - image: grafana/agent-build-image:0.30.3-windows + image: grafana/agent-build-image:0.30.4-windows name: Build containers volumes: - name: docker @@ -1306,7 +1306,7 @@ steps: from_secret: gpg_private_key GPG_PUBLIC_KEY: from_secret: gpg_public_key - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Publish release volumes: - name: docker @@ -1331,7 +1331,7 @@ steps: - DOCKER_OPTS="" make dist/grafana-agentctl-linux-amd64 - DOCKER_OPTS="" make dist.temp/grafana-agent-flow-linux-amd64 - DOCKER_OPTS="" make test-packages - image: grafana/agent-build-image:0.30.3 + image: grafana/agent-build-image:0.30.4 name: Test Linux system packages volumes: - name: docker @@ -1427,6 +1427,6 @@ kind: secret name: updater_private_key --- kind: signature -hmac: 47d018f95267288b13edfd1bdabbab3bc60daa2674fda3ebeb713fac569586cb +hmac: 28ba52df6f22c10bf77a95386a49aff65a1c372127f7d89489ac2d3ee02ce618 ... 
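The Windows test step in the pipeline above now relies on the `nodocker` and `nonetwork` Go build tags instead of filtering packages with `go list ... | grep -v integration-tests`. As a rough, hypothetical sketch (not a file from this repository), a test that needs network access can opt itself out of such a run with a build constraint, so `go test -tags="nodocker,nonetwork" ./...` excludes it at compile time rather than skipping it at runtime:

```go
//go:build !nodocker && !nonetwork

package integration_test

import (
	"net/http"
	"testing"
	"time"
)

// TestReachesExternalEndpoint only compiles when neither the nodocker nor the
// nonetwork tag is set; with both tags passed, the file is dropped from the build.
func TestReachesExternalEndpoint(t *testing.T) {
	client := &http.Client{Timeout: 5 * time.Second}
	// Placeholder endpoint purely for illustration, not a real test target.
	resp, err := client.Get("http://127.0.0.1:4318/v1/traces")
	if err != nil {
		t.Fatalf("endpoint not reachable: %v", err)
	}
	defer resp.Body.Close()
}
```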
diff --git a/.github/workflows/helm-release.yml b/.github/workflows/helm-release.yml index 76b1727e2514..b5a310bd4163 100644 --- a/.github/workflows/helm-release.yml +++ b/.github/workflows/helm-release.yml @@ -22,7 +22,7 @@ jobs: path: source - name: Install chart-testing - uses: helm/chart-testing-action@v2.6.0 + uses: helm/chart-testing-action@v2.6.1 - name: List changed charts id: list-changed diff --git a/.github/workflows/helm-test.yml b/.github/workflows/helm-test.yml index 321b3ecd93e7..9d4738bcd6cb 100644 --- a/.github/workflows/helm-test.yml +++ b/.github/workflows/helm-test.yml @@ -51,13 +51,13 @@ jobs: version: v3.10.3 - name: Install Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.9' check-latest: true - name: Install chart-testing - uses: helm/chart-testing-action@v2.6.0 + uses: helm/chart-testing-action@v2.6.1 - name: Determine changed charts id: list-changed diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 1d3e276ef4c3..4b9f7077ed57 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -14,10 +14,10 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: "1.21.0" + go-version: "1.21" - name: Set OTEL Exporter Endpoint - run: echo "OTEL_EXPORTER_ENDPOINT=http://172.17.0.1:8080" >> $GITHUB_ENV + run: echo "OTEL_EXPORTER_ENDPOINT=172.17.0.1:4318" >> $GITHUB_ENV - name: Run tests - run: make integration-test \ No newline at end of file + run: make integration-test diff --git a/.github/workflows/needs-attention.yml b/.github/workflows/needs-attention.yml index 6143be1fb2be..3e2d93a25ca6 100644 --- a/.github/workflows/needs-attention.yml +++ b/.github/workflows/needs-attention.yml @@ -10,7 +10,7 @@ jobs: needs-attention: runs-on: ubuntu-latest steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: days-before-stale: 30 days-before-close: -1 # never close automatically diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4963743342e1..59d4fbe34540 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -12,15 +12,15 @@ jobs: name: Test strategy: matrix: - platform: [macos-latest] + platform: [macos-latest-xlarge] runs-on: ${{ matrix.platform }} steps: - name: Checkout code uses: actions/checkout@v4 - name: Set up Go 1.21 - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: "1.21.0" + go-version: "1.21" cache: true - name: Test run: make GO_TAGS="nodocker" test diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 2f4a44515099..57fd6e855873 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -26,7 +26,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@b77b85c0254bba6789e787844f0585cde1e56320 + uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca with: image-ref: 'grafana/agent:main' format: 'template' @@ -35,6 +35,6 @@ jobs: severity: 'CRITICAL,HIGH,MEDIUM,LOW' - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v2 + uses: github/codeql-action/upload-sarif@v3 with: sarif_file: 'trivy-results.sarif' \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index c632abfdc437..56073ffe1135 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,160 @@ internal API 
changes are not present. Main (unreleased) ----------------- +### Security fixes +- Fixes following vulnerabilities (@hainenber) + - [GO-2023-2409](https://github.com/advisories/GHSA-mhpq-9638-x6pw) + - [GO-2023-2412](https://github.com/advisories/GHSA-7ww5-4wqc-m92c) + - [CVE-2023-49568](https://github.com/advisories/GHSA-mw99-9chc-xw7r) + + +v0.39.0 (2024-01-09) +-------------------- + +### Breaking changes + +- `otelcol.receiver.prometheus` will drop all `otel_scope_info` metrics when converting them to OTLP. (@wildum) + - If the `otel_scope_info` metric has labels `otel_scope_name` and `otel_scope_version`, + their values will be used to set OTLP Instrumentation Scope name and version respectively. + - Labels of `otel_scope_info` metrics other than `otel_scope_name` and `otel_scope_version` + are added as scope attributes with the matching name and version. + +- The `target` block in `prometheus.exporter.blackbox` requires a mandatory `name` + argument instead of a block label. (@hainenber) + +- In the azure exporter, dimension options will no longer be validated by the Azure API. (@kgeckhart) + - This change will not break any existing configurations and you can opt in to validation via the `validate_dimensions` configuration option. + - Before this change, pulling metrics for azure resources with variable dimensions required one configuration per metric + dimension combination to avoid an error. + - After this change, you can include all metrics and dimensions in a single configuration and the Azure APIs will only return dimensions which are valid for the various metrics. + +### Features + +- A new `discovery.ovhcloud` component for discovering scrape targets on OVHcloud. (@ptodev) +- Allow specifying additional containers to run. (@juangom) + +### Enhancements + +- Flow Windows service: Support environment variables. (@jkroepke) + +- Allow disabling collection of root Cgroup stats in + `prometheus.exporter.cadvisor` (flow mode) and the `cadvisor` integration + (static mode). (@hainenber) + +- Grafana Agent on Windows now automatically restarts on failure. (@hainenber) + +- Added metrics, alerts and dashboard visualisations to help diagnose issues + with unhealthy components and components that take too long to evaluate. (@thampiotr) + +- The `http` config block may now reference exports from any component. + Previously, only `remote.*` and `local.*` components could be referenced + without a circular dependency. (@rfratto) + +- Add support for Basic Auth-secured connection with Elasticsearch cluster using `prometheus.exporter.elasticsearch`. (@hainenber) + +- Add a `resource_to_telemetry_conversion` argument to `otelcol.exporter.prometheus` + for converting resource attributes to Prometheus labels. (@hainenber) + +- `pyroscope.ebpf` support python on arm64 platforms. (@korniltsev) + +- `otelcol.receiver.prometheus` does not drop histograms without buckets anymore. (@wildum) + +- Added exemplars support to `otelcol.receiver.prometheus`. (@wildum) +- `mimir.rules.kubernetes` may now retry its startup on failure. (@hainenber) + +- Added links between compatible components in the documentation to make it + easier to discover them. (@thampiotr) + +- Allow defining `HTTPClientConfig` for `discovery.ec2`. (@cmbrad) + +- The `remote.http` component can optionally define a request body. (@tpaschalis) + +- Added support for `loki.write` to flush WAL on agent shutdown. (@thepalbi) + +- Add support for `integrations-next` static to flow config conversion. 
(@erikbaranowski) + +- Add support for passing extra arguments to the static converter such as `-config.expand-env`. (@erikbaranowski) + +- Added 'country' mmdb-type to log pipeline-stage geoip. (@superstes) + +- Azure exporter enhancements for flow and static mode, (@kgeckhart) + - Allows for pulling metrics at the Azure subscription level instead of resource by resource + - Disable dimension validation by default to reduce the number of exporter instances needed for full dimension coverage + +- Add `max_cache_size` to `prometheus.relabel` to allow configurability instead of hard coded 100,000. (@mattdurham) + +- Add support for `http_sd_config` within a `scrape_config` for prometheus to flow config conversion. (@erikbaranowski) +- `discovery.lightsail` now supports additional parameters for configuring HTTP client settings. (@ptodev) +- Add `sample_age_limit` to remote_write config to drop samples older than a specified duration. (@marctc) + +- Handle paths in the Kubelet URL for `discovery.kubelet`. (@petewall) + +### Bugfixes + +- Update `pyroscope.ebpf` to fix a logical bug causing to profile to many kthreads instead of regular processes https://github.com/grafana/pyroscope/pull/2778 (@korniltsev) + +- Update `pyroscope.ebpf` to produce more optimal pprof profiles for python processes https://github.com/grafana/pyroscope/pull/2788 (@korniltsev) + +- In Static mode's `traces` subsystem, `spanmetrics` used to be generated prior to load balancing. + This could lead to inaccurate metrics. This issue only affects Agents using both `spanmetrics` and + `load_balancing`, when running in a load balanced cluster with more than one Agent instance. (@ptodev) + +- Fixes `loki.source.docker` a behavior that synced an incomplete list of targets to the tailer manager. (@FerdinandvHagen) + +- Fixes `otelcol.connector.servicegraph` store ttl default value from 2ms to 2s. (@rlankfo) + +- Add staleness tracking to labelstore to reduce memory usage. (@mattdurham) + +- Fix issue where `prometheus.exporter.kafka` would crash when configuring `sasl_password`. (@rfratto) + +- Fix performance issue where perf lib where clause was not being set, leading to timeouts in collecting metrics for windows_exporter. (@mattdurham) + +- Fix nil panic when using the process collector with the windows exporter. (@mattdurham) + +### Other changes + +- Bump github.com/IBM/sarama from v1.41.2 to v1.42.1 + +- Attach unique Agent ID header to remote-write requests. (@captncraig) + +- Update to v2.48.1 of `github.com/prometheus/prometheus`. + Previously, a custom fork of v2.47.2 was used. + The custom fork of v2.47.2 also contained prometheus#12729 and prometheus#12677. + +v0.38.1 (2023-11-30) +-------------------- + +### Security fixes + +- Fix CVE-2023-47108 by updating `otelgrpc` from v0.45.0 to v0.46.0. (@hainenber) + +### Features + +- Agent Management: Introduce support for templated configuration. (@jcreixell) + +### Bugfixes + +- Permit `X-Faro-Session-ID` header in CORS requests for the `faro.receiver` + component (flow mode) and the `app_agent_receiver` integration (static mode). + (@cedricziel) + +- Fix issue with windows_exporter defaults not being set correctly. (@mattdurham) + +- Fix agent crash when process null OTel's fan out consumers. (@hainenber) + +- Fix issue in `prometheus.operator.*` where targets would be dropped if two crds share a common prefix in their names. 
(@Paul424, @captncraig) + +- Fix issue where `convert` command would generate incorrect Flow Mode config + when provided `promtail` configuration that uses `docker_sd_configs` (@thampiotr) + +- Fix converter issue with `loki.relabel` and `max_cache_size` being set to 0 instead of default (10_000). (@mattdurham) + +### Other changes + +- Add Agent Deploy Mode to usage report. (@captncraig) + +v0.38.0 (2023-11-21) +-------------------- + ### Breaking changes - Remove `otelcol.exporter.jaeger` component (@hainenber) @@ -26,11 +180,6 @@ Main (unreleased) - renamed 3 metrics starting with `mysql_perf_schema_transaction_` to start with `mysql_perf_schema_transactions_` to be consistent with column names. - exposing only server's own stats by matching `MEMBER_ID` with `@@server_uuid` resulting "member_id" label to be dropped. -### Other changes - -- Bump `mysqld_exporter` version to v0.15.0. (@marctc) -- Bump `github-exporter` version to 1.0.6. (@marctc) - ### Features - Added a new `stage.decolorize` stage to `loki.process` component which @@ -54,6 +203,8 @@ Main (unreleased) - Added support for python profiling to `pyroscope.ebpf` component. (@korniltsev) +- Added support for native Prometheus histograms to `otelcol.exporter.prometheus` (@wildum) + - Windows Flow Installer: Add /CONFIG /DISABLEPROFILING and /DISABLEREPORTING flag (@jkroepke) - Add queueing logs remote write client for `loki.write` when WAL is enabled. (@thepalbi) @@ -62,6 +213,9 @@ Main (unreleased) - `otelcol.processor.filter` - filters OTLP telemetry data using OpenTelemetry Transformation Language (OTTL). (@hainenber) + - `otelcol.receiver.vcenter` - receives metrics telemetry data from vCenter. (@marctc) + +- Agent Management: Introduce support for remotely managed external labels for logs. (@jcreixell) - Add `fallback_cache` to `remote.http` and `module.http`, which allows for caching the response of a request to a remote HTTP endpoint. Also adds DebugInfo @@ -95,13 +249,29 @@ Main (unreleased) - Make component list sortable in web UI. (@hainenber) - Adds new metrics (`mssql_server_total_memory_bytes`, `mssql_server_target_memory_bytes`, - and `mssql_available_commit_memory_bytes`) for `mssql` integration. + and `mssql_available_commit_memory_bytes`) for `mssql` integration (@StefanKurek). - Grafana Agent Operator: `config-reloader` container no longer runs as root. (@rootmout) +- Added support for replaying not sent data for `loki.write` when WAL is enabled. (@thepalbi) + +- Make the result of 'discovery.kubelet' support pods that without ports, such as k8s control plane static pods. (@masonmei) + +- Added support for unicode strings in `pyroscope.ebpf` python profiles. (@korniltsev) + +- Improved resilience of graph evaluation in presence of slow components. (@thampiotr) + +- Updated windows exporter to use prometheus-community/windows_exporter commit 1836cd1. (@mattdurham) + +- Allow agent to start with `module.git` config if cached before. (@hainenber) + +- Adds new optional config parameter `query_config` to `mssql` integration to allow for custom metrics (@StefanKurek) + ### Bugfixes +- Set exit code 1 on grafana-agentctl non-runnable command. (@fgouteroux) + - Fixed an issue where `loki.process` validation for stage `metric.counter` was allowing invalid combination of configuration options. (@thampiotr) @@ -134,6 +304,40 @@ Main (unreleased) - Fixed a bug where UDP syslog messages were never processed (@joshuapare) +- Updating configuration for `loki.write` no longer drops data. 
(@thepalbi) + +- Fixed a bug in WAL where exemplars were recorded before the first native histogram samples for new series, + resulting in remote write sending the exemplar first and Prometheus failing to ingest it due to missing + series. (@krajorama) + +- Fixed an issue in the static config converter where exporter instance values + were not being mapped when translating to flow. (@erikbaranowski) + +- Fix a bug which prevented Agent from running `otelcol.exporter.loadbalancing` + with a `routing_key` of `traceID`. (@ptodev) + +- Added Kubernetes service resolver to static node's loadbalancing exporter + and to Flow's `otelcol.exporter.loadbalancing`. (@ptodev) + +- Fix default configuration file `grafana-agent-flow.river` used in downstream + packages. (@bricewge) + +- Fix converter output for prometheus.exporter.windows to not unnecessarily add + empty blocks. (@erikbaranowski) + +### Other changes + +- Bump `mysqld_exporter` version to v0.15.0. (@marctc) + +- Bump `github-exporter` version to 1.0.6. (@marctc) + +- Use Go 1.21.4 for builds. (@rfratto) + +- Change User-Agent header for outbound requests to include agent-mode, goos, and deployment mode. Example `GrafanaAgent/v0.38.0 (flow; linux; docker)` (@captncraig) + +- `loki.source.windowsevent` and `loki.source.*` changed to use a more robust positions file to prevent corruption on reboots when writing + the positions file. (@mattdurham) + v0.37.4 (2023-11-06) ----------------- @@ -147,6 +351,9 @@ v0.37.4 (2023-11-06) - Fix a bug where reloading the configuration of a `loki.write` component lead to a panic. (@tpaschalis) +- Added Kubernetes service resolver to static node's loadbalancing exporter + and to Flow's `otelcol.exporter.loadbalancing`. (@ptodev) + v0.37.3 (2023-10-26) ----------------- diff --git a/CODEOWNERS b/CODEOWNERS index f836c3737ca6..4f1541f12dbd 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,25 +1,10 @@ -# The following groups are used to refer to a changing set of users: +# The CODEOWNERS file is used to define ownership of individuals or teams +# outside of the core set of Grafana Agent maintainers. # -# * @grafana/grafana-agent-core-maintainers: maintainers of type/core issues. -# * @grafana/grafana-agent-signals-maintainers: maintainers of type/signals issues. -# * @grafana/grafana-agent-operator-maintainers: maintainers of type/operator issues. -# * @grafana/grafana-agent-infrastructure-maintainers: maintainers of type/infrastructure issues. -# -# Other users may be listed explicitly if maintainership does not fall into one -# of the above groups. - -# The default owners for everything in the repo. Unless a later match takes -# precedence, these owners are requested for review whenever someone opens a -# pull request. -* @grafana/grafana-agent-core-maintainers - -# Some directories have shared ownership with the respective owners of the -# specific code for the PR being opened, so there's no CODEOWNERS. -/CHANGELOG.md -/component/all - -# Binaries: -/cmd/grafana-agent-operator/ @grafana/grafana-agent-operator-maintainers +# If a directory is not listed here, it is assumed to be owned by the +# @grafana/grafana-agent-maintainers; they are not explicitly listed as +# CODEOWNERS as a GitHub project board is used instead for PR tracking, which +# helps reduce notification noise of the members of that team. # `make docs` procedure and related workflows are owned by @jdbaldry. 
/.github/workflows/publish-technical-documentation-next.yml @jdbaldry @@ -30,25 +15,7 @@ /docs/variables.mk @jdbaldry # Documentation: -/docs/sources/ @clayton-cornell +/docs/sources/ @clayton-cornell # Components: -/component/discovery/ @grafana/grafana-agent-infrastructure-maintainers -/component/local/ @grafana/grafana-agent-infrastructure-maintainers -/component/loki/ @grafana/grafana-agent-signals-maintainers -/component/loki/source/podlogs/ @grafana/grafana-agent-infrastructure-maintainers -/component/mimir/rules/kubernetes/ @grafana/grafana-agent-infrastructure-maintainers -/component/otelcol/ @grafana/grafana-agent-signals-maintainers -/component/prometheus/ @grafana/grafana-agent-signals-maintainers -/component/prometheus/exporter/ @grafana/grafana-agent-infrastructure-maintainers -/component/prometheus/operator/ @grafana/grafana-agent-operator-maintainers -/component/pyroscope/ @grafana/grafana-agent-profiling-maintainers -/component/remote/ @grafana/grafana-agent-infrastructure-maintainers - -# Static mode packages: -/pkg/integrations/ @grafana/grafana-agent-infrastructure-maintainers -/pkg/logs/ @grafana/grafana-agent-signals-maintainers -/pkg/metrics/ @grafana/grafana-agent-signals-maintainers -/pkg/mimir/client/ @grafana/grafana-agent-infrastructure-maintainers -/pkg/operator/ @grafana/grafana-agent-operator-maintainers -/pkg/traces/ @grafana/grafana-agent-signals-maintainers +/component/pyroscope/ @grafana/grafana-agent-profiling-maintainers diff --git a/Makefile b/Makefile index 176287e03fd0..024a624d2223 100644 --- a/Makefile +++ b/Makefile @@ -287,7 +287,7 @@ smoke-image: # .PHONY: generate generate-crds generate-drone generate-helm-docs generate-helm-tests generate-manifests generate-dashboards generate-protos generate-ui generate-versioned-files -generate: generate-crds generate-drone generate-helm-docs generate-helm-tests generate-manifests generate-dashboards generate-protos generate-ui generate-versioned-files +generate: generate-crds generate-drone generate-helm-docs generate-helm-tests generate-manifests generate-dashboards generate-protos generate-ui generate-versioned-files generate-docs generate-crds: ifeq ($(USE_CONTAINER),1) @@ -350,6 +350,12 @@ else sh ./tools/gen-versioned-files/gen-versioned-files.sh endif +generate-docs: +ifeq ($(USE_CONTAINER),1) + $(RERUN_IN_CONTAINER) +else + go generate ./docs +endif # # Other targets # diff --git a/build-image/Dockerfile b/build-image/Dockerfile index fb3861df11d4..eeea8fdce018 100644 --- a/build-image/Dockerfile +++ b/build-image/Dockerfile @@ -23,7 +23,7 @@ FROM alpine:3.17 as helm RUN apk add --no-cache helm # Dependency: Go and Go dependencies -FROM golang:1.21.3-bullseye as golang +FROM golang:1.21.4-bullseye as golang # Keep in sync with cmd/grafana-agent-operator/DEVELOPERS.md ENV CONTROLLER_GEN_VERSION v0.9.2 diff --git a/build-image/windows/Dockerfile b/build-image/windows/Dockerfile index 1f4efb7beb2a..6664d8928e82 100644 --- a/build-image/windows/Dockerfile +++ b/build-image/windows/Dockerfile @@ -1,4 +1,4 @@ -FROM library/golang:1.21.3-windowsservercore-1809 +FROM library/golang:1.21.4-windowsservercore-1809 SHELL ["powershell", "-command"] diff --git a/cmd/grafana-agent-operator/Dockerfile b/cmd/grafana-agent-operator/Dockerfile index 1b797fc96d69..a86af13209bb 100644 --- a/cmd/grafana-agent-operator/Dockerfile +++ b/cmd/grafana-agent-operator/Dockerfile @@ -4,7 +4,7 @@ # default when running `docker buildx build` or when DOCKER_BUILDKIT=1 is set # in environment variables. 
-FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.30.3 as build +FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.30.4 as build ARG BUILDPLATFORM ARG TARGETPLATFORM ARG TARGETOS @@ -22,7 +22,7 @@ RUN --mount=type=cache,target=/root/.cache/go-build \ RELEASE_BUILD=${RELEASE_BUILD} VERSION=${VERSION} \ make operator -FROM ubuntu:lunar +FROM ubuntu:mantic LABEL org.opencontainers.image.source="https://github.com/grafana/agent" @@ -30,7 +30,6 @@ LABEL org.opencontainers.image.source="https://github.com/grafana/agent" RUN < maxSeenTimestamp { + maxSeenTimestamp = e.Timestamp.Unix() + } } // count all enqueued appended entries as received from WAL c.markerHandler.UpdateReceivedData(segment, len(entries.Entries)) @@ -293,6 +299,11 @@ func (c *queueClient) AppendEntries(entries wal.RefEntries, segment int) error { // TODO(thepalbi): Add metric here level.Debug(c.logger).Log("msg", "series for entry not found") } + + // It's safe to assume that upon an AppendEntries call, there will always be at least + // one entry. + c.qcMetrics.lastReadTimestamp.WithLabelValues().Set(float64(maxSeenTimestamp)) + return nil } @@ -509,7 +520,7 @@ func (c *queueClient) send(ctx context.Context, tenantID string, buf []byte) (in } req = req.WithContext(ctx) req.Header.Set("Content-Type", contentType) - req.Header.Set("User-Agent", UserAgent) + req.Header.Set("User-Agent", userAgent) // If the tenant ID is not empty promtail is running in multi-tenant mode, so // we should send it to Loki diff --git a/component/common/loki/client/queue_client_test.go b/component/common/loki/client/queue_client_test.go index a23804d44634..cf59f49e1b7a 100644 --- a/component/common/loki/client/queue_client_test.go +++ b/component/common/loki/client/queue_client_test.go @@ -135,8 +135,7 @@ func TestQueueClient(t *testing.T) { logger := log.NewLogfmtLogger(os.Stdout) - m := NewMetrics(reg) - qc, err := NewQueue(m, cfg, 0, 0, false, logger, nilMarkerHandler{}) + qc, err := NewQueue(NewMetrics(reg), NewQueueClientMetrics(reg).CurryWithId("test"), cfg, 0, 0, false, logger, nilMarkerHandler{}) require.NoError(t, err) //labels := model.LabelSet{"app": "test"} @@ -281,8 +280,7 @@ func runQueueClientBenchCase(b *testing.B, bc testCase, mhFactory func(t *testin logger := log.NewLogfmtLogger(os.Stdout) - m := NewMetrics(reg) - qc, err := NewQueue(m, cfg, 0, 0, false, logger, mhFactory(b)) + qc, err := NewQueue(NewMetrics(reg), NewQueueClientMetrics(reg).CurryWithId("test"), cfg, 0, 0, false, logger, mhFactory(b)) require.NoError(b, err) //labels := model.LabelSet{"app": "test"} diff --git a/component/common/loki/positions/write_positions_windows.go b/component/common/loki/positions/write_positions_windows.go index 939c85c5f2e0..5712a2e3c9b2 100644 --- a/component/common/loki/positions/write_positions_windows.go +++ b/component/common/loki/positions/write_positions_windows.go @@ -7,14 +7,11 @@ package positions // same place in case of a restart. import ( - "os" - "path/filepath" - + "bytes" + "github.com/natefinch/atomic" yaml "gopkg.in/yaml.v2" ) -// writePositionFile is a fallback for Windows because renameio does not support Windows. 
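The queue client hunk above tracks the highest timestamp among the entries it reads back from the WAL and records it in a gauge, so it can be compared against the writer-side `last_written_timestamp` gauge added later in this diff to estimate read lag. A self-contained sketch of that pattern follows; the metric namespace, name, and helper below are illustrative, not the names the agent registers in `NewQueueClientMetrics`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// entry stands in for the WAL entries handled by AppendEntries above.
type entry struct {
	Timestamp time.Time
	Line      string
}

// lastReadTimestamp mimics the new reader-side gauge: the highest timestamp
// seen among entries read back from the WAL.
var lastReadTimestamp = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	Namespace: "loki_write",
	Subsystem: "queue_client",
	Name:      "last_read_timestamp",
	Help:      "Latest timestamp read back from the WAL (illustrative name)",
}, []string{})

// recordMaxSeen scans a batch of entries and publishes the newest timestamp.
func recordMaxSeen(entries []entry) {
	var maxSeen int64
	for _, e := range entries {
		if ts := e.Timestamp.Unix(); ts > maxSeen {
			maxSeen = ts
		}
	}
	// As the original comment notes, an append always carries at least one entry.
	lastReadTimestamp.WithLabelValues().Set(float64(maxSeen))
}

func main() {
	prometheus.MustRegister(lastReadTimestamp)
	recordMaxSeen([]entry{{Timestamp: time.Now(), Line: "example"}})
	fmt.Println("recorded newest read timestamp")
}
```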
-// See https://github.com/google/renameio#windows-support func writePositionFile(filename string, positions map[Entry]string) error { buf, err := yaml.Marshal(File{ Positions: positions, @@ -22,14 +19,6 @@ func writePositionFile(filename string, positions map[Entry]string) error { if err != nil { return err } + return atomic.WriteFile(filename, bytes.NewReader(buf)) - target := filepath.Clean(filename) - temp := target + "-new" - - err = os.WriteFile(temp, buf, os.FileMode(positionFileMode)) - if err != nil { - return err - } - - return os.Rename(temp, target) } diff --git a/component/common/loki/wal/config.go b/component/common/loki/wal/config.go index c0d6c7ae2752..7c22d747c13d 100644 --- a/component/common/loki/wal/config.go +++ b/component/common/loki/wal/config.go @@ -10,8 +10,9 @@ const ( // DefaultWatchConfig is the opinionated defaults for operating the Watcher. var DefaultWatchConfig = WatchConfig{ - MinReadFrequency: time.Millisecond * 250, + MinReadFrequency: 250 * time.Millisecond, MaxReadFrequency: time.Second, + DrainTimeout: 15 * time.Second, } // Config contains all WAL-related settings. @@ -49,6 +50,10 @@ type WatchConfig struct { // MaxReadFrequency controls the maximum read frequency the Watcher polls the WAL for new records. As mentioned above // it caps the polling frequency to a maximum, to prevent to exponential backoff from making it too high. MaxReadFrequency time.Duration + + // DrainTimeout is the maximum amount of time that the Watcher can spend draining the remaining segments in the WAL. + // After that time, the Watcher is stopped immediately, dropping all the work in process. + DrainTimeout time.Duration } // UnmarshalYAML implement YAML Unmarshaler diff --git a/component/common/loki/wal/internal/watcher_state.go b/component/common/loki/wal/internal/watcher_state.go new file mode 100644 index 000000000000..c81413dfd230 --- /dev/null +++ b/component/common/loki/wal/internal/watcher_state.go @@ -0,0 +1,88 @@ +package internal + +import ( + "sync" + + "github.com/go-kit/log" + "github.com/grafana/agent/pkg/flow/logging/level" +) + +const ( + // StateRunning is the main functioning state of the watcher. It will keep tailing head segments, consuming closed + // ones, and checking for new ones. + StateRunning = iota + + // StateDraining is an intermediary state between running and stopping. The watcher will attempt to consume all the data + // found in the WAL, omitting errors and assuming all segments found are "closed", that is, no longer being written. + StateDraining + + // StateStopping means the Watcher is being stopped. It should drop all segment read activity, and exit promptly. + StateStopping +) + +// WatcherState is a holder for the state the Watcher is in. It provides handy methods for checking it it's stopping, getting +// the current state, or blocking until it has stopped. +type WatcherState struct { + current int + mut sync.RWMutex + stoppingSignal chan struct{} + logger log.Logger +} + +func NewWatcherState(l log.Logger) *WatcherState { + return &WatcherState{ + current: StateRunning, + stoppingSignal: make(chan struct{}), + logger: l, + } +} + +// Transition changes the state of WatcherState to next, reacting accordingly. 
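The new `DrainTimeout` field above bounds how long the Watcher may keep consuming remaining segments once shutdown begins. A small sketch of setting it, with the struct re-declared locally only to keep the example self-contained (in the agent it lives in `component/common/loki/wal`):

```go
package main

import (
	"fmt"
	"time"
)

// WatchConfig mirrors the fields shown in the config diff above.
type WatchConfig struct {
	MinReadFrequency time.Duration
	MaxReadFrequency time.Duration
	DrainTimeout     time.Duration
}

func main() {
	// Start from the defaults introduced above and cap draining at 30s: on
	// shutdown the Watcher keeps reading closed segments until it reaches the
	// end of the WAL or this timeout fires, whichever happens first.
	cfg := WatchConfig{
		MinReadFrequency: 250 * time.Millisecond,
		MaxReadFrequency: time.Second,
		DrainTimeout:     30 * time.Second,
	}
	fmt.Printf("draining for at most %s on shutdown\n", cfg.DrainTimeout)
}
```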
+func (s *WatcherState) Transition(next int) { + s.mut.Lock() + defer s.mut.Unlock() + + level.Debug(s.logger).Log("msg", "watcher transitioning state", "currentState", printState(s.current), "nextState", printState(next)) + + // only perform channel close if the state is not already stopping + // expect s.s to be either draining ro running to perform a close + if next == StateStopping && s.current != next { + close(s.stoppingSignal) + } + + // update state + s.current = next +} + +// IsDraining evaluates to true if the current state is StateDraining. +func (s *WatcherState) IsDraining() bool { + s.mut.RLock() + defer s.mut.RUnlock() + return s.current == StateDraining +} + +// IsStopping evaluates to true if the current state is StateStopping. +func (s *WatcherState) IsStopping() bool { + s.mut.RLock() + defer s.mut.RUnlock() + return s.current == StateStopping +} + +// WaitForStopping returns a channel in which the called can read, effectively waiting until the state changes to stopping. +func (s *WatcherState) WaitForStopping() <-chan struct{} { + return s.stoppingSignal +} + +// printState prints a user-friendly name of the possible Watcher states. +func printState(state int) string { + switch state { + case StateRunning: + return "running" + case StateDraining: + return "draining" + case StateStopping: + return "stopping" + default: + return "unknown" + } +} diff --git a/component/common/loki/wal/watcher.go b/component/common/loki/wal/watcher.go index 0972f32f8f8a..f91e71b856dc 100644 --- a/component/common/loki/wal/watcher.go +++ b/component/common/loki/wal/watcher.go @@ -10,6 +10,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/grafana/agent/component/common/loki/wal/internal" "github.com/grafana/agent/pkg/flow/logging/level" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/wlog" @@ -80,7 +81,7 @@ type Watcher struct { actions WriteTo readNotify chan struct{} done chan struct{} - quit chan struct{} + state *internal.WatcherState walDir string logger log.Logger MaxSegment int @@ -88,6 +89,7 @@ type Watcher struct { metrics *WatcherMetrics minReadFreq time.Duration maxReadFreq time.Duration + drainTimeout time.Duration marker Marker savedSegment int } @@ -99,7 +101,7 @@ func NewWatcher(walDir, id string, metrics *WatcherMetrics, writeTo WriteTo, log id: id, actions: writeTo, readNotify: make(chan struct{}), - quit: make(chan struct{}), + state: internal.NewWatcherState(logger), done: make(chan struct{}), MaxSegment: -1, marker: marker, @@ -108,6 +110,7 @@ func NewWatcher(walDir, id string, metrics *WatcherMetrics, writeTo WriteTo, log metrics: metrics, minReadFreq: config.MinReadFrequency, maxReadFreq: config.MaxReadFrequency, + drainTimeout: config.DrainTimeout, } } @@ -121,18 +124,26 @@ func (w *Watcher) Start() { // retries. 
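`WatcherState.Transition` above closes `stoppingSignal` only when moving into `StateStopping` from a different state, because a Go channel may be closed exactly once. The following is a stripped-down, hypothetical illustration of that close-once guard, not the agent's type:

```go
package main

import (
	"fmt"
	"sync"
)

// stopSignal mirrors the guard in WatcherState.Transition: the signalling
// channel is closed under the lock, and only on the first stop request.
type stopSignal struct {
	mut     sync.Mutex
	stopped bool
	signal  chan struct{}
}

func newStopSignal() *stopSignal {
	return &stopSignal{signal: make(chan struct{})}
}

// Stop closes the channel on the first call and is a no-op afterwards,
// avoiding a "close of closed channel" panic.
func (s *stopSignal) Stop() {
	s.mut.Lock()
	defer s.mut.Unlock()
	if !s.stopped {
		close(s.signal)
		s.stopped = true
	}
}

// Done lets consumers select on the stop signal, like WaitForStopping above.
func (s *stopSignal) Done() <-chan struct{} { return s.signal }

func main() {
	s := newStopSignal()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		<-s.Done()
		fmt.Println("worker observed the stop signal")
	}()

	s.Stop()
	s.Stop() // second call is a safe no-op
	wg.Wait()
}
```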
func (w *Watcher) mainLoop() {
defer close(w.done)
- for !isClosed(w.quit) {
+ for !w.state.IsStopping() {
if w.marker != nil {
w.savedSegment = w.marker.LastMarkedSegment()
level.Debug(w.logger).Log("msg", "last saved segment", "segment", w.savedSegment)
}
- if err := w.run(); err != nil {
+ err := w.run()
+ if err != nil {
level.Error(w.logger).Log("msg", "error tailing WAL", "err", err)
}
+ if w.state.IsDraining() && errors.Is(err, os.ErrNotExist) {
+ level.Info(w.logger).Log("msg", "Reached non existing segment while draining, assuming end of WAL")
+ // since we've reached the end of the WAL, and the Watcher is draining, promptly transition to stopping state
+ // so the watcher can stop early
+ w.state.Transition(internal.StateStopping)
+ }
+
select {
- case <-w.quit:
+ case <-w.state.WaitForStopping():
return
case <-time.After(5 * time.Second):
}
@@ -160,9 +171,8 @@
}
level.Debug(w.logger).Log("msg", "Tailing WAL", "currentSegment", currentSegment, "lastSegment", lastSegment)
- for !isClosed(w.quit) {
+ for !w.state.IsStopping() {
w.metrics.currentSegment.WithLabelValues(w.id).Set(float64(currentSegment))
- level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment)
// On start, we have a pointer to what is the latest segment. On subsequent calls to this function,
// currentSegment will have been incremented, and we should open that segment.
@@ -187,6 +197,8 @@
// If tail is false, we know the segment we are "watching" over is closed (no further write will occur to it). Then, the
// segment is read fully, any errors are logged as Warnings, and no error is returned.
func (w *Watcher) watch(segmentNum int, tail bool) error {
+ level.Debug(w.logger).Log("msg", "Watching WAL segment", "currentSegment", segmentNum, "tail", tail)
+
segment, err := wlog.OpenReadSegment(wlog.SegmentName(w.walDir, segmentNum))
if err != nil {
return err
@@ -215,7 +227,7 @@
for {
select {
- case <-w.quit:
+ case <-w.state.WaitForStopping():
return nil
case <-segmentTicker.C:
@@ -224,24 +236,30 @@
return fmt.Errorf("segments: %w", err)
}
- // Check if new segments exists.
- if last <= segmentNum {
+ // Check if new segments exist, or we are draining the WAL, which means that either:
+ // - This is the last segment, and we can consume it fully because we are draining the WAL
+ // - There's a segment after the current one, and we can consume this segment fully as well
+ if last <= segmentNum && !w.state.IsDraining() {
continue
}
- // Since we know last > segmentNum, there must be a new segment. Read the remaining from the segmentNum segment
- // and return from `watch` to read the next one
+ if w.state.IsDraining() {
+ level.Debug(w.logger).Log("msg", "Draining segment completely", "segment", segmentNum, "lastSegment", last)
+ }
+
+ // We know that there's either a new segment (last > segmentNum), or we are draining the WAL. In either case, read
+ // the remaining data from the segmentNum and return from `watch` to read the next one.
_, err = w.readSegment(reader, segmentNum) if debug { level.Warn(w.logger).Log("msg", "Error reading segment inside segmentTicker", "segment", segmentNum, "read", reader.Offset(), "err", err) } - // io.EOF error are non-fatal since we are tailing the wal + // io.EOF error are non-fatal since we are consuming the segment till the end if errors.Unwrap(err) != io.EOF { return err } - // return after reading the whole segment for creating a new LiveReader from the newly created segment + // return after reading the whole segment return nil // the cases below will unlock the select block, and execute the block below @@ -293,7 +311,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { func (w *Watcher) readSegment(r *wlog.LiveReader, segmentNum int) (bool, error) { var readData bool - for r.Next() && !isClosed(w.quit) { + for r.Next() && !w.state.IsStopping() { rec := r.Record() w.metrics.recordsRead.WithLabelValues(w.id).Inc() read, err := w.decodeAndDispatch(rec, segmentNum) @@ -331,9 +349,24 @@ func (w *Watcher) decodeAndDispatch(b []byte, segmentNum int) (bool, error) { return readData, firstErr } +// Drain moves the Watcher to a draining state, which will assume no more data is being written to the WAL, and it will +// attempt to read until the end of the last written segment. The calling routine of Drain will block until all data is +// read, or a timeout occurs. +func (w *Watcher) Drain() { + level.Info(w.logger).Log("msg", "Draining Watcher") + w.state.Transition(internal.StateDraining) + // wait for drain timeout, or stopping state, in case the Watcher does the transition itself promptly + select { + case <-time.NewTimer(w.drainTimeout).C: + level.Warn(w.logger).Log("msg", "Watcher drain timeout occurred, transitioning to Stopping") + case <-w.state.WaitForStopping(): + } +} + +// Stop stops the Watcher, shutting down the main routine. func (w *Watcher) Stop() { - // first close the quit channel to order main mainLoop routine to stop - close(w.quit) + w.state.Transition(internal.StateStopping) + // upon calling stop, wait for main mainLoop execution to stop <-w.done @@ -397,16 +430,6 @@ func (w *Watcher) findNextSegmentFor(index int) (int, error) { return -1, errors.New("failed to find segment for index") } -// isClosed checks in a non-blocking manner if a channel is closed or not. -func isClosed(c chan struct{}) bool { - select { - case <-c: - return true - default: - return false - } -} - // readSegmentNumbers reads the given directory and returns all segment identifiers, that is, the index of each segment // file. 
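Taken together, the `Drain` and `Stop` methods above give callers a two-step shutdown: first ask the Watcher to consume whatever is left in the WAL, bounded by `DrainTimeout`, then stop it outright. The following reduces that shape to a self-contained sketch; it is not the agent's implementation, and all names are illustrative:

```go
package main

import (
	"fmt"
	"time"
)

// drainer keeps the essentials of the Watcher's new shutdown behaviour:
// Drain switches the loop into a mode that consumes the remaining work and
// returns when the work runs out or a timeout fires; Stop then shuts it down.
type drainer struct {
	work     chan int
	draining chan struct{}
	stopped  chan struct{}
	done     chan struct{}
	timeout  time.Duration
}

func newDrainer(timeout time.Duration) *drainer {
	d := &drainer{
		work:     make(chan int, 16),
		draining: make(chan struct{}),
		stopped:  make(chan struct{}),
		done:     make(chan struct{}),
		timeout:  timeout,
	}
	go d.loop()
	return d
}

func (d *drainer) loop() {
	defer close(d.done)
	for {
		select {
		case <-d.stopped:
			return
		case item := <-d.work:
			fmt.Println("processed item", item)
		case <-d.draining:
			// Drain mode: consume everything still queued, then exit.
			for {
				select {
				case item := <-d.work:
					fmt.Println("drained item", item)
				default:
					return
				}
			}
		}
	}
}

// Drain blocks until the pending work is consumed or the timeout expires,
// mirroring the shape of Watcher.Drain above.
func (d *drainer) Drain() {
	close(d.draining)
	select {
	case <-d.done:
	case <-time.After(d.timeout):
		fmt.Println("drain timeout reached, stopping anyway")
	}
}

// Stop shuts the loop down and waits for it to exit, like Watcher.Stop.
func (d *drainer) Stop() {
	select {
	case <-d.done:
	default:
		close(d.stopped)
	}
	<-d.done
}

func main() {
	d := newDrainer(2 * time.Second)
	for i := 0; i < 5; i++ {
		d.work <- i
	}
	d.Drain()
	d.Stop()
}
```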
func readSegmentNumbers(dir string) ([]int, error) { diff --git a/component/common/loki/wal/watcher_metrics.go b/component/common/loki/wal/watcher_metrics.go index 4064f8b22aac..ce8052fd442d 100644 --- a/component/common/loki/wal/watcher_metrics.go +++ b/component/common/loki/wal/watcher_metrics.go @@ -1,6 +1,9 @@ package wal -import "github.com/prometheus/client_golang/prometheus" +import ( + "github.com/grafana/agent/pkg/util" + "github.com/prometheus/client_golang/prometheus" +) type WatcherMetrics struct { recordsRead *prometheus.CounterVec @@ -80,23 +83,13 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics { } if reg != nil { - m.recordsRead = mustRegisterOrGet(reg, m.recordsRead).(*prometheus.CounterVec) - m.recordDecodeFails = mustRegisterOrGet(reg, m.recordDecodeFails).(*prometheus.CounterVec) - m.droppedWriteNotifications = mustRegisterOrGet(reg, m.droppedWriteNotifications).(*prometheus.CounterVec) - m.segmentRead = mustRegisterOrGet(reg, m.segmentRead).(*prometheus.CounterVec) - m.currentSegment = mustRegisterOrGet(reg, m.currentSegment).(*prometheus.GaugeVec) - m.watchersRunning = mustRegisterOrGet(reg, m.watchersRunning).(*prometheus.GaugeVec) + m.recordsRead = util.MustRegisterOrGet(reg, m.recordsRead).(*prometheus.CounterVec) + m.recordDecodeFails = util.MustRegisterOrGet(reg, m.recordDecodeFails).(*prometheus.CounterVec) + m.droppedWriteNotifications = util.MustRegisterOrGet(reg, m.droppedWriteNotifications).(*prometheus.CounterVec) + m.segmentRead = util.MustRegisterOrGet(reg, m.segmentRead).(*prometheus.CounterVec) + m.currentSegment = util.MustRegisterOrGet(reg, m.currentSegment).(*prometheus.GaugeVec) + m.watchersRunning = util.MustRegisterOrGet(reg, m.watchersRunning).(*prometheus.GaugeVec) } return m } - -func mustRegisterOrGet(reg prometheus.Registerer, c prometheus.Collector) prometheus.Collector { - if err := reg.Register(c); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - return are.ExistingCollector - } - panic(err) - } - return c -} diff --git a/component/common/loki/wal/watcher_test.go b/component/common/loki/wal/watcher_test.go index a24b7ff63049..959dad3a5ff5 100644 --- a/component/common/loki/wal/watcher_test.go +++ b/component/common/loki/wal/watcher_test.go @@ -3,6 +3,7 @@ package wal import ( "fmt" "os" + "strings" "testing" "time" @@ -12,6 +13,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/tsdb/record" "github.com/stretchr/testify/require" + "go.uber.org/atomic" "github.com/grafana/agent/component/common/loki" "github.com/grafana/agent/component/common/loki/utils" @@ -64,12 +66,9 @@ func (t *testWriteTo) AssertContainsLines(tst *testing.T, lines ...string) { } t.ReadEntries.DoneIterate() - allSeen := true - for _, wasSeen := range seen { - allSeen = allSeen && wasSeen + for line, wasSeen := range seen { + require.True(tst, wasSeen, "expected to have received line: %s", line) } - - require.True(tst, allSeen, "expected all entries to have been received") } // watcherTestResources contains all resources necessary to test an individual Watcher functionality @@ -571,3 +570,228 @@ func TestWatcher_Replay(t *testing.T) { writeTo.AssertContainsLines(t, segment2Lines...) }) } + +// slowWriteTo mimics the combination of a WriteTo and a slow remote write client. This will allow us to have a writer +// that moves faster than the WAL watcher, and therefore, test the draining procedure. 
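`NewWatcherMetrics` above now routes registration through `util.MustRegisterOrGet`, which returns the already-registered collector instead of panicking when the same metric is created twice, for example when a component is torn down and rebuilt against the same registry. A standalone sketch of that register-or-get pattern, using only the Prometheus client API visible in the removed helper (the metric name is an example):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// registerOrGet registers c, or returns the collector already registered
// under the same descriptor, matching the helper removed above.
func registerOrGet(reg prometheus.Registerer, c prometheus.Collector) prometheus.Collector {
	if err := reg.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			return are.ExistingCollector
		}
		panic(err)
	}
	return c
}

func main() {
	reg := prometheus.NewRegistry()

	newCounter := func() *prometheus.CounterVec {
		return prometheus.NewCounterVec(prometheus.CounterOpts{
			Name: "wal_records_read_total",
			Help: "Example counter registered twice on purpose.",
		}, []string{"id"})
	}

	first := registerOrGet(reg, newCounter()).(*prometheus.CounterVec)
	second := registerOrGet(reg, newCounter()).(*prometheus.CounterVec) // returns the existing collector

	first.WithLabelValues("a").Inc()
	second.WithLabelValues("a").Inc()
	fmt.Println("both handles increment the same underlying metric")
}
```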
+type slowWriteTo struct { + t *testing.T + entriesReceived atomic.Uint64 + sleepAfterAppendEntries time.Duration +} + +func (s *slowWriteTo) SeriesReset(segmentNum int) { +} + +func (s *slowWriteTo) StoreSeries(series []record.RefSeries, segmentNum int) { +} + +func (s *slowWriteTo) AppendEntries(entries wal.RefEntries, segmentNum int) error { + // only log on development debug flag + if debug { + var allLines strings.Builder + for _, e := range entries.Entries { + allLines.WriteString(e.Line) + allLines.WriteString("/") + } + s.t.Logf("AppendEntries called from segment %d - %s", segmentNum, allLines.String()) + } + + s.entriesReceived.Add(uint64(len(entries.Entries))) + time.Sleep(s.sleepAfterAppendEntries) + return nil +} + +func TestWatcher_StopAndDrainWAL(t *testing.T) { + labels := model.LabelSet{ + "app": "test", + } + logger := level.NewFilter(log.NewLogfmtLogger(os.Stdout), level.AllowDebug()) + + // newTestingResources is a helper for bootstrapping all required testing resources + newTestingResources := func(t *testing.T, cfg WatchConfig) (*slowWriteTo, *Watcher, WAL) { + reg := prometheus.NewRegistry() + dir := t.TempDir() + metrics := NewWatcherMetrics(reg) + + // the slow write to will take one second on each AppendEntries operation + writeTo := &slowWriteTo{ + t: t, + sleepAfterAppendEntries: time.Second, + } + + watcher := NewWatcher(dir, "test", metrics, writeTo, logger, cfg, mockMarker{ + LastMarkedSegmentFunc: func() int { + // Ignore marker to read from last segment, which is none + return -1 + }, + }) + + // start watcher, and burn through WAL as we write to it + watcher.Start() + + wl, err := New(Config{ + Enabled: true, + Dir: dir, + }, logger, reg) + require.NoError(t, err) + return writeTo, watcher, wl + } + + t.Run("watcher drains WAL just in time", func(t *testing.T) { + cfg := DefaultWatchConfig + // considering the slow write to has a 1 second delay when Appending an entry, and before the draining begins, + // the watcher would have consumed only 5 entries, this timeout will give the Watcher just enough time to fully + // drain the WAL. + cfg.DrainTimeout = time.Second * 16 + writeTo, watcher, wl := newTestingResources(t, cfg) + defer wl.Close() + + ew := newEntryWriter() + + // helper to add context to each written line + var lineCounter atomic.Int64 + writeNLines := func(t *testing.T, n int) { + for i := 0; i < n; i++ { + // First, write to segment 0. This will be the last "marked" segment + err := ew.WriteEntry(loki.Entry{ + Labels: labels, + Entry: logproto.Entry{ + Timestamp: time.Now(), + Line: fmt.Sprintf("test line %d", lineCounter.Load()), + }, + }, wl, logger) + lineCounter.Add(1) + require.NoError(t, err) + } + } + + // The test will write the WAL while the Watcher is running. First, 10 lines will be written to a segment, and the test + // will wait for the Watcher to have read 5 lines. After, a new segment will be cut, 10 other lines written, and the + // Watcher stopped with drain. The test will expect all 20 lines in total to have been received. 
+ + writeNLines(t, 10) + + require.Eventually(t, func() bool { + return writeTo.entriesReceived.Load() >= 5 + }, time.Second*11, time.Millisecond*500, "expected the write to catch up to half of the first segment") + + _, err := wl.NextSegment() + require.NoError(t, err) + writeNLines(t, 10) + require.NoError(t, wl.Sync()) + + // Upon calling Stop drain, the Watcher should finish burning through segment 0, and also consume segment 1 + now := time.Now() + watcher.Drain() + watcher.Stop() + + // expecting 15s (missing 15 entries * 1 sec delay in AppendEntries) +/- 2.0s (taking into account the drain timeout + // has one extra second. + require.InDelta(t, time.Second*15, time.Since(now), float64(time.Millisecond*2000), "expected the drain procedure to take around 15s") + require.Equal(t, int(writeTo.entriesReceived.Load()), 20, "expected the watcher to fully drain the WAL") + }) + + t.Run("watcher should exit promptly after draining completely", func(t *testing.T) { + cfg := DefaultWatchConfig + // the drain timeout will be too long, for the amount of data remaining in the WAL (~15 entries more) + cfg.DrainTimeout = time.Second * 30 + writeTo, watcher, wl := newTestingResources(t, cfg) + defer wl.Close() + + ew := newEntryWriter() + + // helper to add context to each written line + var lineCounter atomic.Int64 + writeNLines := func(t *testing.T, n int) { + for i := 0; i < n; i++ { + // First, write to segment 0. This will be the last "marked" segment + err := ew.WriteEntry(loki.Entry{ + Labels: labels, + Entry: logproto.Entry{ + Timestamp: time.Now(), + Line: fmt.Sprintf("test line %d", lineCounter.Load()), + }, + }, wl, logger) + lineCounter.Add(1) + require.NoError(t, err) + } + } + + // The test will write the WAL while the Watcher is running. First, 10 lines will be written to a segment, and the test + // will wait for the Watcher to have read 5 lines. After, a new segment will be cut, 10 other lines written, and the + // Watcher stopped with drain. The test will expect all 20 lines in total to have been received. + + writeNLines(t, 10) + + require.Eventually(t, func() bool { + return writeTo.entriesReceived.Load() >= 5 + }, time.Second*11, time.Millisecond*500, "expected the write to catch up to half of the first segment") + + _, err := wl.NextSegment() + require.NoError(t, err) + writeNLines(t, 10) + require.NoError(t, wl.Sync()) + + // Upon calling Stop drain, the Watcher should finish burning through segment 0, and also consume segment 1 + now := time.Now() + watcher.Drain() + watcher.Stop() + + // expecting 15s (missing 15 entries * 1 sec delay in AppendEntries) +/- 2.0s (taking into account the drain timeout + // has one extra second. + require.InDelta(t, time.Second*15, time.Since(now), float64(time.Millisecond*2000), "expected the drain procedure to take around 15s") + require.Equal(t, int(writeTo.entriesReceived.Load()), 20, "expected the watcher to fully drain the WAL") + }) + + t.Run("watcher drain timeout too short, should exit promptly", func(t *testing.T) { + cfg := DefaultWatchConfig + // having a 10 seconds timeout should give the watcher enough time to only consume ~10 entries, and be missing ~5 + // from the last segment + cfg.DrainTimeout = time.Second * 10 + writeTo, watcher, wl := newTestingResources(t, cfg) + defer wl.Close() + + ew := newEntryWriter() + + // helper to add context to each written line + var lineCounter atomic.Int64 + writeNLines := func(t *testing.T, n int) { + for i := 0; i < n; i++ { + // First, write to segment 0. 
This will be the last "marked" segment + err := ew.WriteEntry(loki.Entry{ + Labels: labels, + Entry: logproto.Entry{ + Timestamp: time.Now(), + Line: fmt.Sprintf("test line %d", lineCounter.Load()), + }, + }, wl, logger) + lineCounter.Add(1) + require.NoError(t, err) + } + } + + // The test will write the WAL while the Watcher is running. First, 10 lines will be written to a segment, and the test + // will wait for the Watcher to have read 5 lines. After, a new segment will be cut, 10 other lines written, and the + // Watcher stopped with drain. The test will expect all 20 lines in total to have been received. + + writeNLines(t, 10) + + require.Eventually(t, func() bool { + return writeTo.entriesReceived.Load() >= 5 + }, time.Second*11, time.Millisecond*500, "expected the write to catch up to half of the first segment") + + _, err := wl.NextSegment() + require.NoError(t, err) + writeNLines(t, 10) + require.NoError(t, wl.Sync()) + + // Upon calling Stop drain, the Watcher should finish burning through segment 0, and also consume segment 1 + now := time.Now() + watcher.Drain() + watcher.Stop() + + require.InDelta(t, time.Second*10, time.Since(now), float64(time.Millisecond*2000), "expected the drain procedure to take around 15s") + require.Less(t, int(writeTo.entriesReceived.Load()), 20, "expected watcher to have not consumed WAL fully") + require.InDelta(t, 15, int(writeTo.entriesReceived.Load()), 1.0, "expected Watcher to consume at most +/- 1 entry from the WAL") + }) +} diff --git a/component/common/loki/wal/writer.go b/component/common/loki/wal/writer.go index 929199529c5d..e71773d944d6 100644 --- a/component/common/loki/wal/writer.go +++ b/component/common/loki/wal/writer.go @@ -59,6 +59,7 @@ type Writer struct { reclaimedOldSegmentsSpaceCounter *prometheus.CounterVec lastReclaimedSegment *prometheus.GaugeVec + lastWrittenTimestamp *prometheus.GaugeVec closeCleaner chan struct{} } @@ -96,10 +97,17 @@ func NewWriter(walCfg Config, logger log.Logger, reg prometheus.Registerer) (*Wr Name: "last_reclaimed_segment", Help: "Last reclaimed segment number", }, []string{}) + wrt.lastWrittenTimestamp = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "loki_write", + Subsystem: "wal_writer", + Name: "last_written_timestamp", + Help: "Latest timestamp that was written to the WAL", + }, []string{}) if reg != nil { _ = reg.Register(wrt.reclaimedOldSegmentsSpaceCounter) _ = reg.Register(wrt.lastReclaimedSegment) + _ = reg.Register(wrt.lastWrittenTimestamp) } wrt.start(walCfg.MaxSegmentAge) @@ -118,6 +126,9 @@ func (wrt *Writer) start(maxSegmentAge time.Duration) { continue } + // emit metric with latest written timestamp, to be able to track delay from writer to watcher + wrt.lastWrittenTimestamp.WithLabelValues().Set(float64(e.Timestamp.Unix())) + wrt.writeSubscribersLock.RLock() for _, s := range wrt.writeSubscribers { s.NotifyWrite() diff --git a/component/discovery/aws/ec2.go b/component/discovery/aws/ec2.go index 566527d2f67e..dfc6d00f5d53 100644 --- a/component/discovery/aws/ec2.go +++ b/component/discovery/aws/ec2.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/grafana/agent/component" + "github.com/grafana/agent/component/common/config" "github.com/grafana/agent/component/discovery" "github.com/grafana/river/rivertypes" promcfg "github.com/prometheus/common/config" @@ -42,18 +43,21 @@ type EC2Arguments struct { RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` Port int 
`river:"port,attr,optional"` Filters []*EC2Filter `river:"filter,block,optional"` + + HTTPClientConfig config.HTTPClientConfig `river:",squash"` } func (args EC2Arguments) Convert() *promaws.EC2SDConfig { cfg := &promaws.EC2SDConfig{ - Endpoint: args.Endpoint, - Region: args.Region, - AccessKey: args.AccessKey, - SecretKey: promcfg.Secret(args.SecretKey), - Profile: args.Profile, - RoleARN: args.RoleARN, - RefreshInterval: model.Duration(args.RefreshInterval), - Port: args.Port, + Endpoint: args.Endpoint, + Region: args.Region, + AccessKey: args.AccessKey, + SecretKey: promcfg.Secret(args.SecretKey), + Profile: args.Profile, + RoleARN: args.RoleARN, + RefreshInterval: model.Duration(args.RefreshInterval), + Port: args.Port, + HTTPClientConfig: *args.HTTPClientConfig.Convert(), } for _, f := range args.Filters { cfg.Filters = append(cfg.Filters, &promaws.EC2Filter{ @@ -65,8 +69,9 @@ func (args EC2Arguments) Convert() *promaws.EC2SDConfig { } var DefaultEC2SDConfig = EC2Arguments{ - Port: 80, - RefreshInterval: 60 * time.Second, + Port: 80, + RefreshInterval: 60 * time.Second, + HTTPClientConfig: config.DefaultHTTPClientConfig, } // SetToDefault implements river.Defaulter. diff --git a/component/discovery/aws/ec2_test.go b/component/discovery/aws/ec2_test.go new file mode 100644 index 000000000000..7696d750a4ff --- /dev/null +++ b/component/discovery/aws/ec2_test.go @@ -0,0 +1,29 @@ +package aws + +import ( + "net/url" + "testing" + + "github.com/grafana/agent/component/common/config" + "github.com/stretchr/testify/require" + "gotest.tools/assert" +) + +func TestConvert(t *testing.T) { + // parse example proxy + u, err := url.Parse("http://example:8080") + require.NoError(t, err) + httpClientConfig := config.DefaultHTTPClientConfig + httpClientConfig.ProxyURL = config.URL{URL: u} + + // example configuration + riverArgs := EC2Arguments{ + Region: "us-east-1", + HTTPClientConfig: httpClientConfig, + } + + // ensure values are set + promArgs := riverArgs.Convert() + assert.Equal(t, "us-east-1", promArgs.Region) + assert.Equal(t, "http://example:8080", promArgs.HTTPClientConfig.ProxyURL.String()) +} diff --git a/component/discovery/aws/lightsail.go b/component/discovery/aws/lightsail.go index 3f47366cc8b7..2b414a54faff 100644 --- a/component/discovery/aws/lightsail.go +++ b/component/discovery/aws/lightsail.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/grafana/agent/component" + "github.com/grafana/agent/component/common/config" "github.com/grafana/agent/component/discovery" "github.com/grafana/river/rivertypes" promcfg "github.com/prometheus/common/config" @@ -27,34 +28,37 @@ func init() { // LightsailArguments is the configuration for AWS Lightsail based service discovery. 
type LightsailArguments struct { - Endpoint string `river:"endpoint,attr,optional"` - Region string `river:"region,attr,optional"` - AccessKey string `river:"access_key,attr,optional"` - SecretKey rivertypes.Secret `river:"secret_key,attr,optional"` - Profile string `river:"profile,attr,optional"` - RoleARN string `river:"role_arn,attr,optional"` - RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` - Port int `river:"port,attr,optional"` + Endpoint string `river:"endpoint,attr,optional"` + Region string `river:"region,attr,optional"` + AccessKey string `river:"access_key,attr,optional"` + SecretKey rivertypes.Secret `river:"secret_key,attr,optional"` + Profile string `river:"profile,attr,optional"` + RoleARN string `river:"role_arn,attr,optional"` + RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` + Port int `river:"port,attr,optional"` + HTTPClientConfig config.HTTPClientConfig `river:",squash"` } func (args LightsailArguments) Convert() *promaws.LightsailSDConfig { cfg := &promaws.LightsailSDConfig{ - Endpoint: args.Endpoint, - Region: args.Region, - AccessKey: args.AccessKey, - SecretKey: promcfg.Secret(args.SecretKey), - Profile: args.Profile, - RoleARN: args.RoleARN, - RefreshInterval: model.Duration(args.RefreshInterval), - Port: args.Port, + Endpoint: args.Endpoint, + Region: args.Region, + AccessKey: args.AccessKey, + SecretKey: promcfg.Secret(args.SecretKey), + Profile: args.Profile, + RoleARN: args.RoleARN, + RefreshInterval: model.Duration(args.RefreshInterval), + Port: args.Port, + HTTPClientConfig: *args.HTTPClientConfig.Convert(), } return cfg } // DefaultLightsailSDConfig is the default Lightsail SD configuration. var DefaultLightsailSDConfig = LightsailArguments{ - Port: 80, - RefreshInterval: 60 * time.Second, + Port: 80, + RefreshInterval: 60 * time.Second, + HTTPClientConfig: config.DefaultHTTPClientConfig, } // SetToDefault implements river.Defaulter. diff --git a/component/discovery/azure/azure.go b/component/discovery/azure/azure.go index 3e1ef563625c..9ed1363f5250 100644 --- a/component/discovery/azure/azure.go +++ b/component/discovery/azure/azure.go @@ -55,6 +55,8 @@ var DefaultArguments = Arguments{ Environment: azure.PublicCloud.Name, Port: 80, RefreshInterval: 5 * time.Minute, + FollowRedirects: true, + EnableHTTP2: true, } // SetToDefault implements river.Defaulter. diff --git a/component/discovery/consul/consul.go b/component/discovery/consul/consul.go index 1192bae6c6d2..de6aae2d4510 100644 --- a/component/discovery/consul/consul.go +++ b/component/discovery/consul/consul.go @@ -45,11 +45,12 @@ type Arguments struct { } var DefaultArguments = Arguments{ - Server: "localhost:8500", - TagSeparator: ",", - Scheme: "http", - AllowStale: true, - RefreshInterval: 30 * time.Second, + Server: "localhost:8500", + TagSeparator: ",", + Scheme: "http", + AllowStale: true, + RefreshInterval: 30 * time.Second, + HTTPClientConfig: config.DefaultHTTPClientConfig, } // SetToDefault implements river.Defaulter. diff --git a/component/discovery/digitalocean/digitalocean.go b/component/discovery/digitalocean/digitalocean.go index 360ef70ce818..bde15337da88 100644 --- a/component/discovery/digitalocean/digitalocean.go +++ b/component/discovery/digitalocean/digitalocean.go @@ -39,6 +39,8 @@ type Arguments struct { var DefaultArguments = Arguments{ Port: 80, RefreshInterval: time.Minute, + FollowRedirects: true, + EnableHTTP2: true, } // SetToDefault implements river.Defaulter. 
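With config.HTTPClientConfig squashed into the EC2 and Lightsail argument structs (and the equivalent defaults added to the azure, consul, and digitalocean components above), the HTTP client options become settable directly in the River block. A minimal test-style sketch for discovery.ec2, along the lines of TestConvert above; the proxy_url attribute name is assumed to be what the squashed config.HTTPClientConfig block exposes:

```go
package aws

import (
	"testing"

	"github.com/grafana/river"
	"github.com/stretchr/testify/require"
)

// TestUnmarshalWithProxy is a sketch only: proxy_url is assumed to be the
// attribute exposed by the squashed config.HTTPClientConfig block.
func TestUnmarshalWithProxy(t *testing.T) {
	cfg := `
		region    = "us-east-1"
		proxy_url = "http://example:8080"
	`

	var args EC2Arguments
	require.NoError(t, river.Unmarshal([]byte(cfg), &args))

	// Convert should carry the proxy URL through to the Prometheus SD config.
	promArgs := args.Convert()
	require.Equal(t, "us-east-1", promArgs.Region)
	require.Equal(t, "http://example:8080", promArgs.HTTPClientConfig.ProxyURL.String())
}
```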
diff --git a/component/discovery/kubelet/kubelet.go b/component/discovery/kubelet/kubelet.go index 53032c424968..1fecc1e88f8e 100644 --- a/component/discovery/kubelet/kubelet.go +++ b/component/discovery/kubelet/kubelet.go @@ -34,6 +34,7 @@ const ( podNameLabel = metaLabelPrefix + "pod_name" podIPLabel = metaLabelPrefix + "pod_ip" podContainerNameLabel = metaLabelPrefix + "pod_container_name" + podContainerIDLabel = metaLabelPrefix + "pod_container_id" podContainerImageLabel = metaLabelPrefix + "pod_container_image" podContainerPortNameLabel = metaLabelPrefix + "pod_container_port_name" podContainerPortNumberLabel = metaLabelPrefix + "pod_container_port_number" @@ -128,11 +129,10 @@ func NewKubeletDiscovery(args Arguments) (*Discovery, error) { Transport: transport, Timeout: 30 * time.Second, } - // ensure the path is the kubelet pods endpoint - args.URL.Path = "/pods" + // Append the path to the kubelet pods endpoint return &Discovery{ client: client, - url: args.URL.String(), + url: args.URL.String() + "/pods", targetNamespaces: args.Namespaces, }, nil } @@ -214,6 +214,27 @@ func (d *Discovery) buildPodTargetGroup(pod v1.Pod) *targetgroup.Group { containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) for i, c := range containers { isInit := i >= len(pod.Spec.Containers) + cStatuses := &pod.Status.ContainerStatuses + if isInit { + cStatuses = &pod.Status.InitContainerStatuses + } + cID := d.findPodContainerID(cStatuses, c.Name) + + // If no ports are defined for the container, create an anonymous + // target per container. + if len(c.Ports) == 0 { + // We don't have a port so we just set the address label to the pod IP. + // The user has to add a port manually. + tg.Targets = append(tg.Targets, model.LabelSet{ + model.AddressLabel: lv(pod.Status.PodIP), + podContainerNameLabel: lv(c.Name), + podContainerIDLabel: lv(cID), + podContainerImageLabel: lv(c.Image), + podContainerIsInit: lv(strconv.FormatBool(isInit)), + }) + continue + } + for _, port := range c.Ports { ports := strconv.FormatUint(uint64(port.ContainerPort), 10) addr := net.JoinHostPort(pod.Status.PodIP, ports) @@ -221,6 +242,7 @@ func (d *Discovery) buildPodTargetGroup(pod v1.Pod) *targetgroup.Group { tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: lv(addr), podContainerNameLabel: lv(c.Name), + podContainerIDLabel: lv(cID), podContainerImageLabel: lv(c.Image), podContainerPortNumberLabel: lv(ports), podContainerPortNameLabel: lv(port.Name), @@ -233,6 +255,23 @@ func (d *Discovery) buildPodTargetGroup(pod v1.Pod) *targetgroup.Group { return tg } +func (p *Discovery) findPodContainerStatus(statuses *[]v1.ContainerStatus, containerName string) (*v1.ContainerStatus, error) { + for _, s := range *statuses { + if s.Name == containerName { + return &s, nil + } + } + return nil, fmt.Errorf("cannot find container with name %v", containerName) +} + +func (p *Discovery) findPodContainerID(statuses *[]v1.ContainerStatus, containerName string) string { + cStatus, err := p.findPodContainerStatus(statuses, containerName) + if err != nil { + return "" + } + return cStatus.ContainerID +} + func (d *Discovery) podInTargetNamespaces(pod v1.Pod) bool { for _, ns := range d.targetNamespaces { if pod.Namespace == ns { diff --git a/component/discovery/kubelet/kubelet_test.go b/component/discovery/kubelet/kubelet_test.go index dd73a42d1b3d..183f789aef70 100644 --- a/component/discovery/kubelet/kubelet_test.go +++ b/component/discovery/kubelet/kubelet_test.go @@ -1,12 +1,14 @@ package kubelet import ( + "net/url" 
"testing" "github.com/prometheus/prometheus/discovery/targetgroup" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/grafana/agent/component/common/config" "github.com/grafana/river" "github.com/stretchr/testify/require" ) @@ -88,3 +90,37 @@ func newPod(name, namespace string) v1.Pod { }, } } + +func TestDiscoveryPodWithoutPod(t *testing.T) { + pod1 := newPod("pod-1", "namespace-1") + pod2 := newPod("pod-2", "namespace-2") + pod1.Spec.Containers[0].Ports = []v1.ContainerPort{} + + podList1 := v1.PodList{ + Items: []v1.Pod{pod1, pod2}, + } + + kubeletDiscovery, err := NewKubeletDiscovery(DefaultConfig) + require.NoError(t, err) + + _, err = kubeletDiscovery.refresh(podList1) + require.NoError(t, err) + require.Len(t, kubeletDiscovery.discoveredPodSources, 2) +} + +func TestWithDefaultKubeletHost(t *testing.T) { + kubeletDiscovery, err := NewKubeletDiscovery(DefaultConfig) + require.NoError(t, err) + require.Equal(t, "https://localhost:10250/pods", kubeletDiscovery.url) +} + +func TestWithCustomPath(t *testing.T) { + kubeletProxyUrl, _ := url.Parse("https://kubernetes.default.svc.cluster.local:443/api/v1/nodes/cluster-node-1/proxy") + kubeletDiscovery, err := NewKubeletDiscovery(Arguments{ + URL: config.URL{ + URL: kubeletProxyUrl, + }, + }) + require.NoError(t, err) + require.Equal(t, "https://kubernetes.default.svc.cluster.local:443/api/v1/nodes/cluster-node-1/proxy/pods", kubeletDiscovery.url) +} diff --git a/component/discovery/ovhcloud/ovhcloud.go b/component/discovery/ovhcloud/ovhcloud.go new file mode 100644 index 000000000000..e3479f45a5f7 --- /dev/null +++ b/component/discovery/ovhcloud/ovhcloud.go @@ -0,0 +1,94 @@ +package ovhcloud + +import ( + "fmt" + "time" + + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/discovery" + "github.com/grafana/river/rivertypes" + "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + prom_discovery "github.com/prometheus/prometheus/discovery/ovhcloud" +) + +func init() { + component.Register(component.Registration{ + Name: "discovery.ovhcloud", + Args: Arguments{}, + Exports: discovery.Exports{}, + + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { + return New(opts, args.(Arguments)) + }, + }) +} + +// Arguments configure the discovery.ovhcloud component. +type Arguments struct { + Endpoint string `river:"endpoint,attr,optional"` + ApplicationKey string `river:"application_key,attr"` + ApplicationSecret rivertypes.Secret `river:"application_secret,attr"` + ConsumerKey rivertypes.Secret `river:"consumer_key,attr"` + RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` + Service string `river:"service,attr"` +} + +// DefaultArguments is used to initialize default values for Arguments. +var DefaultArguments = Arguments{ + Endpoint: "ovh-eu", + RefreshInterval: 60 * time.Second, +} + +// SetToDefault implements river.Defaulter. +func (args *Arguments) SetToDefault() { + *args = DefaultArguments +} + +// Validate implements river.Validator. 
+func (args *Arguments) Validate() error { + if args.Endpoint == "" { + return fmt.Errorf("endpoint cannot be empty") + } + + if args.ApplicationKey == "" { + return fmt.Errorf("application_key cannot be empty") + } + + if args.ApplicationSecret == "" { + return fmt.Errorf("application_secret cannot be empty") + } + + if args.ConsumerKey == "" { + return fmt.Errorf("consumer_key cannot be empty") + } + + switch args.Service { + case "dedicated_server", "vps": + // Valid value - do nothing. + default: + return fmt.Errorf("unknown service: %v", args.Service) + } + + return nil +} + +// Convert returns the upstream configuration struct. +func (args *Arguments) Convert() *prom_discovery.SDConfig { + return &prom_discovery.SDConfig{ + Endpoint: args.Endpoint, + ApplicationKey: args.ApplicationKey, + ApplicationSecret: config.Secret(args.ApplicationSecret), + ConsumerKey: config.Secret(args.ConsumerKey), + RefreshInterval: model.Duration(args.RefreshInterval), + Service: args.Service, + } +} + +// New returns a new instance of a discovery.ovhcloud component. +func New(opts component.Options, args Arguments) (*discovery.Component, error) { + return discovery.New(opts, args, func(args component.Arguments) (discovery.Discoverer, error) { + newArgs := args.(Arguments) + return prom_discovery.NewDiscovery(newArgs.Convert(), opts.Logger) + }) +} diff --git a/component/discovery/ovhcloud/ovhcloud_test.go b/component/discovery/ovhcloud/ovhcloud_test.go new file mode 100644 index 000000000000..8e579574fc67 --- /dev/null +++ b/component/discovery/ovhcloud/ovhcloud_test.go @@ -0,0 +1,135 @@ +package ovhcloud_test + +import ( + "testing" + "time" + + "github.com/grafana/agent/component/discovery/ovhcloud" + "github.com/grafana/river" + "github.com/prometheus/common/model" + prom_ovh "github.com/prometheus/prometheus/discovery/ovhcloud" + "github.com/stretchr/testify/require" +) + +func TestUnmarshal(t *testing.T) { + tests := []struct { + testName string + cfg string + expected *prom_ovh.SDConfig + errorMsg string + }{ + { + testName: "defaults", + cfg: ` + application_key = "appkey" + application_secret = "appsecret" + consumer_key = "consumerkey" + service = "dedicated_server" + `, + expected: &prom_ovh.SDConfig{ + Endpoint: ovhcloud.DefaultArguments.Endpoint, + ApplicationKey: "appkey", + ApplicationSecret: "appsecret", + ConsumerKey: "consumerkey", + RefreshInterval: model.Duration(ovhcloud.DefaultArguments.RefreshInterval), + Service: "dedicated_server", + }, + }, + { + testName: "explicit", + cfg: ` + endpoint = "custom-endpoint" + refresh_interval = "11m" + application_key = "appkey" + application_secret = "appsecret" + consumer_key = "consumerkey" + service = "vps" + `, + expected: &prom_ovh.SDConfig{ + Endpoint: "custom-endpoint", + ApplicationKey: "appkey", + ApplicationSecret: "appsecret", + ConsumerKey: "consumerkey", + RefreshInterval: model.Duration(11 * time.Minute), + Service: "vps", + }, + }, + { + testName: "empty application key", + cfg: ` + endpoint = "custom-endpoint" + refresh_interval = "11m" + application_key = "" + application_secret = "appsecret" + consumer_key = "consumerkey" + service = "vps" + `, + errorMsg: "application_key cannot be empty", + }, + { + testName: "empty application secret", + cfg: ` + endpoint = "custom-endpoint" + refresh_interval = "11m" + application_key = "appkey" + application_secret = "" + consumer_key = "consumerkey" + service = "vps" + `, + errorMsg: "application_secret cannot be empty", + }, + { + testName: "empty consumer key", + cfg: ` + endpoint = 
"custom-endpoint" + refresh_interval = "11m" + application_key = "appkey" + application_secret = "appsecret" + consumer_key = "" + service = "vps" + `, + errorMsg: "consumer_key cannot be empty", + }, + { + testName: "empty endpoint", + cfg: ` + endpoint = "" + refresh_interval = "11m" + application_key = "appkey" + application_secret = "appsecret" + consumer_key = "consumerkey" + service = "vps" + `, + errorMsg: "endpoint cannot be empty", + }, + { + testName: "unknown service", + cfg: ` + endpoint = "custom-endpoint" + refresh_interval = "11m" + application_key = "appkey" + application_secret = "appsecret" + consumer_key = "consumerkey" + service = "asdf" + `, + errorMsg: "unknown service: asdf", + }, + } + + for _, tc := range tests { + t.Run(tc.testName, func(t *testing.T) { + var args ovhcloud.Arguments + err := river.Unmarshal([]byte(tc.cfg), &args) + if tc.errorMsg != "" { + require.ErrorContains(t, err, tc.errorMsg) + return + } + + require.NoError(t, err) + + promArgs := args.Convert() + + require.Equal(t, tc.expected, promArgs) + }) + } +} diff --git a/component/faro/receiver/handler.go b/component/faro/receiver/handler.go index fb8511e0bbde..636f00859e2b 100644 --- a/component/faro/receiver/handler.go +++ b/component/faro/receiver/handler.go @@ -69,7 +69,7 @@ func (h *handler) Update(args ServerArguments) { if len(args.CORSAllowedOrigins) > 0 { h.cors = cors.New(cors.Options{ AllowedOrigins: args.CORSAllowedOrigins, - AllowedHeaders: []string{apiKeyHeader, "content-type"}, + AllowedHeaders: []string{apiKeyHeader, "content-type", "x-faro-session-id"}, }) } else { h.cors = nil // Disable cors. diff --git a/component/faro/receiver/sourcemaps.go b/component/faro/receiver/sourcemaps.go index 7f1183c27c27..0c679052b6a9 100644 --- a/component/faro/receiver/sourcemaps.go +++ b/component/faro/receiver/sourcemaps.go @@ -18,7 +18,7 @@ import ( "github.com/go-sourcemap/sourcemap" "github.com/grafana/agent/component/faro/receiver/internal/payload" "github.com/grafana/agent/pkg/flow/logging/level" - "github.com/minio/pkg/wildcard" + "github.com/grafana/agent/pkg/util/wildcard" "github.com/prometheus/client_golang/prometheus" "github.com/vincent-petithory/dataurl" ) diff --git a/component/loki/process/stages/geoip.go b/component/loki/process/stages/geoip.go index 0b4787bd0e2a..a157e236d9b9 100644 --- a/component/loki/process/stages/geoip.go +++ b/component/loki/process/stages/geoip.go @@ -27,6 +27,7 @@ type GeoIPFields int const ( CITYNAME GeoIPFields = iota COUNTRYNAME + COUNTRYCODE CONTINENTNAME CONTINENTCODE LOCATION @@ -34,11 +35,14 @@ const ( TIMEZONE SUBDIVISIONNAME SUBDIVISIONCODE + ASN + ASNORG ) var fields = map[GeoIPFields]string{ CITYNAME: "geoip_city_name", COUNTRYNAME: "geoip_country_name", + COUNTRYCODE: "geoip_country_code", CONTINENTNAME: "geoip_continent_name", CONTINENTCODE: "geoip_continent_code", LOCATION: "geoip_location", @@ -46,6 +50,8 @@ var fields = map[GeoIPFields]string{ TIMEZONE: "geoip_timezone", SUBDIVISIONNAME: "geoip_subdivision_name", SUBDIVISIONCODE: "geoip_subdivision_code", + ASN: "geoip_autonomous_system_number", + ASNORG: "geoip_autonomous_system_organization", } // GeoIPConfig represents GeoIP stage config @@ -69,7 +75,7 @@ func validateGeoIPConfig(c GeoIPConfig) (map[string]*jmespath.JMESPath, error) { } switch c.DBType { - case "", "asn", "city": + case "", "asn", "city", "country": default: return nil, ErrEmptyDBTypeGeoIPStageConfig } @@ -182,6 +188,14 @@ func (g *geoIPStage) process(_ model.LabelSet, extracted map[string]interface{}) return } 
g.populateExtractedWithASNData(extracted, &record) + case "country": + var record geoip2.Country + err := g.mmdb.Lookup(ip, &record) + if err != nil { + level.Error(g.logger).Log("msg", "unable to get Country record for the ip", "err", err, "ip", ip) + return + } + g.populateExtractedWithCountryData(extracted, &record) default: level.Error(g.logger).Log("msg", "unknown database type") } @@ -210,6 +224,11 @@ func (g *geoIPStage) populateExtractedWithCityData(extracted map[string]interfac if contryName != "" { extracted[label] = contryName } + case COUNTRYCODE: + contryCode := record.Country.IsoCode + if contryCode != "" { + extracted[label] = contryCode + } case CONTINENTNAME: continentName := record.Continent.Names["en"] if continentName != "" { @@ -252,20 +271,51 @@ func (g *geoIPStage) populateExtractedWithCityData(extracted map[string]interfac extracted[label] = subdivisionCode } } - default: - level.Error(g.logger).Log("msg", "unknown geoip field") } } } func (g *geoIPStage) populateExtractedWithASNData(extracted map[string]interface{}, record *geoip2.ASN) { - autonomousSystemNumber := record.AutonomousSystemNumber - autonomousSystemOrganization := record.AutonomousSystemOrganization - if autonomousSystemNumber != 0 { - extracted["geoip_autonomous_system_number"] = autonomousSystemNumber + for field, label := range fields { + switch field { + case ASN: + autonomousSystemNumber := record.AutonomousSystemNumber + if autonomousSystemNumber != 0 { + extracted[label] = autonomousSystemNumber + } + case ASNORG: + autonomousSystemOrganization := record.AutonomousSystemOrganization + if autonomousSystemOrganization != "" { + extracted[label] = autonomousSystemOrganization + } + } } - if autonomousSystemOrganization != "" { - extracted["geoip_autonomous_system_organization"] = autonomousSystemOrganization +} + +func (g *geoIPStage) populateExtractedWithCountryData(extracted map[string]interface{}, record *geoip2.Country) { + for field, label := range fields { + switch field { + case COUNTRYNAME: + contryName := record.Country.Names["en"] + if contryName != "" { + extracted[label] = contryName + } + case COUNTRYCODE: + contryCode := record.Country.IsoCode + if contryCode != "" { + extracted[label] = contryCode + } + case CONTINENTNAME: + continentName := record.Continent.Names["en"] + if continentName != "" { + extracted[label] = continentName + } + case CONTINENTCODE: + continentCode := record.Continent.Code + if continentCode != "" { + extracted[label] = continentCode + } + } } } diff --git a/component/loki/process/stages/geoip_test.go b/component/loki/process/stages/geoip_test.go index 2e53afa025f8..26d1802f74de 100644 --- a/component/loki/process/stages/geoip_test.go +++ b/component/loki/process/stages/geoip_test.go @@ -2,11 +2,21 @@ package stages import ( "errors" + "fmt" + "net" "testing" + util_log "github.com/grafana/loki/pkg/util/log" + "github.com/oschwald/geoip2-golang" + "github.com/oschwald/maxminddb-golang" "github.com/stretchr/testify/require" ) +var ( + geoipTestIP string = "192.0.2.1" + geoipTestSource string = "dummy" +) + func Test_ValidateConfigs(t *testing.T) { source := "ip" tests := []struct { @@ -21,6 +31,14 @@ func Test_ValidateConfigs(t *testing.T) { }, nil, }, + { + GeoIPConfig{ + DB: "test", + Source: &source, + DBType: "country", + }, + nil, + }, { GeoIPConfig{ DB: "test", @@ -81,3 +99,153 @@ func Test_ValidateConfigs(t *testing.T) { } } } + +/* + NOTE: + database schema: https://github.com/maxmind/MaxMind-DB/tree/main/source-data + Script used to build the minimal 
binaries: https://github.com/vimt/MaxMind-DB-Writer-python +*/ + +func Test_MaxmindAsn(t *testing.T) { + mmdb, err := maxminddb.Open("testdata/geoip_maxmind_asn.mmdb") + if err != nil { + t.Error(err) + return + } + defer mmdb.Close() + + var record geoip2.ASN + err = mmdb.Lookup(net.ParseIP(geoipTestIP), &record) + if err != nil { + t.Error(err) + } + + config := GeoIPConfig{ + DB: "test", + Source: &geoipTestSource, + DBType: "asn", + } + valuesExpressions, err := validateGeoIPConfig(config) + if err != nil { + t.Errorf("Error validating test-config: %v", err) + } + testStage := &geoIPStage{ + mmdb: mmdb, + logger: util_log.Logger, + valuesExpressions: valuesExpressions, + cfgs: config, + } + + extracted := map[string]interface{}{} + testStage.populateExtractedWithASNData(extracted, &record) + + for _, field := range []string{ + fields[ASN], + fields[ASNORG], + } { + _, present := extracted[field] + if !present { + t.Errorf("GeoIP label %v not present", field) + } + } +} + +func Test_MaxmindCity(t *testing.T) { + mmdb, err := maxminddb.Open("testdata/geoip_maxmind_city.mmdb") + if err != nil { + t.Error(err) + return + } + defer mmdb.Close() + + var record geoip2.City + err = mmdb.Lookup(net.ParseIP(geoipTestIP), &record) + if err != nil { + t.Error(err) + } + + config := GeoIPConfig{ + DB: "test", + Source: &geoipTestSource, + DBType: "city", + } + valuesExpressions, err := validateGeoIPConfig(config) + if err != nil { + t.Errorf("Error validating test-config: %v", err) + } + testStage := &geoIPStage{ + mmdb: mmdb, + logger: util_log.Logger, + valuesExpressions: valuesExpressions, + cfgs: config, + } + + extracted := map[string]interface{}{} + testStage.populateExtractedWithCityData(extracted, &record) + + for _, field := range []string{ + fields[COUNTRYNAME], + fields[COUNTRYCODE], + fields[CONTINENTNAME], + fields[CONTINENTCODE], + fields[CITYNAME], + fmt.Sprintf("%s_latitude", fields[LOCATION]), + fmt.Sprintf("%s_longitude", fields[LOCATION]), + fields[POSTALCODE], + fields[TIMEZONE], + fields[SUBDIVISIONNAME], + fields[SUBDIVISIONCODE], + fields[COUNTRYNAME], + } { + _, present := extracted[field] + if !present { + t.Errorf("GeoIP label %v not present", field) + } + } +} + +func Test_MaxmindCountry(t *testing.T) { + mmdb, err := maxminddb.Open("testdata/geoip_maxmind_country.mmdb") + if err != nil { + t.Error(err) + return + } + defer mmdb.Close() + + var record geoip2.Country + err = mmdb.Lookup(net.ParseIP(geoipTestIP), &record) + if err != nil { + t.Error(err) + } + + config := GeoIPConfig{ + DB: "test", + Source: &geoipTestSource, + DBType: "country", + } + valuesExpressions, err := validateGeoIPConfig(config) + if err != nil { + t.Errorf("Error validating test-config: %v", err) + } + testStage := &geoIPStage{ + mmdb: mmdb, + logger: util_log.Logger, + valuesExpressions: valuesExpressions, + cfgs: config, + } + + extracted := map[string]interface{}{} + testStage.populateExtractedWithCountryData(extracted, &record) + + for _, field := range []string{ + fields[COUNTRYNAME], + fields[COUNTRYCODE], + fields[CONTINENTNAME], + fields[CONTINENTCODE], + } { + _, present := extracted[field] + if !present { + t.Errorf("GeoIP label %v not present", field) + } + } +} diff --git a/component/loki/process/stages/testdata/geoip_maxmind_asn.mmdb b/component/loki/process/stages/testdata/geoip_maxmind_asn.mmdb new file mode 100644 index 000000000000..4abd5e255736 Binary files /dev/null and b/component/loki/process/stages/testdata/geoip_maxmind_asn.mmdb differ diff --git 
a/component/loki/process/stages/testdata/geoip_maxmind_city.mmdb b/component/loki/process/stages/testdata/geoip_maxmind_city.mmdb new file mode 100644 index 000000000000..72d5e818ec7f Binary files /dev/null and b/component/loki/process/stages/testdata/geoip_maxmind_city.mmdb differ diff --git a/component/loki/process/stages/testdata/geoip_maxmind_country.mmdb b/component/loki/process/stages/testdata/geoip_maxmind_country.mmdb new file mode 100644 index 000000000000..b285d57cef13 Binary files /dev/null and b/component/loki/process/stages/testdata/geoip_maxmind_country.mmdb differ diff --git a/component/loki/process/stages/testdata/geoip_source.json b/component/loki/process/stages/testdata/geoip_source.json new file mode 100644 index 000000000000..6023eb8560aa --- /dev/null +++ b/component/loki/process/stages/testdata/geoip_source.json @@ -0,0 +1,14 @@ +[ + { + "type": "GeoLite2-ASN", + "data": {"192.0.2.0/24": {"autonomous_system_number": 1337, "autonomous_system_organization": "Just a Test"}} + }, + { + "type": "GeoIP2-Country", + "data": {"192.0.2.0/24": {"continent": {"code": "NA", "geoname_id": 6255149, "names": {"de": "Nordamerika", "en": "North America", "es": "Norteam\u00e9rica", "fr": "Am\u00e9rique du Nord", "ja": "\u5317\u30a2\u30e1\u30ea\u30ab", "pt-BR": "Am\u00e9rica do Norte", "ru": "\u0421\u0435\u0432\u0435\u0440\u043d\u0430\u044f \u0410\u043c\u0435\u0440\u0438\u043a", "zh-CN": "\u5317\u7f8e\u6d32"}}, "country": {"geoname_id": 6252001, "is_in_european_union": false, "iso_code": "US", "names": {"de": "Vereinigte Staaten", "en": "United States", "es": "Estados Unidos", "fr": "\u00c9tats Unis", "ja": "\u30a2\u30e1\u30ea\u30ab", "pt-BR": "EUA", "ru": "\u0421\u0428\u0410", "zh-CN": "\u7f8e\u56fd"}}, "registered_country": {"geoname_id": 6252001, "is_in_european_union": false, "iso_code": "US", "names": {"de": "Vereinigte Staaten", "en": "United States", "es": "Estados Unidos", "fr": "\u00c9tats Unis", "ja": "\u30a2\u30e1\u30ea\u30ab", "pt-BR": "EUA", "ru": "\u0421\u0428\u0410", "zh-CN": "\u7f8e\u56fd"}}, "traits": {"is_anonymous_proxy": true, "is_satellite_provider": true, "is_anycast": true}}} + }, + { + "type": "GeoIP2-City", + "data": {"192.0.2.0/24": {"continent": {"code": "EU", "geoname_id": 6255148, "names": {"de": "Europa", "en": "Europe", "es": "Europa", "fr": "Europe", "ja": "\u30e8\u30fc\u30ed\u30c3\u30d1", "pt-BR": "Europa", "ru": "\u0415\u0432\u0440\u043e\u043f\u0430", "zh-CN": "\u6b27\u6d32"}}, "country": {"geoname_id": 2635167, "is_in_european_union": false, "iso_code": "GB", "names": {"de": "Vereinigtes K\u00f6nigreich", "en": "United Kingdom", "es": "Reino Unido", "fr": "Royaume-Uni", "ja": "\u30a4\u30ae\u30ea\u30b9", "pt-BR": "Reino Unido", "ru": "\u0412\u0435\u043b\u0438\u043a\u043e\u0431\u0440\u0438\u0442\u0430\u043d\u0438\u044f", "zh-CN": "\u82f1\u56fd"}}, "registered_country": {"geoname_id": 6252001, "is_in_european_union": false, "iso_code": "US", "names": {"de": "USA", "en": "United States", "es": "Estados Unidos", "fr": "\u00c9tats-Unis", "ja": "\u30a2\u30e1\u30ea\u30ab\u5408\u8846\u56fd", "pt-BR": "Estados Unidos", "ru": "\u0421\u0428\u0410", "zh-CN": "\u7f8e\u56fd"}}, "traits": {"is_anonymous_proxy": true, "is_satellite_provider": true, "is_anycast": true}, "location": {"accuracy_radius": 100, "latitude": 51.5142, "longitude": -0.0931, "time_zone": "Europe/London"}, "postal": {"code": "OX1"}, "city": {"geoname_id": 2643743, "names": {"de": "London", "en": "London", "es": "Londres", "fr": "Londres", "ja": "\u30ed\u30f3\u30c9\u30f3", "pt-BR": "Londres", "ru": 
"\u041b\u043e\u043d\u0434\u043e\u043d"}}, "subdivisions": [{"geoname_id": 6269131, "iso_code": "ENG", "names": {"en": "England", "es": "Inglaterra", "fr": "Angleterre", "pt-BR": "Inglaterra"}}]}} + } +] \ No newline at end of file diff --git a/component/loki/source/docker/docker.go b/component/loki/source/docker/docker.go index 584838f244fc..400b1d30b5e6 100644 --- a/component/loki/source/docker/docker.go +++ b/component/loki/source/docker/docker.go @@ -22,7 +22,7 @@ import ( flow_relabel "github.com/grafana/agent/component/common/relabel" "github.com/grafana/agent/component/discovery" dt "github.com/grafana/agent/component/loki/source/docker/internal/dockertarget" - "github.com/grafana/agent/pkg/build" + "github.com/grafana/agent/internal/useragent" "github.com/grafana/agent/pkg/flow/logging/level" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -40,7 +40,7 @@ func init() { }) } -var userAgent = fmt.Sprintf("GrafanaAgent/%s", build.Version) +var userAgent = useragent.Get() const ( dockerLabel = model.MetaLabelPrefix + "docker_" @@ -242,11 +242,11 @@ func (c *Component) Update(args component.Arguments) error { return err } targets = append(targets, tgt) - - // This will never fail because it only fails if the context gets canceled. - _ = c.manager.syncTargets(context.Background(), targets) } + // This will never fail because it only fails if the context gets canceled. + _ = c.manager.syncTargets(context.Background(), targets) + c.args = newArgs return nil } diff --git a/component/loki/source/kubernetes/kubernetes.go b/component/loki/source/kubernetes/kubernetes.go index ce11017c6f87..80792520e9c3 100644 --- a/component/loki/source/kubernetes/kubernetes.go +++ b/component/loki/source/kubernetes/kubernetes.go @@ -24,9 +24,8 @@ import ( func init() { component.Register(component.Registration{ - Name: "loki.source.kubernetes", - Args: Arguments{}, - NeedsServices: []string{cluster.ServiceName}, + Name: "loki.source.kubernetes", + Args: Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return New(opts, args.(Arguments)) diff --git a/component/loki/source/podlogs/podlogs.go b/component/loki/source/podlogs/podlogs.go index 8fa4b48b96c0..f7a194e4b79a 100644 --- a/component/loki/source/podlogs/podlogs.go +++ b/component/loki/source/podlogs/podlogs.go @@ -26,9 +26,8 @@ import ( func init() { component.Register(component.Registration{ - Name: "loki.source.podlogs", - Args: Arguments{}, - NeedsServices: []string{cluster.ServiceName}, + Name: "loki.source.podlogs", + Args: Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return New(opts, args.(Arguments)) diff --git a/component/loki/source/windowsevent/bookmark.go b/component/loki/source/windowsevent/bookmark.go new file mode 100644 index 000000000000..1997427ce50b --- /dev/null +++ b/component/loki/source/windowsevent/bookmark.go @@ -0,0 +1,91 @@ +//go:build windows +// +build windows + +// This code is copied from Promtail v1.6.2-0.20231004111112-07cbef92268a with minor changes. + +package windowsevent + +import ( + "bytes" + "errors" + "io" + "io/fs" + "os" + + "github.com/natefinch/atomic" + + "github.com/grafana/loki/clients/pkg/promtail/targets/windows/win_eventlog" +) + +type bookMark struct { + handle win_eventlog.EvtHandle + isNew bool + path string + buf []byte +} + +// newBookMark creates a new windows event bookmark. +// The bookmark will be saved at the given path. 
Use save to save the current position for a given event. +func newBookMark(path string) (*bookMark, error) { + // 16kb buffer for rendering bookmark + buf := make([]byte, 16<<10) + + _, err := os.Stat(path) + // creates a new bookmark file if none exists. + if errors.Is(err, fs.ErrNotExist) { + _, err := os.Create(path) + if err != nil { + return nil, err + } + bm, err := win_eventlog.CreateBookmark("") + if err != nil { + return nil, err + } + return &bookMark{ + handle: bm, + path: path, + isNew: true, + buf: buf, + }, nil + } + if err != nil { + return nil, err + } + // otherwise open the current one. + file, err := os.OpenFile(path, os.O_RDWR, 0666) + if err != nil { + return nil, err + } + defer file.Close() + fileContent, err := io.ReadAll(file) + if err != nil { + return nil, err + } + fileString := string(fileContent) + // load the current bookmark. + bm, err := win_eventlog.CreateBookmark(fileString) + if err != nil { + // If we errored likely due to incorrect data then create a blank one + bm, err = win_eventlog.CreateBookmark("") + fileString = "" + // This should never fail but just in case. + if err != nil { + return nil, err + } + } + return &bookMark{ + handle: bm, + path: path, + isNew: fileString == "", + buf: buf, + }, nil +} + +// save Saves the bookmark at the current event position. +func (b *bookMark) save(event win_eventlog.EvtHandle) error { + newBookmark, err := win_eventlog.UpdateBookmark(b.handle, event, b.buf) + if err != nil { + return err + } + return atomic.WriteFile(b.path, bytes.NewReader([]byte(newBookmark))) +} diff --git a/component/loki/source/windowsevent/component_windows.go b/component/loki/source/windowsevent/component_windows.go index 2dc969029126..c11673ce1a49 100644 --- a/component/loki/source/windowsevent/component_windows.go +++ b/component/loki/source/windowsevent/component_windows.go @@ -11,7 +11,6 @@ import ( "github.com/grafana/agent/component/common/loki/utils" "github.com/grafana/loki/clients/pkg/promtail/api" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" - "github.com/grafana/loki/clients/pkg/promtail/targets/windows" ) func init() { @@ -35,7 +34,7 @@ type Component struct { mut sync.RWMutex args Arguments - target *windows.Target + target *Target handle *handler receivers []loki.LogsReceiver } @@ -123,7 +122,7 @@ func (c *Component) Update(args component.Arguments) error { _ = f.Close() } - winTarget, err := windows.New(c.opts.Logger, c.handle, nil, convertConfig(newArgs)) + winTarget, err := NewTarget(c.opts.Logger, c.handle, nil, convertConfig(newArgs)) if err != nil { return err } diff --git a/component/loki/source/windowsevent/format.go b/component/loki/source/windowsevent/format.go new file mode 100644 index 000000000000..eb521a7757a4 --- /dev/null +++ b/component/loki/source/windowsevent/format.go @@ -0,0 +1,121 @@ +//go:build windows +// +build windows + +// This code is copied from Promtail v1.6.2-0.20231004111112-07cbef92268a with minor changes. 
+ +package windowsevent + +import ( + "fmt" + "syscall" + + jsoniter "github.com/json-iterator/go" + + "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" + "github.com/grafana/loki/clients/pkg/promtail/targets/windows/win_eventlog" +) + +type Event struct { + Source string `json:"source,omitempty"` + Channel string `json:"channel,omitempty"` + Computer string `json:"computer,omitempty"` + EventID int `json:"event_id,omitempty"` + Version int `json:"version,omitempty"` + + Level int `json:"level,omitempty"` + Task int `json:"task,omitempty"` + Opcode int `json:"opCode,omitempty"` + + LevelText string `json:"levelText,omitempty"` + TaskText string `json:"taskText,omitempty"` + OpcodeText string `json:"opCodeText,omitempty"` + + Keywords string `json:"keywords,omitempty"` + TimeCreated string `json:"timeCreated,omitempty"` + EventRecordID int `json:"eventRecordID,omitempty"` + Correlation *Correlation `json:"correlation,omitempty"` + Execution *Execution `json:"execution,omitempty"` + + Security *Security `json:"security,omitempty"` + UserData string `json:"user_data,omitempty"` + EventData string `json:"event_data,omitempty"` + Message string `json:"message,omitempty"` +} + +type Security struct { + UserID string `json:"userId,omitempty"` + UserName string `json:"userName,omitempty"` +} + +type Execution struct { + ProcessID uint32 `json:"processId,omitempty"` + ThreadID uint32 `json:"threadId,omitempty"` + ProcessName string `json:"processName,omitempty"` +} + +type Correlation struct { + ActivityID string `json:"activityID,omitempty"` + RelatedActivityID string `json:"relatedActivityID,omitempty"` +} + +// formatLine format a Loki log line from a windows event. +func formatLine(cfg *scrapeconfig.WindowsEventsTargetConfig, event win_eventlog.Event) (string, error) { + structuredEvent := Event{ + Source: event.Source.Name, + Channel: event.Channel, + Computer: event.Computer, + EventID: event.EventID, + Version: event.Version, + Level: event.Level, + Task: event.Task, + Opcode: event.Opcode, + LevelText: event.LevelText, + TaskText: event.TaskText, + OpcodeText: event.OpcodeText, + Keywords: event.Keywords, + TimeCreated: event.TimeCreated.SystemTime, + EventRecordID: event.EventRecordID, + } + + if !cfg.ExcludeEventData { + structuredEvent.EventData = string(event.EventData.InnerXML) + } + if !cfg.ExcludeUserData { + structuredEvent.UserData = string(event.UserData.InnerXML) + } + if !cfg.ExcludeEventMessage { + structuredEvent.Message = event.Message + } + if event.Correlation.ActivityID != "" || event.Correlation.RelatedActivityID != "" { + structuredEvent.Correlation = &Correlation{ + ActivityID: event.Correlation.ActivityID, + RelatedActivityID: event.Correlation.RelatedActivityID, + } + } + // best effort to get the username of the event. 
+ if event.Security.UserID != "" { + var userName string + usid, err := syscall.StringToSid(event.Security.UserID) + if err == nil { + username, domain, _, err := usid.LookupAccount("") + if err == nil { + userName = fmt.Sprint(domain, "\\", username) + } + } + structuredEvent.Security = &Security{ + UserID: event.Security.UserID, + UserName: userName, + } + } + if event.Execution.ProcessID != 0 { + structuredEvent.Execution = &Execution{ + ProcessID: event.Execution.ProcessID, + ThreadID: event.Execution.ThreadID, + } + _, _, processName, err := win_eventlog.GetFromSnapProcess(event.Execution.ProcessID) + if err == nil { + structuredEvent.Execution.ProcessName = processName + } + } + return jsoniter.MarshalToString(structuredEvent) +} diff --git a/component/loki/source/windowsevent/target.go b/component/loki/source/windowsevent/target.go new file mode 100644 index 000000000000..60c719f40a66 --- /dev/null +++ b/component/loki/source/windowsevent/target.go @@ -0,0 +1,230 @@ +//go:build windows +// +build windows + +// This code is copied from Promtail v1.6.2-0.20231004111112-07cbef92268a with minor changes. + +package windowsevent + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/relabel" + "golang.org/x/sys/windows" + + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/clients/pkg/promtail/api" + "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" + "github.com/grafana/loki/clients/pkg/promtail/targets/target" + "github.com/grafana/loki/clients/pkg/promtail/targets/windows/win_eventlog" + + util_log "github.com/grafana/loki/pkg/util/log" +) + +type Target struct { + subscription win_eventlog.EvtHandle + handler api.EntryHandler + cfg *scrapeconfig.WindowsEventsTargetConfig + relabelConfig []*relabel.Config + logger log.Logger + + bm *bookMark // bookmark to save positions. + fetcher *win_eventlog.EventFetcher + + ready bool + done chan struct{} + wg sync.WaitGroup + err error +} + +// NewTarget create a new windows targets, that will fetch windows event logs and send them to Loki. +func NewTarget( + logger log.Logger, + handler api.EntryHandler, + relabel []*relabel.Config, + cfg *scrapeconfig.WindowsEventsTargetConfig, +) (*Target, error) { + sigEvent, err := windows.CreateEvent(nil, 0, 0, nil) + if err != nil { + return nil, err + } + defer windows.CloseHandle(sigEvent) + + bm, err := newBookMark(cfg.BookmarkPath) + if err != nil { + return nil, fmt.Errorf("failed to create bookmark using path=%s: %w", cfg.BookmarkPath, err) + } + + t := &Target{ + done: make(chan struct{}), + cfg: cfg, + bm: bm, + relabelConfig: relabel, + logger: logger, + handler: handler, + fetcher: win_eventlog.NewEventFetcher(), + } + + if cfg.Query == "" { + cfg.Query = "*" + } + + var subsHandle win_eventlog.EvtHandle + if bm.isNew { + subsHandle, err = win_eventlog.EvtSubscribe(cfg.EventlogName, cfg.Query) + } else { + subsHandle, err = win_eventlog.EvtSubscribeWithBookmark(cfg.EventlogName, cfg.Query, bm.handle) + } + + if err != nil { + return nil, fmt.Errorf("error subscribing to windows events: %w", err) + } + t.subscription = subsHandle + + if t.cfg.PollInterval == 0 { + t.cfg.PollInterval = 3 * time.Second + } + go t.loop() + return t, nil +} + +// loop fetches new events and send them to via the Loki client. 
+func (t *Target) loop() { + t.ready = true + t.wg.Add(1) + interval := time.NewTicker(t.cfg.PollInterval) + defer func() { + t.ready = false + t.wg.Done() + interval.Stop() + }() + + for { + + loop: + for { + // fetch events until there's no more. + events, handles, err := t.fetcher.FetchEvents(t.subscription, t.cfg.Locale) + if err != nil { + if err != win_eventlog.ERROR_NO_MORE_ITEMS { + t.err = err + level.Error(util_log.Logger).Log("msg", "error fetching events", "err", err) + } + break loop + } + t.err = nil + // we have received events to handle. + for i, entry := range t.renderEntries(events) { + t.handler.Chan() <- entry + if err := t.bm.save(handles[i]); err != nil { + t.err = err + level.Error(util_log.Logger).Log("msg", "error saving bookmark", "err", err) + } + } + win_eventlog.Close(handles) + + } + // no more messages we wait for next poll timer tick. + select { + case <-t.done: + return + case <-interval.C: + } + } +} + +// renderEntries renders Loki entries from windows event logs +func (t *Target) renderEntries(events []win_eventlog.Event) []api.Entry { + res := make([]api.Entry, 0, len(events)) + lbs := labels.NewBuilder(nil) + for _, event := range events { + entry := api.Entry{ + Labels: make(model.LabelSet), + } + + entry.Timestamp = time.Now() + if t.cfg.UseIncomingTimestamp { + timeStamp, err := time.Parse(time.RFC3339Nano, fmt.Sprintf("%v", event.TimeCreated.SystemTime)) + if err != nil { + level.Warn(t.logger).Log("msg", "error parsing timestamp", "err", err) + } else { + entry.Timestamp = timeStamp + } + } + // Add constant labels + for k, v := range t.cfg.Labels { + lbs.Set(string(k), string(v)) + } + // discover labels + if channel := model.LabelValue(event.Channel); channel != "" && channel.IsValid() { + lbs.Set("channel", event.Channel) + } + if computer := model.LabelValue(event.Computer); computer != "" && computer.IsValid() { + lbs.Set("computer", event.Computer) + } + // apply relabelings. + processed, _ := relabel.Process(lbs.Labels(), t.relabelConfig...) + + for _, lbl := range processed { + if strings.HasPrefix(lbl.Name, "__") { + continue + } + entry.Labels[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value) + } + + line, err := formatLine(t.cfg, event) + if err != nil { + level.Warn(t.logger).Log("msg", "error formatting event", "err", err) + continue + } + entry.Line = line + res = append(res, entry) + } + return res +} + +// Type returns WindowsTargetType. +func (t *Target) Type() target.TargetType { + return target.WindowsTargetType +} + +// Ready indicates whether or not the windows target is ready. +func (t *Target) Ready() bool { + if t.err != nil { + return false + } + return t.ready +} + +// DiscoveredLabels returns discovered labels from the target. +func (t *Target) DiscoveredLabels() model.LabelSet { + // todo(cyriltovena) we might want to sample discovered labels later and returns them here. + return nil +} + +// Labels returns the set of labels that statically apply to all log entries +// produced by the windows target. +func (t *Target) Labels() model.LabelSet { + return t.cfg.Labels +} + +// Details returns target-specific details. 
+func (t *Target) Details() interface{} {
+	if t.err != nil {
+		return map[string]string{"err": t.err.Error()}
+	}
+	return map[string]string{}
+}
+
+func (t *Target) Stop() error {
+	close(t.done)
+	t.wg.Wait()
+	t.handler.Stop()
+	return t.err
+}
diff --git a/component/loki/write/types.go b/component/loki/write/types.go
index 2959f1b681a1..dc240c675e98 100644
--- a/component/loki/write/types.go
+++ b/component/loki/write/types.go
@@ -82,7 +82,7 @@ type QueueConfig struct {
 func (q *QueueConfig) SetToDefault() {
 	*q = QueueConfig{
 		Capacity:     10 * units.MiB, // considering the default BatchSize of 1MiB, this gives us a default buffered channel of size 10
-		DrainTimeout: time.Minute,
+		DrainTimeout: 15 * time.Second,
 	}
 }
diff --git a/component/loki/write/write.go b/component/loki/write/write.go
index 45d4aa063ec3..65fd04c6f692 100644
--- a/component/loki/write/write.go
+++ b/component/loki/write/write.go
@@ -12,7 +12,7 @@ import (
 	"github.com/grafana/agent/component/common/loki/client"
 	"github.com/grafana/agent/component/common/loki/limit"
 	"github.com/grafana/agent/component/common/loki/wal"
-	"github.com/grafana/agent/pkg/build"
+	"github.com/grafana/agent/internal/agentseed"
 )
 
 func init() {
@@ -25,8 +25,6 @@ func init() {
 			return New(opts, args.(Arguments))
 		},
 	})
-
-	client.UserAgent = fmt.Sprintf("GrafanaAgent/%s", build.Version)
 }
 
 // Arguments holds values which are used to configure the loki.write component.
@@ -44,6 +42,7 @@ type WalArguments struct {
 	MaxSegmentAge    time.Duration `river:"max_segment_age,attr,optional"`
 	MinReadFrequency time.Duration `river:"min_read_frequency,attr,optional"`
 	MaxReadFrequency time.Duration `river:"max_read_frequency,attr,optional"`
+	DrainTimeout     time.Duration `river:"drain_timeout,attr,optional"`
 }
 
 func (wa *WalArguments) Validate() error {
@@ -61,6 +60,7 @@ func (wa *WalArguments) SetToDefault() {
 		MaxSegmentAge:    wal.DefaultMaxSegmentAge,
 		MinReadFrequency: wal.DefaultWatchConfig.MinReadFrequency,
 		MaxReadFrequency: wal.DefaultWatchConfig.MaxReadFrequency,
+		DrainTimeout:     wal.DefaultWatchConfig.DrainTimeout,
 	}
 }
@@ -84,7 +84,7 @@ type Component struct {
 	receiver loki.LogsReceiver
 
 	// remote write components
-	clientManger client.Client
+	clientManger *client.Manager
 	walWriter    *wal.Writer
 
 	// sink is the place where log entries received by this component should be written to. If WAL
@@ -114,16 +114,31 @@ func New(o component.Options, args Arguments) (*Component, error) {
 
 // Run implements component.Component.
 func (c *Component) Run(ctx context.Context) error {
+	defer func() {
+		// when exiting Run, proceed to shut down first the writer component, and then
+		// the client manager, with the WAL and remote-write client inside
+		if c.walWriter != nil {
+			c.walWriter.Stop()
+		}
+		if c.clientManger != nil {
+			// drain, since the component is shutting down. That means the agent is shutting down as well
+			c.clientManger.StopWithDrain(true)
+		}
+	}()
+
 	for {
 		select {
 		case <-ctx.Done():
 			return nil
 		case entry := <-c.receiver.Chan():
+			c.mut.RLock()
 			select {
 			case <-ctx.Done():
+				c.mut.RUnlock()
 				return nil
 			case c.sink.Chan() <- entry:
 			}
+			c.mut.RUnlock()
 		}
 	}
 }
@@ -140,16 +155,25 @@ func (c *Component) Update(args component.Arguments) error {
 		c.walWriter.Stop()
 	}
 	if c.clientManger != nil {
+		// only drain on component shutdown
 		c.clientManger.Stop()
 	}
 
 	cfgs := newArgs.convertClientConfigs()
+	uid := agentseed.Get().UID
+	for _, cfg := range cfgs {
+		if cfg.Headers == nil {
+			cfg.Headers = map[string]string{}
+		}
+		cfg.Headers[agentseed.HeaderName] = uid
+	}
 	walCfg := wal.Config{
 		Enabled:       newArgs.WAL.Enabled,
 		MaxSegmentAge: newArgs.WAL.MaxSegmentAge,
 		WatchConfig: wal.WatchConfig{
 			MinReadFrequency: newArgs.WAL.MinReadFrequency,
 			MaxReadFrequency: newArgs.WAL.MaxReadFrequency,
+			DrainTimeout:     newArgs.WAL.DrainTimeout,
 		},
 	}
diff --git a/component/loki/write/write_test.go b/component/loki/write/write_test.go
index 0e7c1a3165cb..d77bebe21c0f 100644
--- a/component/loki/write/write_test.go
+++ b/component/loki/write/write_test.go
@@ -10,18 +10,19 @@ import (
 	"testing"
 	"time"
 
-	"github.com/prometheus/common/model"
-	"github.com/stretchr/testify/require"
-
 	"github.com/grafana/agent/component/common/loki"
 	"github.com/grafana/agent/component/common/loki/wal"
 	"github.com/grafana/agent/component/discovery"
 	lsf "github.com/grafana/agent/component/loki/source/file"
 	"github.com/grafana/agent/pkg/flow/componenttest"
 	"github.com/grafana/agent/pkg/util"
+	"github.com/grafana/river"
+	"github.com/prometheus/common/model"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/atomic"
+
 	"github.com/grafana/loki/pkg/logproto"
 	loki_util "github.com/grafana/loki/pkg/util"
-	"github.com/grafana/river"
 )
 
 func TestRiverConfig(t *testing.T) {
@@ -78,6 +79,7 @@ func TestUnmarshallWalAttrributes(t *testing.T) {
 				MaxSegmentAge:    wal.DefaultMaxSegmentAge,
 				MinReadFrequency: wal.DefaultWatchConfig.MinReadFrequency,
 				MaxReadFrequency: wal.DefaultWatchConfig.MaxReadFrequency,
+				DrainTimeout:     wal.DefaultWatchConfig.DrainTimeout,
 			},
 		},
 		"wal enabled with defaults": {
@@ -89,6 +91,7 @@ func TestUnmarshallWalAttrributes(t *testing.T) {
 				MaxSegmentAge:    wal.DefaultMaxSegmentAge,
 				MinReadFrequency: wal.DefaultWatchConfig.MinReadFrequency,
 				MaxReadFrequency: wal.DefaultWatchConfig.MaxReadFrequency,
+				DrainTimeout:     wal.DefaultWatchConfig.DrainTimeout,
 			},
 		},
 		"wal enabled with some overrides": {
@@ -96,12 +99,14 @@ func TestUnmarshallWalAttrributes(t *testing.T) {
 				enabled = true
 				max_segment_age = "10m"
 				min_read_frequency = "11ms"
+				drain_timeout = "5m"
 			`,
 			expected: WalArguments{
 				Enabled:          true,
 				MaxSegmentAge:    time.Minute * 10,
 				MinReadFrequency: time.Millisecond * 11,
 				MaxReadFrequency: wal.DefaultWatchConfig.MaxReadFrequency,
+				DrainTimeout:     time.Minute * 5,
 			},
 		},
 	} {
@@ -304,3 +309,98 @@ func testMultipleEndpoint(t *testing.T, alterArgs func(arguments *Arguments)) {
 		}
 	}
 }
+
+type testCase struct {
+	linesCount  int
+	seriesCount int
+}
+
+func BenchmarkLokiWrite(b *testing.B) {
+	for name, tc := range map[string]testCase{
+		"100 lines, single series": {
+			linesCount:  100,
+			seriesCount: 1,
+		},
+		"100k lines, 100 series": {
+			linesCount:  100_000,
+			seriesCount: 100,
+		},
+	} {
+		b.Run(name, func(b *testing.B) {
+			benchSingleEndpoint(b, tc, func(arguments *Arguments) {})
+		})
+	}
+}
+
+func benchSingleEndpoint(b *testing.B, tc testCase, alterConfig func(arguments *Arguments)) {
+	// Set up the server that will receive the log entry, and expose it on ch.
+	var seenLines atomic.Int64
+	ch := make(chan logproto.PushRequest)
+
+	// just count seenLines for each entry received
+	go func() {
+		for pr := range ch {
+			count := 0
+			for _, str := range pr.Streams {
+				count += len(str.Entries)
+			}
+			seenLines.Add(int64(count))
+		}
+	}()
+
+	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var pushReq logproto.PushRequest
+		err := loki_util.ParseProtoReader(context.Background(), r.Body, int(r.ContentLength), math.MaxInt32, &pushReq, loki_util.RawSnappy)
+		if err != nil {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+		tenantHeader := r.Header.Get("X-Scope-OrgID")
+		require.Equal(b, tenantHeader, "tenant-1")
+
+		ch <- pushReq
+	}))
+	defer srv.Close()
+
+	// Set up the component Arguments.
+	cfg := fmt.Sprintf(`
+		endpoint {
+			url        = "%s"
+			batch_wait = "10ms"
+			tenant_id  = "tenant-1"
+		}
+	`, srv.URL)
+	var args Arguments
+	require.NoError(b, river.Unmarshal([]byte(cfg), &args))
+
+	alterConfig(&args)
+
+	// Set up and start the component.
+	testComp, err := componenttest.NewControllerFromID(util.TestLogger(b), "loki.write")
+	require.NoError(b, err)
+	go func() {
+		err = testComp.Run(componenttest.TestContext(b), args)
+		require.NoError(b, err)
+	}()
+	require.NoError(b, testComp.WaitExports(time.Second))
+
+	// get exports from component
+	exports := testComp.Exports().(Exports)
+
+	for i := 0; i < b.N; i++ {
+		for j := 0; j < tc.linesCount; j++ {
+			logEntry := loki.Entry{
+				Labels: model.LabelSet{"foo": model.LabelValue(fmt.Sprintf("bar-%d", i%tc.seriesCount))},
+				Entry: logproto.Entry{
+					Timestamp: time.Now(),
+					Line:      "very important log",
+				},
+			}
+			exports.Receiver.Chan() <- logEntry
+		}
+
+		require.Eventually(b, func() bool {
+			return int64(tc.linesCount) == seenLines.Load()
+		}, time.Minute, time.Second, "haven't seen expected number of lines")
+	}
+}
diff --git a/component/metadata/metadata.go b/component/metadata/metadata.go
new file mode 100644
index 000000000000..8ff2587ae8e3
--- /dev/null
+++ b/component/metadata/metadata.go
@@ -0,0 +1,193 @@
+package metadata
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/grafana/agent/component"
+	_ "github.com/grafana/agent/component/all"
+	"github.com/grafana/agent/component/common/loki"
+	"github.com/grafana/agent/component/discovery"
+	"github.com/grafana/agent/component/otelcol"
+	"github.com/grafana/agent/component/pyroscope"
+	"github.com/prometheus/prometheus/storage"
+)
+
+//TODO(thampiotr): Instead of metadata package reaching into registry, we'll migrate to using a YAML schema file that
+// contains information about all the available components. This file will be generated separately and
+// can be used by other tools.
+
+type Type struct {
+	Name string
+	// Returns true if provided args include this type (including nested structs)
+	existsInArgsFn func(args component.Arguments) bool
+	// Returns true if provided exports include this type (including nested structs)
+	existsInExportsFn func(exports component.Exports) bool
+}
+
+func (t Type) String() string {
+	return fmt.Sprintf("Type[%s]", t.Name)
+}
+
+var (
+	TypeTargets = Type{
+		Name: "Targets",
+		existsInArgsFn: func(args component.Arguments) bool {
+			return hasFieldOfType(args, reflect.TypeOf([]discovery.Target{}))
+		},
+		existsInExportsFn: func(exports component.Exports) bool {
+			return hasFieldOfType(exports, reflect.TypeOf([]discovery.Target{}))
+		},
+	}
+
+	TypeLokiLogs = Type{
+		Name: "Loki `LogsReceiver`",
+		existsInArgsFn: func(args component.Arguments) bool {
+			return hasFieldOfType(args, reflect.TypeOf([]loki.LogsReceiver{}))
+		},
+		existsInExportsFn: func(exports component.Exports) bool {
+			return hasFieldOfType(exports, reflect.TypeOf(loki.NewLogsReceiver()))
+		},
+	}
+
+	TypePromMetricsReceiver = Type{
+		Name: "Prometheus `MetricsReceiver`",
+		existsInArgsFn: func(args component.Arguments) bool {
+			return hasFieldOfType(args, reflect.TypeOf([]storage.Appendable{}))
+		},
+		existsInExportsFn: func(exports component.Exports) bool {
+			var a *storage.Appendable = nil
+			return hasFieldOfType(exports, reflect.TypeOf(a).Elem())
+		},
+	}
+
+	TypePyroProfilesReceiver = Type{
+		Name: "Pyroscope `ProfilesReceiver`",
+		existsInArgsFn: func(args component.Arguments) bool {
+			return hasFieldOfType(args, reflect.TypeOf([]pyroscope.Appendable{}))
+		},
+		existsInExportsFn: func(exports component.Exports) bool {
+			var a *pyroscope.Appendable = nil
+			return hasFieldOfType(exports, reflect.TypeOf(a).Elem())
+		},
+	}
+
+	TypeOTELReceiver = Type{
+		Name: "OpenTelemetry `otelcol.Consumer`",
+		existsInArgsFn: func(args component.Arguments) bool {
+			return hasFieldOfType(args, reflect.TypeOf([]otelcol.Consumer{}))
+		},
+		existsInExportsFn: func(exports component.Exports) bool {
+			var a *otelcol.Consumer = nil
+			return hasFieldOfType(exports, reflect.TypeOf(a).Elem())
+		},
+	}
+
+	AllTypes = []Type{
+		TypeTargets,
+		TypeLokiLogs,
+		TypePromMetricsReceiver,
+		TypePyroProfilesReceiver,
+		TypeOTELReceiver,
+	}
+)
+
+type Metadata struct {
+	accepts []Type
+	exports []Type
+}
+
+func (m Metadata) Empty() bool {
+	return len(m.accepts) == 0 && len(m.exports) == 0
+}
+
+func (m Metadata) AllTypesAccepted() []Type {
+	return m.accepts
+}
+
+func (m Metadata) AllTypesExported() []Type {
+	return m.exports
+}
+
+func (m Metadata) AcceptsType(t Type) bool {
+	for _, a := range m.accepts {
+		if a.Name == t.Name {
+			return true
+		}
+	}
+	return false
+}
+
+func (m Metadata) ExportsType(t Type) bool {
+	for _, o := range m.exports {
+		if o.Name == t.Name {
+			return true
+		}
+	}
+	return false
+}
+
+func ForComponent(name string) (Metadata, error) {
+	reg, ok := component.Get(name)
+	if !ok {
+		return Metadata{}, fmt.Errorf("could not find component %q", name)
+	}
+	return inferMetadata(reg.Args, reg.Exports), nil
+}
+
+func inferMetadata(args component.Arguments, exports component.Exports) Metadata {
+	m := Metadata{}
+	for _, t := range AllTypes {
+		if t.existsInArgsFn(args) {
+			m.accepts = append(m.accepts, t)
+		}
+		if t.existsInExportsFn(exports) {
+			m.exports = append(m.exports, t)
+		}
+	}
+	return m
+}
+
+func hasFieldOfType(obj interface{}, fieldType reflect.Type) bool {
+	objValue := reflect.ValueOf(obj)
+
+	// If the object is a pointer, dereference it
+	for objValue.Kind() == reflect.Ptr {
+		objValue = objValue.Elem()
+	}
+
+	// If the object is not a struct or interface, return false
+	if objValue.Kind() != reflect.Struct && objValue.Kind() != reflect.Interface {
+		return false
+	}
+
+	for i := 0; i < objValue.NumField(); i++ {
+		fv := objValue.Field(i)
+		ft := fv.Type()
+
+		// If the field type matches the given type, return true
+		if ft == fieldType {
+			return true
+		}
+
+		if fv.Kind() == reflect.Interface && fieldType.AssignableTo(ft) {
+			return true
+		}
+
+		// If the field is a struct, recursively check its fields
+		if fv.Kind() == reflect.Struct {
+			if hasFieldOfType(fv.Interface(), fieldType) {
+				return true
+			}
+		}
+
+		// If the field is a pointer, create a new instance of the pointer type and recursively check its fields
+		if fv.Kind() == reflect.Ptr {
+			if hasFieldOfType(reflect.New(ft.Elem()).Interface(), fieldType) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
diff --git a/component/metadata/metadata_test.go b/component/metadata/metadata_test.go
new file mode 100644
index 000000000000..a60376b2c2d1
--- /dev/null
+++ b/component/metadata/metadata_test.go
@@ -0,0 +1,94 @@
+package metadata
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func Test_inferMetadata(t *testing.T) {
+	tests := []struct {
+		name     string
+		expected Metadata
+	}{
+		{
+			name:     "discovery.dns",
+			expected: Metadata{exports: []Type{TypeTargets}},
+		},
+		{
+			name: "discovery.relabel",
+			expected: Metadata{
+				accepts: []Type{TypeTargets},
+				exports: []Type{TypeTargets},
+			},
+		},
+		{
+			name:     "loki.echo",
+			expected: Metadata{exports: []Type{TypeLokiLogs}},
+		},
+		{
+			name: "loki.source.file",
+			expected: Metadata{
+				accepts: []Type{TypeTargets, TypeLokiLogs},
+			},
+		},
+		{
+			name: "loki.process",
+			expected: Metadata{
+				accepts: []Type{TypeLokiLogs},
+				exports: []Type{TypeLokiLogs},
+			},
+		},
+		{
+			name: "prometheus.relabel",
+			expected: Metadata{
+				accepts: []Type{TypePromMetricsReceiver},
+				exports: []Type{TypePromMetricsReceiver},
+			},
+		},
+		{
+			name: "prometheus.remote_write",
+			expected: Metadata{
+				accepts: []Type{},
+				exports: []Type{TypePromMetricsReceiver},
+			},
+		},
+		{
+			name: "otelcol.exporter.otlp",
+			expected: Metadata{
+				accepts: []Type{},
+				exports: []Type{TypeOTELReceiver},
+			},
+		},
+		{
+			name: "otelcol.processor.filter",
+			expected: Metadata{
+				accepts: []Type{TypeOTELReceiver},
+				exports: []Type{TypeOTELReceiver},
+			},
+		},
+		{
+			name: "faro.receiver",
+			expected: Metadata{
+				accepts: []Type{TypeLokiLogs, TypeOTELReceiver},
+				exports: []Type{},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			actual, err := ForComponent(tt.name)
+			require.NoError(t, err)
+
+			compareSlices := func(expected, actual []Type, name string) {
+				require.Equal(t, len(expected), len(actual), "expected %d %s types, got %d; expected: %v, actual: %v", len(expected), name, len(actual), expected, actual)
+				for i := range expected {
+					require.Equal(t, expected[i].Name, actual[i].Name, "expected %s type at %d to be %q, got %q", name, i, expected[i].Name, actual[i].Name)
+				}
+			}
+
+			compareSlices(tt.expected.AllTypesAccepted(), actual.AllTypesAccepted(), "accepted")
+			compareSlices(tt.expected.AllTypesExported(), actual.AllTypesExported(), "exported")
+		})
+	}
+}
diff --git a/component/mimir/rules/kubernetes/rules.go b/component/mimir/rules/kubernetes/rules.go
index 16deaaa74b42..016a888d9104 100644
--- a/component/mimir/rules/kubernetes/rules.go
+++ b/component/mimir/rules/kubernetes/rules.go
@@ -10,6 +10,7 @@ import (
"github.com/grafana/agent/component" "github.com/grafana/agent/pkg/flow/logging/level" mimirClient "github.com/grafana/agent/pkg/mimir/client" + "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/instrument" promListers "github.com/prometheus-operator/prometheus-operator/pkg/client/listers/monitoring/v1" "github.com/prometheus/client_golang/prometheus" @@ -153,10 +154,22 @@ func New(o component.Options, args Arguments) (*Component, error) { } func (c *Component) Run(ctx context.Context) error { - err := c.startup(ctx) - if err != nil { - level.Error(c.log).Log("msg", "starting up component failed", "err", err) - c.reportUnhealthy(err) + startupBackoff := backoff.New( + ctx, + backoff.Config{ + MinBackoff: 1 * time.Second, + MaxBackoff: 10 * time.Second, + MaxRetries: 0, // infinite retries + }, + ) + for { + if err := c.startup(ctx); err != nil { + level.Error(c.log).Log("msg", "starting up component failed", "err", err) + c.reportUnhealthy(err) + } else { + break + } + startupBackoff.Wait() } for { @@ -205,8 +218,7 @@ func (c *Component) startup(ctx context.Context) error { if err := c.startRuleInformer(); err != nil { return err } - err := c.syncMimir(ctx) - if err != nil { + if err := c.syncMimir(ctx); err != nil { return err } go c.eventLoop(ctx) diff --git a/component/module/file/file.go b/component/module/file/file.go index 68b20760d5f4..e40c5dc9ca48 100644 --- a/component/module/file/file.go +++ b/component/module/file/file.go @@ -9,19 +9,14 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/local/file" "github.com/grafana/agent/component/module" - "github.com/grafana/agent/service/cluster" - "github.com/grafana/agent/service/http" - "github.com/grafana/agent/service/labelstore" - otel_service "github.com/grafana/agent/service/otel" "github.com/grafana/river/rivertypes" ) func init() { component.Register(component.Registration{ - Name: "module.file", - Args: Arguments{}, - Exports: module.Exports{}, - NeedsServices: []string{http.ServiceName, cluster.ServiceName, otel_service.ServiceName, labelstore.ServiceName}, + Name: "module.file", + Args: Arguments{}, + Exports: module.Exports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return New(opts, args.(Arguments)) diff --git a/component/module/git/git.go b/component/module/git/git.go index 42ac468477de..dfe17ef2cb4a 100644 --- a/component/module/git/git.go +++ b/component/module/git/git.go @@ -3,6 +3,7 @@ package git import ( "context" + "errors" "path/filepath" "reflect" "sync" @@ -13,18 +14,13 @@ import ( "github.com/grafana/agent/component/module" "github.com/grafana/agent/component/module/git/internal/vcs" "github.com/grafana/agent/pkg/flow/logging/level" - "github.com/grafana/agent/service/cluster" - "github.com/grafana/agent/service/http" - "github.com/grafana/agent/service/labelstore" - otel_service "github.com/grafana/agent/service/otel" ) func init() { component.Register(component.Registration{ - Name: "module.git", - Args: Arguments{}, - Exports: module.Exports{}, - NeedsServices: []string{http.ServiceName, cluster.ServiceName, otel_service.ServiceName, labelstore.ServiceName}, + Name: "module.git", + Args: Arguments{}, + Exports: module.Exports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return New(opts, args.(Arguments)) @@ -91,8 +87,15 @@ func New(o component.Options, args Arguments) (*Component, error) { argsChanged: make(chan struct{}, 1), } + // Only acknowledge the error from 
Update if it's not a + // vcs.UpdateFailedError; vcs.UpdateFailedError means that the Git repo + // exists but we were just unable to update it. if err := c.Update(args); err != nil { - return nil, err + if errors.As(err, &vcs.UpdateFailedError{}) { + level.Error(c.log).Log("msg", "failed to update repository", "err", err) + } else { + return nil, err + } } return c, nil } @@ -193,10 +196,16 @@ func (c *Component) Update(args component.Arguments) (err error) { } // Create or update the repo field. + // Failure to update repository makes the module loader temporarily use cached contents on disk if c.repo == nil || !reflect.DeepEqual(repoOpts, c.repoOpts) { r, err := vcs.NewGitRepo(context.Background(), repoPath, repoOpts) if err != nil { - return err + if errors.As(err, &vcs.UpdateFailedError{}) { + level.Error(c.log).Log("msg", "failed to update repository", "err", err) + c.updateHealth(err) + } else { + return err + } } c.repo = r c.repoOpts = repoOpts diff --git a/component/module/git/internal/vcs/git.go b/component/module/git/internal/vcs/git.go index 8209190b90da..dece43c10b2f 100644 --- a/component/module/git/internal/vcs/git.go +++ b/component/module/git/internal/vcs/git.go @@ -58,16 +58,24 @@ func NewGitRepo(ctx context.Context, storagePath string, opts GitRepoOptions) (* } // Fetch the latest contents. This may be a no-op if we just did a clone. - err = repo.FetchContext(ctx, &git.FetchOptions{ + fetchRepoErr := repo.FetchContext(ctx, &git.FetchOptions{ RemoteName: "origin", Force: true, Auth: opts.Auth.Convert(), }) - if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) { - return nil, UpdateFailedError{ - Repository: opts.Repository, - Inner: err, + if fetchRepoErr != nil && !errors.Is(fetchRepoErr, git.NoErrAlreadyUpToDate) { + workTree, err := repo.Worktree() + if err != nil { + return nil, err } + return &GitRepo{ + opts: opts, + repo: repo, + workTree: workTree, + }, UpdateFailedError{ + Repository: opts.Repository, + Inner: fetchRepoErr, + } } // Finally, hard reset to our requested revision. @@ -92,7 +100,7 @@ func NewGitRepo(ctx context.Context, storagePath string, opts GitRepoOptions) (* opts: opts, repo: repo, workTree: workTree, - }, nil + }, err } func isRepoCloned(dir string) bool { @@ -103,15 +111,16 @@ func isRepoCloned(dir string) bool { // Update updates the repository by fetching new content and re-checking out to // latest version of Revision. 
func (repo *GitRepo) Update(ctx context.Context) error { - err := repo.repo.FetchContext(ctx, &git.FetchOptions{ + var err error + fetchRepoErr := repo.repo.FetchContext(ctx, &git.FetchOptions{ RemoteName: "origin", Force: true, Auth: repo.opts.Auth.Convert(), }) - if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) { + if fetchRepoErr != nil && !errors.Is(fetchRepoErr, git.NoErrAlreadyUpToDate) { return UpdateFailedError{ Repository: repo.opts.Repository, - Inner: err, + Inner: fetchRepoErr, } } @@ -120,7 +129,6 @@ func (repo *GitRepo) Update(ctx context.Context) error { if err != nil { return InvalidRevisionError{Revision: repo.opts.Revision} } - err = repo.workTree.Reset(&git.ResetOptions{ Commit: hash, Mode: git.HardReset, diff --git a/component/module/http/http.go b/component/module/http/http.go index e5f2890d1114..5cd8ed1e1f71 100644 --- a/component/module/http/http.go +++ b/component/module/http/http.go @@ -9,19 +9,14 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/module" remote_http "github.com/grafana/agent/component/remote/http" - "github.com/grafana/agent/service/cluster" - http_service "github.com/grafana/agent/service/http" - "github.com/grafana/agent/service/labelstore" - otel_service "github.com/grafana/agent/service/otel" "github.com/grafana/river/rivertypes" ) func init() { component.Register(component.Registration{ - Name: "module.http", - Args: Arguments{}, - Exports: module.Exports{}, - NeedsServices: []string{http_service.ServiceName, cluster.ServiceName, otel_service.ServiceName, labelstore.ServiceName}, + Name: "module.http", + Args: Arguments{}, + Exports: module.Exports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return New(opts, args.(Arguments)) diff --git a/component/module/string/string.go b/component/module/string/string.go index 7305abd75073..bd3e6193f441 100644 --- a/component/module/string/string.go +++ b/component/module/string/string.go @@ -5,19 +5,14 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/module" - "github.com/grafana/agent/service/cluster" - "github.com/grafana/agent/service/http" - "github.com/grafana/agent/service/labelstore" - otel_service "github.com/grafana/agent/service/otel" "github.com/grafana/river/rivertypes" ) func init() { component.Register(component.Registration{ - Name: "module.string", - Args: Arguments{}, - Exports: module.Exports{}, - NeedsServices: []string{http.ServiceName, cluster.ServiceName, otel_service.ServiceName, labelstore.ServiceName}, + Name: "module.string", + Args: Arguments{}, + Exports: module.Exports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return New(opts, args.(Arguments)) diff --git a/component/otelcol/auth/basic/basic.go b/component/otelcol/auth/basic/basic.go index b82209e388a3..ceae037d7f40 100644 --- a/component/otelcol/auth/basic/basic.go +++ b/component/otelcol/auth/basic/basic.go @@ -4,7 +4,6 @@ package basic import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol/auth" - otel_service "github.com/grafana/agent/service/otel" "github.com/grafana/river/rivertypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension" otelcomponent "go.opentelemetry.io/collector/component" @@ -14,10 +13,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.auth.basic", - Args: Arguments{}, - Exports: auth.Exports{}, - NeedsServices: 
[]string{otel_service.ServiceName}, + Name: "otelcol.auth.basic", + Args: Arguments{}, + Exports: auth.Exports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := basicauthextension.NewFactory() diff --git a/component/otelcol/auth/bearer/bearer.go b/component/otelcol/auth/bearer/bearer.go index bfcb40e6b55d..d99ea1b7cee9 100644 --- a/component/otelcol/auth/bearer/bearer.go +++ b/component/otelcol/auth/bearer/bearer.go @@ -4,7 +4,6 @@ package bearer import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol/auth" - otel_service "github.com/grafana/agent/service/otel" "github.com/grafana/river/rivertypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension" otelcomponent "go.opentelemetry.io/collector/component" @@ -14,10 +13,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.auth.bearer", - Args: Arguments{}, - Exports: auth.Exports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.auth.bearer", + Args: Arguments{}, + Exports: auth.Exports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := bearertokenauthextension.NewFactory() diff --git a/component/otelcol/auth/headers/headers.go b/component/otelcol/auth/headers/headers.go index 56156759cac4..b0530639b8b4 100644 --- a/component/otelcol/auth/headers/headers.go +++ b/component/otelcol/auth/headers/headers.go @@ -8,7 +8,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol/auth" - otel_service "github.com/grafana/agent/service/otel" "github.com/grafana/river" "github.com/grafana/river/rivertypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension" @@ -18,10 +17,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.auth.headers", - Args: Arguments{}, - Exports: auth.Exports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.auth.headers", + Args: Arguments{}, + Exports: auth.Exports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := headerssetterextension.NewFactory() diff --git a/component/otelcol/auth/oauth2/oauth2.go b/component/otelcol/auth/oauth2/oauth2.go index 3396dca94d06..6007bd59236a 100644 --- a/component/otelcol/auth/oauth2/oauth2.go +++ b/component/otelcol/auth/oauth2/oauth2.go @@ -7,7 +7,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/auth" - otel_service "github.com/grafana/agent/service/otel" "github.com/grafana/river/rivertypes" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension" otelcomponent "go.opentelemetry.io/collector/component" @@ -17,10 +16,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.auth.oauth2", - Args: Arguments{}, - Exports: auth.Exports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.auth.oauth2", + Args: Arguments{}, + Exports: auth.Exports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := oauth2clientauthextension.NewFactory() diff --git a/component/otelcol/auth/sigv4/sigv4.go b/component/otelcol/auth/sigv4/sigv4.go index 81336757fb6b..0a3db55c546b 100644 --- a/component/otelcol/auth/sigv4/sigv4.go +++ 
b/component/otelcol/auth/sigv4/sigv4.go @@ -3,7 +3,6 @@ package sigv4 import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol/auth" - otel_service "github.com/grafana/agent/service/otel" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" @@ -11,10 +10,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.auth.sigv4", - Args: Arguments{}, - Exports: auth.Exports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.auth.sigv4", + Args: Arguments{}, + Exports: auth.Exports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := sigv4authextension.NewFactory() diff --git a/component/otelcol/config_filter_test.go b/component/otelcol/config_filter_test.go index 9e4eab1ec293..39a1fae4c90a 100644 --- a/component/otelcol/config_filter_test.go +++ b/component/otelcol/config_filter_test.go @@ -3,11 +3,13 @@ package otelcol_test import ( "testing" + "k8s.io/utils/ptr" + "github.com/grafana/agent/component/otelcol" + "github.com/grafana/river" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/plog" - "k8s.io/utils/pointer" ) func TestConvertMatchProperties(t *testing.T) { @@ -49,7 +51,7 @@ func TestConvertMatchProperties(t *testing.T) { Libraries: []otelcol.InstrumentationLibrary{ { Name: "mongo-java-driver", - Version: pointer.String("3.8.0"), + Version: ptr.To("3.8.0"), }, }, SpanKinds: []string{"span1"}, diff --git a/component/otelcol/config_scrape.go b/component/otelcol/config_scrape.go new file mode 100644 index 000000000000..60f30ae946ac --- /dev/null +++ b/component/otelcol/config_scrape.go @@ -0,0 +1,58 @@ +package otelcol + +import ( + "errors" + "fmt" + "time" + + scraperhelper "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +var ( + errNonPositiveInterval = errors.New("requires positive value") + errGreaterThanZero = errors.New("requires a value greater than zero") +) + +// ScraperControllerArguments defines common settings for a scraper controller +// configuration. +type ScraperControllerArguments struct { + CollectionInterval time.Duration `river:"collection_interval,attr,optional"` + InitialDelay time.Duration `river:"initial_delay,attr,optional"` + Timeout time.Duration `river:"timeout,attr,optional"` +} + +// DefaultScraperControllerArguments holds default settings for ScraperControllerArguments. +var DefaultScraperControllerArguments = ScraperControllerArguments{ + CollectionInterval: time.Minute, + InitialDelay: time.Second, + Timeout: 0 * time.Second, +} + +// SetToDefault implements river.Defaulter. +func (args *ScraperControllerArguments) SetToDefault() { + *args = DefaultScraperControllerArguments +} + +// Convert converts args into the upstream type. +func (args *ScraperControllerArguments) Convert() *scraperhelper.ScraperControllerSettings { + if args == nil { + return nil + } + + return &scraperhelper.ScraperControllerSettings{ + CollectionInterval: args.CollectionInterval, + InitialDelay: args.InitialDelay, + Timeout: args.Timeout, + } +} + +// Validate returns an error if args is invalid. 
+func (args *ScraperControllerArguments) Validate() error { + if args.CollectionInterval <= 0 { + return fmt.Errorf(`"collection_interval": %w`, errNonPositiveInterval) + } + if args.Timeout < 0 { + return fmt.Errorf(`"timeout": %w`, errGreaterThanZero) + } + return nil +} diff --git a/component/otelcol/connector/servicegraph/servicegraph.go b/component/otelcol/connector/servicegraph/servicegraph.go index ce171700569f..c1713cca5ad5 100644 --- a/component/otelcol/connector/servicegraph/servicegraph.go +++ b/component/otelcol/connector/servicegraph/servicegraph.go @@ -7,7 +7,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/connector" - otel_service "github.com/grafana/agent/service/otel" "github.com/grafana/river" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor" @@ -17,10 +16,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.connector.servicegraph", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.connector.servicegraph", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := servicegraphconnector.NewFactory() @@ -93,7 +91,7 @@ var DefaultArguments = Arguments{ Dimensions: []string{}, Store: StoreConfig{ MaxItems: 1000, - TTL: 2 * time.Millisecond, + TTL: 2 * time.Second, }, CacheLoop: 1 * time.Minute, StoreExpirationLoop: 2 * time.Second, diff --git a/component/otelcol/connector/servicegraph/servicegraph_test.go b/component/otelcol/connector/servicegraph/servicegraph_test.go index 5f7204b2bb6b..952ac8fc06d5 100644 --- a/component/otelcol/connector/servicegraph/servicegraph_test.go +++ b/component/otelcol/connector/servicegraph/servicegraph_test.go @@ -44,7 +44,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { Dimensions: []string{}, Store: servicegraphprocessor.StoreConfig{ MaxItems: 1000, - TTL: 2 * time.Millisecond, + TTL: 2 * time.Second, }, CacheLoop: 1 * time.Minute, StoreExpirationLoop: 2 * time.Second, diff --git a/component/otelcol/connector/spanmetrics/spanmetrics.go b/component/otelcol/connector/spanmetrics/spanmetrics.go index 45b8b1f4bd0a..2a32c9b49642 100644 --- a/component/otelcol/connector/spanmetrics/spanmetrics.go +++ b/component/otelcol/connector/spanmetrics/spanmetrics.go @@ -8,7 +8,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/connector" - otel_service "github.com/grafana/agent/service/otel" "github.com/grafana/river" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" otelcomponent "go.opentelemetry.io/collector/component" @@ -17,10 +16,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.connector.spanmetrics", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.connector.spanmetrics", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := spanmetricsconnector.NewFactory() diff --git a/component/otelcol/exporter/exporter.go b/component/otelcol/exporter/exporter.go index 
947e37ebec0d..01893c7aa250 100644 --- a/component/otelcol/exporter/exporter.go +++ b/component/otelcol/exporter/exporter.go @@ -44,6 +44,33 @@ type Arguments interface { DebugMetricsConfig() otelcol.DebugMetricsArguments } +// TypeSignal is a bit field to indicate which telemetry signals the exporter supports. +type TypeSignal byte + +const ( + TypeLogs TypeSignal = 1 << iota // 1 + TypeMetrics // 2 + TypeTraces // 4 +) + +// TypeAll indicates that the exporter supports all telemetry signals. +const TypeAll = TypeLogs | TypeMetrics | TypeTraces + +// SupportsLogs returns true if the exporter supports logs. +func (s TypeSignal) SupportsLogs() bool { + return s&TypeLogs != 0 +} + +// SupportsMetrics returns true if the exporter supports metrics. +func (s TypeSignal) SupportsMetrics() bool { + return s&TypeMetrics != 0 +} + +// SupportsTraces returns true if the exporter supports traces. +func (s TypeSignal) SupportsTraces() bool { + return s&TypeTraces != 0 +} + // Exporter is a Flow component shim which manages an OpenTelemetry Collector // exporter component. type Exporter struct { @@ -56,6 +83,10 @@ type Exporter struct { sched *scheduler.Scheduler collector *lazycollector.Collector + + // Signals which the exporter is able to export. + // Can be logs, metrics, traces or any combination of them. + supportedSignals TypeSignal } var ( @@ -69,7 +100,7 @@ var ( // // The registered component must be registered to export the // otelcol.ConsumerExports type, otherwise New will panic. -func New(opts component.Options, f otelexporter.Factory, args Arguments) (*Exporter, error) { +func New(opts component.Options, f otelexporter.Factory, args Arguments, supportedSignals TypeSignal) (*Exporter, error) { ctx, cancel := context.WithCancel(context.Background()) consumer := lazyconsumer.New(ctx) @@ -96,6 +127,8 @@ func New(opts component.Options, f otelexporter.Factory, args Arguments) (*Expor sched: scheduler.New(opts.Logger), collector: collector, + + supportedSignals: supportedSignals, } if err := e.Update(args); err != nil { return nil, err @@ -162,25 +195,34 @@ func (e *Exporter) Update(args component.Arguments) error { // supported telemetry signals. 
var components []otelcomponent.Component - tracesExporter, err := e.factory.CreateTracesExporter(e.ctx, settings, exporterConfig) - if err != nil && !errors.Is(err, otelcomponent.ErrDataTypeIsNotSupported) { - return err - } else if tracesExporter != nil { - components = append(components, tracesExporter) + var tracesExporter otelexporter.Traces + if e.supportedSignals.SupportsTraces() { + tracesExporter, err = e.factory.CreateTracesExporter(e.ctx, settings, exporterConfig) + if err != nil && !errors.Is(err, otelcomponent.ErrDataTypeIsNotSupported) { + return err + } else if tracesExporter != nil { + components = append(components, tracesExporter) + } } - metricsExporter, err := e.factory.CreateMetricsExporter(e.ctx, settings, exporterConfig) - if err != nil && !errors.Is(err, otelcomponent.ErrDataTypeIsNotSupported) { - return err - } else if metricsExporter != nil { - components = append(components, metricsExporter) + var metricsExporter otelexporter.Metrics + if e.supportedSignals.SupportsMetrics() { + metricsExporter, err = e.factory.CreateMetricsExporter(e.ctx, settings, exporterConfig) + if err != nil && !errors.Is(err, otelcomponent.ErrDataTypeIsNotSupported) { + return err + } else if metricsExporter != nil { + components = append(components, metricsExporter) + } } - logsExporter, err := e.factory.CreateLogsExporter(e.ctx, settings, exporterConfig) - if err != nil && !errors.Is(err, otelcomponent.ErrDataTypeIsNotSupported) { - return err - } else if logsExporter != nil { - components = append(components, logsExporter) + var logsExporter otelexporter.Logs + if e.supportedSignals.SupportsLogs() { + logsExporter, err = e.factory.CreateLogsExporter(e.ctx, settings, exporterConfig) + if err != nil && !errors.Is(err, otelcomponent.ErrDataTypeIsNotSupported) { + return err + } else if logsExporter != nil { + components = append(components, logsExporter) + } } // Schedule the components to run once our component is running. diff --git a/component/otelcol/exporter/exporter_test.go b/component/otelcol/exporter/exporter_test.go index be07ab2b48f7..7ef29244518c 100644 --- a/component/otelcol/exporter/exporter_test.go +++ b/component/otelcol/exporter/exporter_test.go @@ -103,7 +103,7 @@ func newTestEnvironment(t *testing.T, fe *fakeExporter) *testEnvironment { }, otelcomponent.StabilityLevelUndefined), ) - return exporter.New(opts, factory, args.(exporter.Arguments)) + return exporter.New(opts, factory, args.(exporter.Arguments), exporter.TypeAll) }, } @@ -198,3 +198,37 @@ func createTestTraces() ptrace.Traces { } return data } + +func TestExporterSignalType(t *testing.T) { + // + // Check if ExporterAll supports all signals + // + require.True(t, exporter.TypeAll.SupportsLogs()) + require.True(t, exporter.TypeAll.SupportsMetrics()) + require.True(t, exporter.TypeAll.SupportsTraces()) + + // + // Make sure each of the 3 signals supports itself + // + require.True(t, exporter.TypeLogs.SupportsLogs()) + require.True(t, exporter.TypeMetrics.SupportsMetrics()) + require.True(t, exporter.TypeTraces.SupportsTraces()) + + // + // Make sure Logs does not support Metrics and Traces. + // + require.False(t, exporter.TypeLogs.SupportsMetrics()) + require.False(t, exporter.TypeLogs.SupportsTraces()) + + // + // Make sure Metrics does not support Logs and Traces. + // + require.False(t, exporter.TypeMetrics.SupportsLogs()) + require.False(t, exporter.TypeMetrics.SupportsTraces()) + + // + // Make sure Traces does not support Logs and Metrics. 
+ // + require.False(t, exporter.TypeTraces.SupportsLogs()) + require.False(t, exporter.TypeTraces.SupportsMetrics()) +} diff --git a/component/otelcol/exporter/loadbalancing/loadbalancing.go b/component/otelcol/exporter/loadbalancing/loadbalancing.go index 5fee871a7f03..3455318fef38 100644 --- a/component/otelcol/exporter/loadbalancing/loadbalancing.go +++ b/component/otelcol/exporter/loadbalancing/loadbalancing.go @@ -10,7 +10,6 @@ import ( "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/auth" "github.com/grafana/agent/component/otelcol/exporter" - otel_service "github.com/grafana/agent/service/otel" "github.com/grafana/river" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter" otelcomponent "go.opentelemetry.io/collector/component" @@ -24,14 +23,16 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.exporter.loadbalancing", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.exporter.loadbalancing", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := loadbalancingexporter.NewFactory() - return exporter.New(opts, fact, args.(Arguments)) + //TODO(ptodev): LB exporter cannot yet work with metrics due to a limitation in the Agent: + // https://github.com/grafana/agent/pull/5684 + // Once the limitation is removed, we may be able to remove the need for exporter.TypeSignal altogether. + return exporter.New(opts, fact, args.(Arguments), exporter.TypeLogs|exporter.TypeTraces) }, }) } @@ -137,8 +138,9 @@ func (otlpConfig OtlpConfig) Convert() otlpexporter.Config { // ResolverSettings defines the configurations for the backend resolver type ResolverSettings struct { - Static *StaticResolver `river:"static,block,optional"` - DNS *DNSResolver `river:"dns,block,optional"` + Static *StaticResolver `river:"static,block,optional"` + DNS *DNSResolver `river:"dns,block,optional"` + Kubernetes *KubernetesResolver `river:"kubernetes,block,optional"` } func (resolverSettings ResolverSettings) Convert() loadbalancingexporter.ResolverSettings { @@ -154,6 +156,11 @@ func (resolverSettings ResolverSettings) Convert() loadbalancingexporter.Resolve res.DNS = &dnsResolver } + if resolverSettings.Kubernetes != nil { + kubernetesResolver := resolverSettings.Kubernetes.Convert() + res.K8sSvc = &kubernetesResolver + } + return res } @@ -199,6 +206,29 @@ func (dnsResolver *DNSResolver) Convert() loadbalancingexporter.DNSResolver { } } +// KubernetesResolver defines the configuration for the k8s resolver +type KubernetesResolver struct { + Service string `river:"service,attr"` + Ports []int32 `river:"ports,attr,optional"` +} + +var _ river.Defaulter = &KubernetesResolver{} + +// SetToDefault implements river.Defaulter. +func (args *KubernetesResolver) SetToDefault() { + if args == nil { + args = &KubernetesResolver{} + } + args.Ports = []int32{4317} +} + +func (k8sSvcResolver *KubernetesResolver) Convert() loadbalancingexporter.K8sSvcResolver { + return loadbalancingexporter.K8sSvcResolver{ + Service: k8sSvcResolver.Service, + Ports: append([]int32{}, k8sSvcResolver.Ports...), + } +} + // Extensions implements exporter.Arguments. 
func (args Arguments) Extensions() map[otelcomponent.ID]otelextension.Extension { return args.Protocol.OTLP.Client.Extensions() diff --git a/component/otelcol/exporter/loadbalancing/loadbalancing_test.go b/component/otelcol/exporter/loadbalancing/loadbalancing_test.go index a5e0851fef3d..5e528dd373a3 100644 --- a/component/otelcol/exporter/loadbalancing/loadbalancing_test.go +++ b/component/otelcol/exporter/loadbalancing/loadbalancing_test.go @@ -203,6 +203,59 @@ func TestConfigConversion(t *testing.T) { Protocol: defaultProtocol, }, }, + { + testName: "k8s with defaults", + agentCfg: ` + resolver { + kubernetes { + service = "lb-svc.lb-ns" + } + } + protocol { + otlp { + client {} + } + } + `, + expected: loadbalancingexporter.Config{ + Resolver: loadbalancingexporter.ResolverSettings{ + Static: nil, + K8sSvc: &loadbalancingexporter.K8sSvcResolver{ + Service: "lb-svc.lb-ns", + Ports: []int32{4317}, + }, + }, + RoutingKey: "traceID", + Protocol: defaultProtocol, + }, + }, + { + testName: "k8s with non-defaults", + agentCfg: ` + resolver { + kubernetes { + service = "lb-svc.lb-ns" + ports = [55690, 55691] + } + } + protocol { + otlp { + client {} + } + } + `, + expected: loadbalancingexporter.Config{ + Resolver: loadbalancingexporter.ResolverSettings{ + Static: nil, + K8sSvc: &loadbalancingexporter.K8sSvcResolver{ + Service: "lb-svc.lb-ns", + Ports: []int32{55690, 55691}, + }, + }, + RoutingKey: "traceID", + Protocol: defaultProtocol, + }, + }, } for _, tc := range tests { diff --git a/component/otelcol/exporter/logging/logging.go b/component/otelcol/exporter/logging/logging.go index 9976c28b6209..13d12fbf312e 100644 --- a/component/otelcol/exporter/logging/logging.go +++ b/component/otelcol/exporter/logging/logging.go @@ -5,7 +5,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/exporter" - otel_service "github.com/grafana/agent/service/otel" otelcomponent "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configtelemetry" loggingexporter "go.opentelemetry.io/collector/exporter/loggingexporter" @@ -14,14 +13,13 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.exporter.logging", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.exporter.logging", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := loggingexporter.NewFactory() - return exporter.New(opts, fact, args.(Arguments)) + return exporter.New(opts, fact, args.(Arguments), exporter.TypeAll) }, }) } diff --git a/component/otelcol/exporter/otlp/otlp.go b/component/otelcol/exporter/otlp/otlp.go index aea6fd02b4bb..7ca10d2c2c0b 100644 --- a/component/otelcol/exporter/otlp/otlp.go +++ b/component/otelcol/exporter/otlp/otlp.go @@ -7,7 +7,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/exporter" - otel_service "github.com/grafana/agent/service/otel" otelcomponent "go.opentelemetry.io/collector/component" otelpexporterhelper "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/exporter/otlpexporter" @@ -16,14 +15,13 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.exporter.otlp", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - 
NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.exporter.otlp", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := otlpexporter.NewFactory() - return exporter.New(opts, fact, args.(Arguments)) + return exporter.New(opts, fact, args.(Arguments), exporter.TypeAll) }, }) } diff --git a/component/otelcol/exporter/otlphttp/otlphttp.go b/component/otelcol/exporter/otlphttp/otlphttp.go index bf142960f6a9..0508ec2e6289 100644 --- a/component/otelcol/exporter/otlphttp/otlphttp.go +++ b/component/otelcol/exporter/otlphttp/otlphttp.go @@ -8,7 +8,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/exporter" - otel_service "github.com/grafana/agent/service/otel" otelcomponent "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter/otlphttpexporter" otelextension "go.opentelemetry.io/collector/extension" @@ -16,14 +15,13 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.exporter.otlphttp", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.exporter.otlphttp", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := otlphttpexporter.NewFactory() - return exporter.New(opts, fact, args.(Arguments)) + return exporter.New(opts, fact, args.(Arguments), exporter.TypeAll) }, }) } diff --git a/component/otelcol/exporter/prometheus/internal/convert/cache.go b/component/otelcol/exporter/prometheus/internal/convert/cache.go index ac0dc12087f1..3401e87e7f8d 100644 --- a/component/otelcol/exporter/prometheus/internal/convert/cache.go +++ b/component/otelcol/exporter/prometheus/internal/convert/cache.go @@ -6,6 +6,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/timestamp" @@ -117,6 +118,17 @@ func (series *memorySeries) WriteExemplarsTo(app storage.Appender, e exemplar.Ex return nil } +func (series *memorySeries) WriteNativeHistogramTo(app storage.Appender, ts time.Time, h *histogram.Histogram, fh *histogram.FloatHistogram) error { + series.Lock() + defer series.Unlock() + + if _, err := app.AppendHistogram(series.id, series.labels, timestamp.FromTime(ts), h, fh); err != nil { + return err + } + + return nil +} + type memoryMetadata struct { sync.Mutex diff --git a/component/otelcol/exporter/prometheus/internal/convert/convert.go b/component/otelcol/exporter/prometheus/internal/convert/convert.go index 3e2a2578c5a4..fc0f2287de01 100644 --- a/component/otelcol/exporter/prometheus/internal/convert/convert.go +++ b/component/otelcol/exporter/prometheus/internal/convert/convert.go @@ -65,6 +65,8 @@ type Options struct { IncludeScopeLabels bool // AddMetricSuffixes controls whether suffixes are added to metric names. Defaults to true. 
AddMetricSuffixes bool + // ResourceToTelemetryConversion controls whether to convert resource attributes to Prometheus-compatible datapoint attributes + ResourceToTelemetryConversion bool } var _ consumer.Metrics = (*Converter)(nil) @@ -131,6 +133,7 @@ func (conv *Converter) consumeResourceMetrics(app storage.Appender, rm pmetric.R Type: textparse.MetricTypeGauge, Help: "Target metadata", }) + resAttrs := rm.Resource().Attributes() memResource := conv.getOrCreateResource(rm.Resource()) if conv.getOpts().IncludeTargetInfo { @@ -144,7 +147,7 @@ func (conv *Converter) consumeResourceMetrics(app storage.Appender, rm pmetric.R for smcount := 0; smcount < rm.ScopeMetrics().Len(); smcount++ { sm := rm.ScopeMetrics().At(smcount) - conv.consumeScopeMetrics(app, memResource, sm) + conv.consumeScopeMetrics(app, memResource, sm, resAttrs) } } @@ -219,7 +222,7 @@ func (conv *Converter) getOrCreateResource(res pcommon.Resource) *memorySeries { return entry } -func (conv *Converter) consumeScopeMetrics(app storage.Appender, memResource *memorySeries, sm pmetric.ScopeMetrics) { +func (conv *Converter) consumeScopeMetrics(app storage.Appender, memResource *memorySeries, sm pmetric.ScopeMetrics, resAttrs pcommon.Map) { scopeMD := conv.createOrUpdateMetadata("otel_scope_info", metadata.Metadata{ Type: textparse.MetricTypeGauge, }) @@ -236,7 +239,7 @@ func (conv *Converter) consumeScopeMetrics(app storage.Appender, memResource *me for mcount := 0; mcount < sm.Metrics().Len(); mcount++ { m := sm.Metrics().At(mcount) - conv.consumeMetric(app, memResource, memScope, m) + conv.consumeMetric(app, memResource, memScope, m, resAttrs) } } @@ -274,20 +277,29 @@ func (conv *Converter) getOrCreateScope(res *memorySeries, scope pcommon.Instrum return entry } -func (conv *Converter) consumeMetric(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric) { +func (conv *Converter) consumeMetric(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric, resAttrs pcommon.Map) { switch m.Type() { case pmetric.MetricTypeGauge: - conv.consumeGauge(app, memResource, memScope, m) + conv.consumeGauge(app, memResource, memScope, m, resAttrs) case pmetric.MetricTypeSum: - conv.consumeSum(app, memResource, memScope, m) + conv.consumeSum(app, memResource, memScope, m, resAttrs) case pmetric.MetricTypeHistogram: - conv.consumeHistogram(app, memResource, memScope, m) + conv.consumeHistogram(app, memResource, memScope, m, resAttrs) case pmetric.MetricTypeSummary: - conv.consumeSummary(app, memResource, memScope, m) + conv.consumeSummary(app, memResource, memScope, m, resAttrs) + case pmetric.MetricTypeExponentialHistogram: + conv.consumeExponentialHistogram(app, memResource, memScope, m, resAttrs) } } -func (conv *Converter) consumeGauge(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric) { +func joinAttributeMaps(from, to pcommon.Map) { + from.Range(func(k string, v pcommon.Value) bool { + v.CopyTo(to.PutEmpty(k)) + return true + }) +} + +func (conv *Converter) consumeGauge(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric, resAttrs pcommon.Map) { metricName := prometheus.BuildCompliantName(m, "", conv.opts.AddMetricSuffixes) metricMD := conv.createOrUpdateMetadata(metricName, metadata.Metadata{ @@ -296,12 +308,16 @@ func (conv *Converter) consumeGauge(app storage.Appender, memResource *memorySer Help: m.Description(), }) if err := metricMD.WriteTo(app, time.Now()); err != nil { - 
level.Warn(conv.log).Log("msg", "failed to write metric family metadata, metric name", metricName, "err", err) + level.Warn(conv.log).Log("msg", "failed to write metric family metadata", "metric name", metricName, "err", err) } for dpcount := 0; dpcount < m.Gauge().DataPoints().Len(); dpcount++ { dp := m.Gauge().DataPoints().At(dpcount) + if conv.getOpts().ResourceToTelemetryConversion { + joinAttributeMaps(resAttrs, dp.Attributes()) + } + memSeries := conv.getOrCreateSeries(memResource, memScope, metricName, dp.Attributes()) if err := writeSeries(app, memSeries, dp, getNumberDataPointValue(dp)); err != nil { level.Error(conv.log).Log("msg", "failed to write metric sample", metricName, "err", err) @@ -389,7 +405,7 @@ func getNumberDataPointValue(dp pmetric.NumberDataPoint) float64 { return 0 } -func (conv *Converter) consumeSum(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric) { +func (conv *Converter) consumeSum(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric, resAttrs pcommon.Map) { metricName := prometheus.BuildCompliantName(m, "", conv.opts.AddMetricSuffixes) // Excerpt from the spec: @@ -424,12 +440,16 @@ func (conv *Converter) consumeSum(app storage.Appender, memResource *memorySerie Help: m.Description(), }) if err := metricMD.WriteTo(app, time.Now()); err != nil { - level.Warn(conv.log).Log("msg", "failed to write metric family metadata, metric name", metricName, "err", err) + level.Warn(conv.log).Log("msg", "failed to write metric family metadata", "metric name", metricName, "err", err) } for dpcount := 0; dpcount < m.Sum().DataPoints().Len(); dpcount++ { dp := m.Sum().DataPoints().At(dpcount) + if conv.getOpts().ResourceToTelemetryConversion { + joinAttributeMaps(resAttrs, dp.Attributes()) + } + memSeries := conv.getOrCreateSeries(memResource, memScope, metricName, dp.Attributes()) val := getNumberDataPointValue(dp) @@ -447,7 +467,7 @@ func (conv *Converter) consumeSum(app storage.Appender, memResource *memorySerie } } -func (conv *Converter) consumeHistogram(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric) { +func (conv *Converter) consumeHistogram(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric, resAttrs pcommon.Map) { metricName := prometheus.BuildCompliantName(m, "", conv.opts.AddMetricSuffixes) if m.Histogram().AggregationTemporality() != pmetric.AggregationTemporalityCumulative { @@ -463,19 +483,23 @@ func (conv *Converter) consumeHistogram(app storage.Appender, memResource *memor Help: m.Description(), }) if err := metricMD.WriteTo(app, time.Now()); err != nil { - level.Warn(conv.log).Log("msg", "failed to write metric family metadata, metric name", metricName, "err", err) + level.Warn(conv.log).Log("msg", "failed to write metric family metadata", "metric name", metricName, "err", err) } for dpcount := 0; dpcount < m.Histogram().DataPoints().Len(); dpcount++ { dp := m.Histogram().DataPoints().At(dpcount) + if conv.getOpts().ResourceToTelemetryConversion { + joinAttributeMaps(resAttrs, dp.Attributes()) + } + // Sum metric if dp.HasSum() { sumMetric := conv.getOrCreateSeries(memResource, memScope, metricName+"_sum", dp.Attributes()) sumMetricVal := dp.Sum() if err := writeSeries(app, sumMetric, dp, sumMetricVal); err != nil { - level.Error(conv.log).Log("msg", "failed to write histogram sum sample, metric name", metricName, "err", err) + level.Error(conv.log).Log("msg", "failed to write histogram sum sample", 
"metric name", metricName, "err", err) } } @@ -485,7 +509,7 @@ func (conv *Converter) consumeHistogram(app storage.Appender, memResource *memor countMetricVal := float64(dp.Count()) if err := writeSeries(app, countMetric, dp, countMetricVal); err != nil { - level.Error(conv.log).Log("msg", "failed to write histogram count sample, metric name", metricName, "err", err) + level.Error(conv.log).Log("msg", "failed to write histogram count sample", "metric name", metricName, "err", err) } } @@ -540,13 +564,13 @@ func (conv *Converter) consumeHistogram(app storage.Appender, memResource *memor bucketVal := float64(count) if err := writeSeries(app, bucket, dp, bucketVal); err != nil { - level.Error(conv.log).Log("msg", "failed to write histogram bucket sample, metric name", metricName, "bucket", bucketLabel.Value, "err", err) + level.Error(conv.log).Log("msg", "failed to write histogram bucket sample", "metric name", metricName, "bucket", bucketLabel.Value, "err", err) } for ; exemplarInd < len(exemplars); exemplarInd++ { if exemplars[exemplarInd].DoubleValue() < bound { if err := conv.writeExemplar(app, bucket, exemplars[exemplarInd]); err != nil { - level.Error(conv.log).Log("msg", "failed to add exemplar, metric name", metricName, "bucket", bucketLabel.Value, "err", err) + level.Error(conv.log).Log("msg", "failed to add exemplar", "metric name", metricName, "bucket", bucketLabel.Value, "err", err) } } else { break @@ -566,19 +590,72 @@ func (conv *Converter) consumeHistogram(app storage.Appender, memResource *memor infBucketVal := float64(dp.Count()) if err := writeSeries(app, infBucket, dp, infBucketVal); err != nil { - level.Error(conv.log).Log("msg", "failed to write histogram bucket sample, metric name", metricName, "bucket", bucketLabel.Value, "err", err) + level.Error(conv.log).Log("msg", "failed to write histogram bucket sample", "metric name", metricName, "bucket", bucketLabel.Value, "err", err) } // Add remaining exemplars. for ; exemplarInd < len(exemplars); exemplarInd++ { if err := conv.writeExemplar(app, infBucket, exemplars[exemplarInd]); err != nil { - level.Error(conv.log).Log("msg", "failed to add exemplar, metric name", metricName, "bucket", bucketLabel.Value, "err", err) + level.Error(conv.log).Log("msg", "failed to add exemplar", "metric name", metricName, "bucket", bucketLabel.Value, "err", err) } } } } } +func (conv *Converter) consumeExponentialHistogram(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric, resAttrs pcommon.Map) { + metricName := prometheus.BuildCompliantName(m, "", conv.opts.AddMetricSuffixes) + + if m.ExponentialHistogram().AggregationTemporality() != pmetric.AggregationTemporalityCumulative { + // Drop non-cumulative histograms for now, which is permitted by the spec. 
+ return + } + + metricMD := conv.createOrUpdateMetadata(metricName, metadata.Metadata{ + Type: textparse.MetricTypeHistogram, + Unit: m.Unit(), + Help: m.Description(), + }) + if err := metricMD.WriteTo(app, time.Now()); err != nil { + level.Warn(conv.log).Log("msg", "failed to write metric family metadata", "metric name", metricName, "err", err) + } + + for dpcount := 0; dpcount < m.ExponentialHistogram().DataPoints().Len(); dpcount++ { + dp := m.ExponentialHistogram().DataPoints().At(dpcount) + + if conv.getOpts().ResourceToTelemetryConversion { + joinAttributeMaps(resAttrs, dp.Attributes()) + } + + memSeries := conv.getOrCreateSeries(memResource, memScope, metricName, dp.Attributes()) + + ts := dp.Timestamp().AsTime() + if ts.Before(memSeries.Timestamp()) { + // Out-of-order; skip. + continue + } + memSeries.SetTimestamp(ts) + + promHistogram, err := exponentialToNativeHistogram(dp) + + if err != nil { + level.Error(conv.log).Log("msg", "failed to convert exponential histogram to native histogram", "metric name", metricName, "err", err) + continue + } + + if err := memSeries.WriteNativeHistogramTo(app, ts, &promHistogram, nil); err != nil { + level.Error(conv.log).Log("msg", "failed to write native histogram", "metric name", metricName, "err", err) + continue + } + + for i := 0; i < dp.Exemplars().Len(); i++ { + if err := conv.writeExemplar(app, memSeries, dp.Exemplars().At(i)); err != nil { + level.Error(conv.log).Log("msg", "failed to add exemplar", "metric name", metricName, "err", err) + } + } + } +} + // Convert Otel Exemplar to Prometheus Exemplar. func (conv *Converter) convertExemplar(otelExemplar pmetric.Exemplar, ts time.Time) exemplar.Exemplar { exemplarLabels := make(labels.Labels, 0) @@ -606,7 +683,7 @@ func (conv *Converter) convertExemplar(otelExemplar pmetric.Exemplar, ts time.Ti } } -func (conv *Converter) consumeSummary(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric) { +func (conv *Converter) consumeSummary(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric, resAttrs pcommon.Map) { metricName := prometheus.BuildCompliantName(m, "", conv.opts.AddMetricSuffixes) metricMD := conv.createOrUpdateMetadata(metricName, metadata.Metadata{ @@ -615,19 +692,23 @@ func (conv *Converter) consumeSummary(app storage.Appender, memResource *memoryS Help: m.Description(), }) if err := metricMD.WriteTo(app, time.Now()); err != nil { - level.Warn(conv.log).Log("msg", "failed to write metric family metadata, metric name", metricName, "err", err) + level.Warn(conv.log).Log("msg", "failed to write metric family metadata", "metric name", metricName, "err", err) } for dpcount := 0; dpcount < m.Summary().DataPoints().Len(); dpcount++ { dp := m.Summary().DataPoints().At(dpcount) + if conv.getOpts().ResourceToTelemetryConversion { + joinAttributeMaps(resAttrs, dp.Attributes()) + } + // Sum metric { sumMetric := conv.getOrCreateSeries(memResource, memScope, metricName+"_sum", dp.Attributes()) sumMetricVal := dp.Sum() if err := writeSeries(app, sumMetric, dp, sumMetricVal); err != nil { - level.Error(conv.log).Log("msg", "failed to write summary sum sample, metric name", metricName, "err", err) + level.Error(conv.log).Log("msg", "failed to write summary sum sample", "metric name", metricName, "err", err) } } @@ -637,7 +718,7 @@ func (conv *Converter) consumeSummary(app storage.Appender, memResource *memoryS countMetricVal := float64(dp.Count()) if err := writeSeries(app, countMetric, dp, countMetricVal); err != nil { 
- level.Error(conv.log).Log("msg", "failed to write histogram count sample, metric name", metricName, "err", err) + level.Error(conv.log).Log("msg", "failed to write histogram count sample", "metric name", metricName, "err", err) } } @@ -654,7 +735,7 @@ func (conv *Converter) consumeSummary(app storage.Appender, memResource *memoryS quantileVal := qp.Value() if err := writeSeries(app, quantile, dp, quantileVal); err != nil { - level.Error(conv.log).Log("msg", "failed to write histogram quantile sample, metric name", metricName, "quantile", quantileLabel.Value, "err", err) + level.Error(conv.log).Log("msg", "failed to write histogram quantile sample", "metric name", metricName, "quantile", quantileLabel.Value, "err", err) } } } diff --git a/component/otelcol/exporter/prometheus/internal/convert/convert_test.go b/component/otelcol/exporter/prometheus/internal/convert/convert_test.go index 80a6bce1a55b..928e3fc2f813 100644 --- a/component/otelcol/exporter/prometheus/internal/convert/convert_test.go +++ b/component/otelcol/exporter/prometheus/internal/convert/convert_test.go @@ -2,6 +2,7 @@ package convert_test import ( "context" + "encoding/json" "testing" "github.com/grafana/agent/component/otelcol/exporter/prometheus/internal/convert" @@ -18,12 +19,13 @@ func TestConverter(t *testing.T) { input string expect string - showTimestamps bool - includeTargetInfo bool - includeScopeInfo bool - includeScopeLabels bool - addMetricSuffixes bool - enableOpenMetrics bool + showTimestamps bool + includeTargetInfo bool + includeScopeInfo bool + includeScopeLabels bool + addMetricSuffixes bool + enableOpenMetrics bool + resourceToTelemetryConversion bool }{ { name: "Gauge", @@ -838,6 +840,274 @@ func TestConverter(t *testing.T) { addMetricSuffixes: true, enableOpenMetrics: true, }, + { + name: "Gauge: convert resource attributes to metric label", + input: `{ + "resource_metrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "myservice" } + }, { + "key": "service.instance.id", + "value": { "stringValue": "instance" } + }, { + "key": "raw", + "value": { "stringValue": "test" } + },{ + "key": "foo.one", + "value": { "stringValue": "foo" } + }, { + "key": "bar.one", + "value": { "stringValue": "bar" } + }] + }, + "scope_metrics": [{ + "metrics": [{ + "name": "test_metric_gauge", + "gauge": { + "data_points": [{ + "as_double": 1234.56 + }] + } + }] + }] + }] + }`, + expect: ` + # TYPE test_metric_gauge gauge + test_metric_gauge{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test"} 1234.56 + `, + enableOpenMetrics: true, + resourceToTelemetryConversion: true, + }, + { + name: "Gauge: NOT convert resource attributes to metric label", + input: `{ + "resource_metrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "myservice" } + }, { + "key": "service.instance.id", + "value": { "stringValue": "instance" } + }, { + "key": "raw", + "value": { "stringValue": "test" } + },{ + "key": "foo.one", + "value": { "stringValue": "foo" } + }, { + "key": "bar.one", + "value": { "stringValue": "bar" } + }] + }, + "scope_metrics": [{ + "metrics": [{ + "name": "test_metric_gauge", + "gauge": { + "data_points": [{ + "as_double": 1234.56 + }] + } + }] + }] + }] + }`, + expect: ` + # TYPE test_metric_gauge gauge + test_metric_gauge{instance="instance",job="myservice"} 1234.56 + `, + enableOpenMetrics: true, + resourceToTelemetryConversion: false, + }, + { + name: 
"Summary: convert resource attributes to metric label", + input: `{ + "resource_metrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "myservice" } + }, { + "key": "service.instance.id", + "value": { "stringValue": "instance" } + }, { + "key": "raw", + "value": { "stringValue": "test" } + },{ + "key": "foo.one", + "value": { "stringValue": "foo" } + }, { + "key": "bar.one", + "value": { "stringValue": "bar" } + }] + }, + "scope_metrics": [{ + "metrics": [{ + "name": "test_metric_summary", + "unit": "seconds", + "summary": { + "data_points": [{ + "start_time_unix_nano": 1000000000, + "time_unix_nano": 1000000000, + "count": 333, + "sum": 100, + "quantile_values": [ + { "quantile": 0, "value": 100 }, + { "quantile": 0.5, "value": 400 }, + { "quantile": 1, "value": 500 } + ] + }] + } + }] + }] + }] + }`, + expect: ` + # TYPE test_metric_summary summary + test_metric_summary{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",quantile="0.0"} 100.0 + test_metric_summary{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",quantile="0.5"} 400.0 + test_metric_summary{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",quantile="1.0"} 500.0 + test_metric_summary_sum{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test"} 100.0 + test_metric_summary_count{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test"} 333 + `, + enableOpenMetrics: true, + resourceToTelemetryConversion: true, + }, + { + name: "Histogram: convert resource attributes to metric label", + input: `{ + "resource_metrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "myservice" } + }, { + "key": "service.instance.id", + "value": { "stringValue": "instance" } + }, { + "key": "raw", + "value": { "stringValue": "test" } + },{ + "key": "foo.one", + "value": { "stringValue": "foo" } + }, { + "key": "bar.one", + "value": { "stringValue": "bar" } + }] + }, + "scope_metrics": [{ + "metrics": [ + { + "name": "test_metric_histogram", + "unit": "seconds", + "histogram": { + "aggregation_temporality": 2, + "data_points": [{ + "start_time_unix_nano": 1000000000, + "time_unix_nano": 1000000000, + "count": 333, + "sum": 100, + "bucket_counts": [0, 111, 0, 222], + "explicit_bounds": [0.25, 0.5, 0.75, 1.0], + "exemplars":[ + { + "time_unix_nano": 1000000001, + "as_double": 0.3, + "span_id": "aaaaaaaaaaaaaaaa", + "trace_id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + }, + { + "time_unix_nano": 1000000003, + "as_double": 1.5, + "span_id": "cccccccccccccccc", + "trace_id": "cccccccccccccccccccccccccccccccc" + }, + { + "time_unix_nano": 1000000002, + "as_double": 0.5, + "span_id": "bbbbbbbbbbbbbbbb", + "trace_id": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + } + ] + }] + } + } + ] + }] + }] + }`, + expect: ` + # TYPE test_metric_histogram histogram + test_metric_histogram_bucket{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",le="0.25"} 0 + test_metric_histogram_bucket{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",le="0.5"} 111 
# {span_id="aaaaaaaaaaaaaaaa",trace_id="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"} 0.3 + test_metric_histogram_bucket{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",le="0.75"} 111 # {span_id="bbbbbbbbbbbbbbbb",trace_id="bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"} 0.5 + test_metric_histogram_bucket{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",le="1.0"} 333 + test_metric_histogram_bucket{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",le="+Inf"} 333 # {span_id="cccccccccccccccc",trace_id="cccccccccccccccccccccccccccccccc"} 1.5 + test_metric_histogram_sum{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test"} 100.0 + test_metric_histogram_count{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test"} 333 + `, + enableOpenMetrics: true, + resourceToTelemetryConversion: true, + }, + { + name: "Monotonic sum: convert resource attributes to metric label", + input: `{ + "resource_metrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "myservice" } + }, { + "key": "service.instance.id", + "value": { "stringValue": "instance" } + }, { + "key": "raw", + "value": { "stringValue": "test" } + },{ + "key": "foo.one", + "value": { "stringValue": "foo" } + }, { + "key": "bar.one", + "value": { "stringValue": "bar" } + }] + }, + "scope_metrics": [{ + "metrics": [ + { + "name": "test_metric_mono_sum_total", + "unit": "seconds", + "sum": { + "aggregation_temporality": 2, + "is_monotonic": true, + "data_points": [{ + "start_time_unix_nano": 1000000000, + "time_unix_nano": 1000000000, + "as_double": 15, + "exemplars":[ + { + "time_unix_nano": 1000000001, + "as_double": 0.3, + "span_id": "aaaaaaaaaaaaaaaa", + "trace_id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + } + ] + }] + } + } + ] + }] + }] + }`, + expect: ` + # TYPE test_metric_mono_sum counter + test_metric_mono_sum_total{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test"} 15.0 # {span_id="aaaaaaaaaaaaaaaa",trace_id="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"} 0.3 + `, + enableOpenMetrics: true, + resourceToTelemetryConversion: true, + }, } decoder := &pmetric.JSONUnmarshaler{} @@ -851,10 +1121,11 @@ func TestConverter(t *testing.T) { l := util.TestLogger(t) conv := convert.New(l, appenderAppendable{Inner: &app}, convert.Options{ - IncludeTargetInfo: tc.includeTargetInfo, - IncludeScopeInfo: tc.includeScopeInfo, - IncludeScopeLabels: tc.includeScopeLabels, - AddMetricSuffixes: tc.addMetricSuffixes, + IncludeTargetInfo: tc.includeTargetInfo, + IncludeScopeInfo: tc.includeScopeInfo, + IncludeScopeLabels: tc.includeScopeLabels, + AddMetricSuffixes: tc.addMetricSuffixes, + ResourceToTelemetryConversion: tc.resourceToTelemetryConversion, }) require.NoError(t, conv.ConsumeMetrics(context.Background(), payload)) @@ -867,6 +1138,201 @@ func TestConverter(t *testing.T) { } } +// Exponential histograms don't have a text format representation. +// In this test we are comparing the JSON format. 
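// Prometheus native histograms (the target representation for OTel exponential
// histograms) cannot be rendered in the text/OpenMetrics exposition format, so
// the test below marshals the histogram field of the first metric family
// returned by the test appender to JSON and compares it with require.JSONEq.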
+func TestConverterExponentialHistograms(t *testing.T) { + tt := []struct { + name string + input string + expect string + }{ + { + name: "Exponential Histogram", + input: `{ + "resource_metrics": [{ + "scope_metrics": [{ + "metrics": [{ + "name": "test_exponential_histogram", + "exponential_histogram": { + "aggregation_temporality": 2, + "data_points": [{ + "start_time_unix_nano": 1000000000, + "time_unix_nano": 1000000000, + "scale": 0, + "count": 11, + "sum": 158.63, + "positive": { + "offset": -1, + "bucket_counts": [2, 1, 3, 2, 0, 0, 3] + }, + "exemplars":[ + { + "time_unix_nano": 1000000001, + "as_double": 3.0, + "span_id": "aaaaaaaaaaaaaaaa", + "trace_id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + }, + { + "time_unix_nano": 1000000003, + "as_double": 1.0, + "span_id": "cccccccccccccccc", + "trace_id": "cccccccccccccccccccccccccccccccc" + }, + { + "time_unix_nano": 1000000002, + "as_double": 1.5, + "span_id": "bbbbbbbbbbbbbbbb", + "trace_id": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + } + ] + }] + } + }] + }] + }] + }`, + // The tests only allow one exemplar/series because it uses a map[series]exemplar as storage. Therefore only the exemplar "bbbbbbbbbbbbbbbb" is stored. + expect: `{ + "bucket": [ + { + "exemplar": { + "label": [ + { + "name": "span_id", + "value": "bbbbbbbbbbbbbbbb" + }, + { + "name": "trace_id", + "value": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + } + ], + "value": 1.5 + } + } + ], + "positive_delta": [2, -1, 2, -1, -2, 0, 3], + "positive_span": [ + { + "length": 7, + "offset": 0 + } + ], + "sample_count": 11, + "sample_sum": 158.63, + "schema": 0, + "zero_count": 0, + "zero_threshold": 1e-128 + }`, + }, + { + name: "Exponential Histogram 2", + input: `{ + "resource_metrics": [{ + "scope_metrics": [{ + "metrics": [{ + "name": "test_exponential_histogram_2", + "exponential_histogram": { + "aggregation_temporality": 2, + "data_points": [{ + "start_time_unix_nano": 1000000000, + "time_unix_nano": 1000000000, + "scale": 2, + "count": 19, + "sum": 200, + "zero_count" : 5, + "zero_threshold": 0.1, + "positive": { + "offset": 3, + "bucket_counts": [0, 0, 0, 0, 2, 1, 1, 0, 3, 0, 0] + }, + "negative": { + "offset": 0, + "bucket_counts": [0, 4, 0, 2, 3, 0, 0, 3] + }, + "exemplars":[ + { + "time_unix_nano": 1000000001, + "as_double": 3.0, + "span_id": "aaaaaaaaaaaaaaaa", + "trace_id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + } + ] + }] + } + }] + }] + }] + }`, + // zero_threshold is set to 1e-128 because dp.ZeroThreshold() is not yet available. 
+ expect: `{ + "bucket": [ + { + "exemplar": { + "label": [ + { + "name": "span_id", + "value": "aaaaaaaaaaaaaaaa" + }, + { + "name": "trace_id", + "value": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + } + ], + "value": 3 + } + } + ], + "negative_delta": [0, 4, -4, 2, 1, -3, 0, 3], + "negative_span": [ + { + "length": 8, + "offset": 1 + } + ], + "positive_delta": [2, -1, 0, -1, 3, -3, 0], + "positive_span": [ + { + "length": 0, + "offset": 4 + }, + { + "length": 7, + "offset": 4 + } + ], + "sample_count": 19, + "sample_sum": 200, + "schema": 2, + "zero_count": 5, + "zero_threshold": 1e-128 + }`, + }, + } + decoder := &pmetric.JSONUnmarshaler{} + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + payload, err := decoder.UnmarshalMetrics([]byte(tc.input)) + require.NoError(t, err) + + var app testappender.Appender + l := util.TestLogger(t) + conv := convert.New(l, appenderAppendable{Inner: &app}, convert.Options{}) + require.NoError(t, conv.ConsumeMetrics(context.Background(), payload)) + + families, err := app.MetricFamilies() + require.NoError(t, err) + + require.NotEmpty(t, families) + require.NotNil(t, families[0]) + require.NotEmpty(t, families[0].Metric) + require.NotNil(t, families[0].Metric[0].Histogram) + histJsonRep, err := json.Marshal(families[0].Metric[0].Histogram) + require.NoError(t, err) + require.JSONEq(t, string(histJsonRep), tc.expect) + }) + } +} + // appenderAppendable always returns the same Appender. type appenderAppendable struct { Inner storage.Appender diff --git a/component/otelcol/exporter/prometheus/internal/convert/histograms.go b/component/otelcol/exporter/prometheus/internal/convert/histograms.go new file mode 100644 index 000000000000..168eae1844fd --- /dev/null +++ b/component/otelcol/exporter/prometheus/internal/convert/histograms.go @@ -0,0 +1,152 @@ +// THIS CODE IS COPIED AND ADAPTED FROM opentelemetry-contrib (https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/cfeecd887979e6f372b4a370c4562da92a2baf34/pkg/translator/prometheusremotewrite/histograms.go) +// see https://www.youtube.com/watch?v=W2_TpDcess8 for more information on the conversion + +package convert + +import ( + "fmt" + "math" + + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/value" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +const defaultZeroThreshold = 1e-128 + +// exponentialToNativeHistogram translates OTel Exponential Histogram data point +// to Prometheus Native Histogram. 
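// The OTel scale maps directly onto the native histogram schema: scales below
// -4 are rejected, scales above 8 are downscaled to 8 by merging pairs of
// buckets (scaleDown below). The zero bucket count carries over as ZeroCount,
// while ZeroThreshold stays fixed at defaultZeroThreshold until the OTLP
// zero_threshold field is consumed, and data points flagged NoRecordedValue
// are encoded as staleness markers on Sum and Count.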
+func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (histogram.Histogram, error) { + scale := p.Scale() + if scale < -4 { + return histogram.Histogram{}, + fmt.Errorf("cannot convert exponential to native histogram."+ + " Scale must be >= -4, was %d", scale) + } + + var scaleDown int32 + if scale > 8 { + scaleDown = scale - 8 + scale = 8 + } + + pSpans, pDeltas := convertBucketsLayout(p.Positive(), scaleDown) + nSpans, nDeltas := convertBucketsLayout(p.Negative(), scaleDown) + + h := histogram.Histogram{ + Schema: scale, + + ZeroCount: p.ZeroCount(), + // TODO use zero_threshold, if set, see + // https://github.com/open-telemetry/opentelemetry-proto/pull/441 + ZeroThreshold: defaultZeroThreshold, + + PositiveSpans: pSpans, + PositiveBuckets: pDeltas, + NegativeSpans: nSpans, + NegativeBuckets: nDeltas, + } + + if p.Flags().NoRecordedValue() { + h.Sum = math.Float64frombits(value.StaleNaN) + h.Count = value.StaleNaN + } else { + if p.HasSum() { + h.Sum = p.Sum() + } + h.Count = p.Count() + } + return h, nil +} + +// convertBucketsLayout translates OTel Exponential Histogram dense buckets +// representation to Prometheus Native Histogram sparse bucket representation. +// +// The translation logic is taken from the client_golang `histogram.go#makeBuckets` +// function, see `makeBuckets` https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go +// The bucket indexes conversion was adjusted, since OTel exp. histogram bucket +// index 0 corresponds to the range (1, base] while Prometheus bucket index 0 +// to the range (base 1]. +func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, scaleDown int32) ([]histogram.Span, []int64) { + bucketCounts := buckets.BucketCounts() + if bucketCounts.Len() == 0 { + return nil, nil + } + + var ( + spans []histogram.Span + deltas []int64 + count int64 + prevCount int64 + ) + + appendDelta := func(count int64) { + spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + // Let the compiler figure out that this is const during this function by + // moving it into a local variable. + numBuckets := bucketCounts.Len() + + // The offset is scaled and adjusted by 1 as described above. + bucketIdx := buckets.Offset()>>scaleDown + 1 + spans = append(spans, histogram.Span{ + Offset: bucketIdx, + Length: 0, + }) + + for i := 0; i < numBuckets; i++ { + // The offset is scaled and adjusted by 1 as described above. + nextBucketIdx := (int32(i)+buckets.Offset())>>scaleDown + 1 + if bucketIdx == nextBucketIdx { // We have not collected enough buckets to merge yet. + count += int64(bucketCounts.At(i)) + continue + } + if count == 0 { + count = int64(bucketCounts.At(i)) + continue + } + + gap := nextBucketIdx - bucketIdx - 1 + if gap > 2 { + // We have to create a new span, because we have found a gap + // of more than two buckets. The constant 2 is copied from the logic in + // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 + spans = append(spans, histogram.Span{ + Offset: gap, + Length: 0, + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < gap; j++ { + appendDelta(0) + } + } + appendDelta(count) + count = int64(bucketCounts.At(i)) + bucketIdx = nextBucketIdx + } + // Need to use the last item's index. The offset is scaled and adjusted by 1 as described above. 
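// Worked example, using the data point from the first exponential histogram
// test above (scale 0, positive offset -1, bucket counts [2, 1, 3, 2, 0, 0, 3]):
//
//	dp := pmetric.NewExponentialHistogramDataPoint()
//	dp.Positive().SetOffset(-1)
//	dp.Positive().BucketCounts().FromRaw([]uint64{2, 1, 3, 2, 0, 0, 3})
//	spans, deltas := convertBucketsLayout(dp.Positive(), 0)
//	// spans  -> [{Offset: 0, Length: 7}]
//	// deltas -> [2, -1, 2, -1, -2, 0, 3]
//
// which is exactly the positive_span/positive_delta pair asserted in the
// test's expected JSON.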
+ gap := (int32(numBuckets)+buckets.Offset()-1)>>scaleDown + 1 - bucketIdx + if gap > 2 { + // We have to create a new span, because we have found a gap + // of more than two buckets. The constant 2 is copied from the logic in + // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 + spans = append(spans, histogram.Span{ + Offset: gap, + Length: 0, + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < gap; j++ { + appendDelta(0) + } + } + appendDelta(count) + + return spans, deltas +} diff --git a/component/otelcol/exporter/prometheus/prometheus.go b/component/otelcol/exporter/prometheus/prometheus.go index 0dd9ef49e69f..7da1c03868ea 100644 --- a/component/otelcol/exporter/prometheus/prometheus.go +++ b/component/otelcol/exporter/prometheus/prometheus.go @@ -19,10 +19,10 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.exporter.prometheus", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{labelstore.ServiceName}, + Name: "otelcol.exporter.prometheus", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, + Build: func(o component.Options, a component.Arguments) (component.Component, error) { return New(o, a.(Arguments)) }, @@ -31,21 +31,23 @@ func init() { // Arguments configures the otelcol.exporter.prometheus component. type Arguments struct { - IncludeTargetInfo bool `river:"include_target_info,attr,optional"` - IncludeScopeInfo bool `river:"include_scope_info,attr,optional"` - IncludeScopeLabels bool `river:"include_scope_labels,attr,optional"` - GCFrequency time.Duration `river:"gc_frequency,attr,optional"` - ForwardTo []storage.Appendable `river:"forward_to,attr"` - AddMetricSuffixes bool `river:"add_metric_suffixes,attr,optional"` + IncludeTargetInfo bool `river:"include_target_info,attr,optional"` + IncludeScopeInfo bool `river:"include_scope_info,attr,optional"` + IncludeScopeLabels bool `river:"include_scope_labels,attr,optional"` + GCFrequency time.Duration `river:"gc_frequency,attr,optional"` + ForwardTo []storage.Appendable `river:"forward_to,attr"` + AddMetricSuffixes bool `river:"add_metric_suffixes,attr,optional"` + ResourceToTelemetryConversion bool `river:"resource_to_telemetry_conversion,attr,optional"` } // DefaultArguments holds defaults values. var DefaultArguments = Arguments{ - IncludeTargetInfo: true, - IncludeScopeInfo: false, - IncludeScopeLabels: true, - GCFrequency: 5 * time.Minute, - AddMetricSuffixes: true, + IncludeTargetInfo: true, + IncludeScopeInfo: false, + IncludeScopeLabels: true, + GCFrequency: 5 * time.Minute, + AddMetricSuffixes: true, + ResourceToTelemetryConversion: false, } // SetToDefault implements river.Defaulter. 
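// The SetToDefault body itself sits outside this hunk. In agent components the
// river.Defaulter implementation for an Arguments type is conventionally just
// a reset to the package-level defaults, so here it would presumably look like:
//
//	func (args *Arguments) SetToDefault() {
//		*args = DefaultArguments
//	}
//
// which is how the new ResourceToTelemetryConversion field picks up its false
// default before the user's river block is decoded.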
@@ -151,8 +153,9 @@ func (c *Component) Update(newConfig component.Arguments) error { func convertArgumentsToConvertOptions(args Arguments) convert.Options { return convert.Options{ - IncludeTargetInfo: args.IncludeTargetInfo, - IncludeScopeInfo: args.IncludeScopeInfo, - AddMetricSuffixes: args.AddMetricSuffixes, + IncludeTargetInfo: args.IncludeTargetInfo, + IncludeScopeInfo: args.IncludeScopeInfo, + AddMetricSuffixes: args.AddMetricSuffixes, + ResourceToTelemetryConversion: args.ResourceToTelemetryConversion, } } diff --git a/component/otelcol/exporter/prometheus/prometheus_test.go b/component/otelcol/exporter/prometheus/prometheus_test.go index 2939c8962346..7e642ff9b585 100644 --- a/component/otelcol/exporter/prometheus/prometheus_test.go +++ b/component/otelcol/exporter/prometheus/prometheus_test.go @@ -23,12 +23,13 @@ func TestArguments_UnmarshalRiver(t *testing.T) { forward_to = [] `, expected: prometheus.Arguments{ - IncludeTargetInfo: true, - IncludeScopeInfo: false, - IncludeScopeLabels: true, - GCFrequency: 5 * time.Minute, - AddMetricSuffixes: true, - ForwardTo: []storage.Appendable{}, + IncludeTargetInfo: true, + IncludeScopeInfo: false, + IncludeScopeLabels: true, + GCFrequency: 5 * time.Minute, + AddMetricSuffixes: true, + ForwardTo: []storage.Appendable{}, + ResourceToTelemetryConversion: false, }, }, { @@ -39,15 +40,17 @@ func TestArguments_UnmarshalRiver(t *testing.T) { include_scope_labels = false gc_frequency = "1s" add_metric_suffixes = false + resource_to_telemetry_conversion = true forward_to = [] `, expected: prometheus.Arguments{ - IncludeTargetInfo: false, - IncludeScopeInfo: true, - IncludeScopeLabels: false, - GCFrequency: 1 * time.Second, - AddMetricSuffixes: false, - ForwardTo: []storage.Appendable{}, + IncludeTargetInfo: false, + IncludeScopeInfo: true, + IncludeScopeLabels: false, + GCFrequency: 1 * time.Second, + AddMetricSuffixes: false, + ForwardTo: []storage.Appendable{}, + ResourceToTelemetryConversion: true, }, }, { diff --git a/component/otelcol/internal/fanoutconsumer/logs.go b/component/otelcol/internal/fanoutconsumer/logs.go index a01202686e01..a8ee4df45b7f 100644 --- a/component/otelcol/internal/fanoutconsumer/logs.go +++ b/component/otelcol/internal/fanoutconsumer/logs.go @@ -29,6 +29,10 @@ func Logs(in []otelcol.Consumer) otelconsumer.Logs { for i := 0; i < len(in)-1; i++ { consumer := in[i] + if consumer == nil { + continue + } + if consumer.Capabilities().MutatesData { clone = append(clone, consumer) } else { @@ -40,10 +44,12 @@ func Logs(in []otelcol.Consumer) otelconsumer.Logs { // The final consumer can be given to the passthrough list regardless of // whether it mutates as long as there's no other read-only consumers. 
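// The nil checks added below let the fanout skip consumers that are not set,
// presumably so a fanout built over a slice such as [nil, A, B] behaves like
// one over [A, B] rather than panicking when Capabilities() is invoked on a
// nil entry.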
- if len(passthrough) == 0 || !last.Capabilities().MutatesData { - passthrough = append(passthrough, last) - } else { - clone = append(clone, last) + if last != nil { + if len(passthrough) == 0 || !last.Capabilities().MutatesData { + passthrough = append(passthrough, last) + } else { + clone = append(clone, last) + } } return &logsFanout{ diff --git a/component/otelcol/processor/attributes/attributes.go b/component/otelcol/processor/attributes/attributes.go index c71eec8e1415..93f774e54b55 100644 --- a/component/otelcol/processor/attributes/attributes.go +++ b/component/otelcol/processor/attributes/attributes.go @@ -7,7 +7,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/processor" - otel_service "github.com/grafana/agent/service/otel" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor" otelcomponent "go.opentelemetry.io/collector/component" @@ -16,10 +15,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.processor.attributes", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.processor.attributes", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := attributesprocessor.NewFactory() diff --git a/component/otelcol/processor/batch/batch.go b/component/otelcol/processor/batch/batch.go index 6cc54fa5f333..3c205a0e4320 100644 --- a/component/otelcol/processor/batch/batch.go +++ b/component/otelcol/processor/batch/batch.go @@ -8,7 +8,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/processor" - otel_service "github.com/grafana/agent/service/otel" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/processor/batchprocessor" @@ -16,10 +15,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.processor.batch", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.processor.batch", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := batchprocessor.NewFactory() diff --git a/component/otelcol/processor/filter/filter.go b/component/otelcol/processor/filter/filter.go index 864e9887688e..fe0927569558 100644 --- a/component/otelcol/processor/filter/filter.go +++ b/component/otelcol/processor/filter/filter.go @@ -4,7 +4,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/processor" - otel_service "github.com/grafana/agent/service/otel" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" @@ -14,10 +13,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.processor.filter", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.processor.filter", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, 
Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := filterprocessor.NewFactory() diff --git a/component/otelcol/processor/memorylimiter/memorylimiter.go b/component/otelcol/processor/memorylimiter/memorylimiter.go index 0321d41cb5c4..edf3bb1016d0 100644 --- a/component/otelcol/processor/memorylimiter/memorylimiter.go +++ b/component/otelcol/processor/memorylimiter/memorylimiter.go @@ -9,7 +9,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/processor" - otel_service "github.com/grafana/agent/service/otel" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/processor/memorylimiterprocessor" @@ -17,10 +16,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.processor.memory_limiter", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.processor.memory_limiter", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := memorylimiterprocessor.NewFactory() diff --git a/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go b/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go index 72b8430de5e1..13321e6af49e 100644 --- a/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go +++ b/component/otelcol/processor/probabilistic_sampler/probabilistic_sampler.go @@ -5,7 +5,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/processor" - otel_service "github.com/grafana/agent/service/otel" "github.com/grafana/river" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor" otelcomponent "go.opentelemetry.io/collector/component" @@ -14,10 +13,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.processor.probabilistic_sampler", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.processor.probabilistic_sampler", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := probabilisticsamplerprocessor.NewFactory() diff --git a/component/otelcol/processor/span/span.go b/component/otelcol/processor/span/span.go index c62e8e108527..833a899d2c25 100644 --- a/component/otelcol/processor/span/span.go +++ b/component/otelcol/processor/span/span.go @@ -7,7 +7,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/processor" - otel_service "github.com/grafana/agent/service/otel" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor" otelcomponent "go.opentelemetry.io/collector/component" @@ -17,10 +16,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.processor.span", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.processor.span", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts 
component.Options, args component.Arguments) (component.Component, error) { fact := spanprocessor.NewFactory() diff --git a/component/otelcol/processor/tail_sampling/tail_sampling.go b/component/otelcol/processor/tail_sampling/tail_sampling.go index 0a17a49bd3da..dc2f33bb661d 100644 --- a/component/otelcol/processor/tail_sampling/tail_sampling.go +++ b/component/otelcol/processor/tail_sampling/tail_sampling.go @@ -8,7 +8,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/processor" - otel_service "github.com/grafana/agent/service/otel" tsp "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" @@ -16,10 +15,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.processor.tail_sampling", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.processor.tail_sampling", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := tsp.NewFactory() diff --git a/component/otelcol/processor/transform/transform.go b/component/otelcol/processor/transform/transform.go index 85f86e5ac1ca..222e7c3289a8 100644 --- a/component/otelcol/processor/transform/transform.go +++ b/component/otelcol/processor/transform/transform.go @@ -8,7 +8,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/processor" - otel_service "github.com/grafana/agent/service/otel" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" @@ -18,10 +17,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.processor.transform", - Args: Arguments{}, - Exports: otelcol.ConsumerExports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.processor.transform", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := transformprocessor.NewFactory() diff --git a/component/otelcol/receiver/jaeger/jaeger.go b/component/otelcol/receiver/jaeger/jaeger.go index 1858f7a909ba..2cebb37b9114 100644 --- a/component/otelcol/receiver/jaeger/jaeger.go +++ b/component/otelcol/receiver/jaeger/jaeger.go @@ -8,7 +8,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/receiver" - otel_service "github.com/grafana/agent/service/otel" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver" otelcomponent "go.opentelemetry.io/collector/component" otelconfiggrpc "go.opentelemetry.io/collector/config/configgrpc" @@ -18,9 +17,8 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.receiver.jaeger", - Args: Arguments{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.receiver.jaeger", + Args: Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := jaegerreceiver.NewFactory() diff --git a/component/otelcol/receiver/kafka/kafka.go 
b/component/otelcol/receiver/kafka/kafka.go index 2a41159b103b..2111f04c2579 100644 --- a/component/otelcol/receiver/kafka/kafka.go +++ b/component/otelcol/receiver/kafka/kafka.go @@ -7,7 +7,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/receiver" - otel_service "github.com/grafana/agent/service/otel" "github.com/grafana/river/rivertypes" "github.com/mitchellh/mapstructure" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" @@ -18,9 +17,8 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.receiver.kafka", - Args: Arguments{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.receiver.kafka", + Args: Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := kafkareceiver.NewFactory() diff --git a/component/otelcol/receiver/opencensus/opencensus.go b/component/otelcol/receiver/opencensus/opencensus.go index 63df3da32118..7f4c64ee0ace 100644 --- a/component/otelcol/receiver/opencensus/opencensus.go +++ b/component/otelcol/receiver/opencensus/opencensus.go @@ -6,7 +6,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/receiver" - otel_service "github.com/grafana/agent/service/otel" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver" otelcomponent "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/extension" @@ -14,9 +13,8 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.receiver.opencensus", - Args: Arguments{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.receiver.opencensus", + Args: Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := opencensusreceiver.NewFactory() diff --git a/component/otelcol/receiver/otlp/otlp.go b/component/otelcol/receiver/otlp/otlp.go index 4ca6106551e3..bfdb20365ef8 100644 --- a/component/otelcol/receiver/otlp/otlp.go +++ b/component/otelcol/receiver/otlp/otlp.go @@ -9,7 +9,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/receiver" - otel_service "github.com/grafana/agent/service/otel" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/receiver/otlpreceiver" @@ -17,9 +16,8 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.receiver.otlp", - Args: Arguments{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.receiver.otlp", + Args: Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := otlpreceiver.NewFactory() diff --git a/component/otelcol/receiver/prometheus/internal/appendable.go b/component/otelcol/receiver/prometheus/internal/appendable.go index bbcf6b9ab055..d8b26a2900bc 100644 --- a/component/otelcol/receiver/prometheus/internal/appendable.go +++ b/component/otelcol/receiver/prometheus/internal/appendable.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal" @@ -21,17 +10,18 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receiverhelper" ) // appendable translates Prometheus scraping diffs into OpenTelemetry format. + type appendable struct { sink consumer.Metrics metricAdjuster MetricsAdjuster useStartTimeMetric bool + trimSuffixes bool startTimeMetricRegex *regexp.Regexp externalLabels labels.Labels @@ -46,17 +36,18 @@ func NewAppendable( gcInterval time.Duration, useStartTimeMetric bool, startTimeMetricRegex *regexp.Regexp, - receiverID component.ID, - externalLabels labels.Labels) (storage.Appendable, error) { + useCreatedMetric bool, + externalLabels labels.Labels, + trimSuffixes bool) (storage.Appendable, error) { var metricAdjuster MetricsAdjuster if !useStartTimeMetric { - metricAdjuster = NewInitialPointAdjuster(set.Logger, gcInterval) + metricAdjuster = NewInitialPointAdjuster(set.Logger, gcInterval, useCreatedMetric) } else { metricAdjuster = NewStartTimeMetricAdjuster(set.Logger, startTimeMetricRegex) } - obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ReceiverID: receiverID, Transport: transport, ReceiverCreateSettings: set}) + obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ReceiverID: set.ID, Transport: transport, ReceiverCreateSettings: set}) if err != nil { return nil, err } @@ -69,9 +60,10 @@ func NewAppendable( startTimeMetricRegex: startTimeMetricRegex, externalLabels: externalLabels, obsrecv: obsrecv, + trimSuffixes: trimSuffixes, }, nil } func (o *appendable) Appender(ctx context.Context) storage.Appender { - return newTransaction(ctx, o.metricAdjuster, o.sink, o.externalLabels, o.settings, o.obsrecv) + return newTransaction(ctx, o.metricAdjuster, o.sink, o.externalLabels, o.settings, o.obsrecv, o.trimSuffixes) } diff --git a/component/otelcol/receiver/prometheus/internal/doc.go b/component/otelcol/receiver/prometheus/internal/doc.go index c3e2d1419364..f4b8aefe77ad 100644 --- a/component/otelcol/receiver/prometheus/internal/doc.go +++ b/component/otelcol/receiver/prometheus/internal/doc.go @@ -1,5 +1,5 @@ // Package internal is a near copy of -// https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.61.0/receiver/prometheusreceiver/internal +// https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.87.0/receiver/prometheusreceiver/internal // A copy was made because the upstream package is internal. If it is ever made // public, our copy can be removed. 
// diff --git a/component/otelcol/receiver/prometheus/internal/logger.go b/component/otelcol/receiver/prometheus/internal/logger.go index 5cfb210742be..726d236574df 100644 --- a/component/otelcol/receiver/prometheus/internal/logger.go +++ b/component/otelcol/receiver/prometheus/internal/logger.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal" diff --git a/component/otelcol/receiver/prometheus/internal/logger_test.go b/component/otelcol/receiver/prometheus/internal/logger_test.go index 9913080bf0c8..5a17fd051a27 100644 --- a/component/otelcol/receiver/prometheus/internal/logger_test.go +++ b/component/otelcol/receiver/prometheus/internal/logger_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal diff --git a/component/otelcol/receiver/prometheus/internal/metadata.go b/component/otelcol/receiver/prometheus/internal/metadata.go index 4cd3a012bdea..cea58a0e1dc9 100644 --- a/component/otelcol/receiver/prometheus/internal/metadata.go +++ b/component/otelcol/receiver/prometheus/internal/metadata.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal" diff --git a/component/otelcol/receiver/prometheus/internal/metricfamily.go b/component/otelcol/receiver/prometheus/internal/metricfamily.go index 461f8c9253fa..e501ee5da384 100644 --- a/component/otelcol/receiver/prometheus/internal/metricfamily.go +++ b/component/otelcol/receiver/prometheus/internal/metricfamily.go @@ -1,30 +1,29 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal" import ( + "encoding/hex" "fmt" + "math" "sort" "strings" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/scrape" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" +) + +const ( + traceIDKey = "trace_id" + spanIDKey = "span_id" ) type metricFamily struct { @@ -41,15 +40,17 @@ type metricFamily struct { // a couple data complexValue (buckets and count/sum), a group of a metric family always share a same set of tags. for // simple types like counter and gauge, each data point is a group of itself type metricGroup struct { - family *metricFamily + mtype pmetric.MetricType ts int64 ls labels.Labels count float64 hasCount bool sum float64 hasSum bool + created float64 value float64 complexValue []*dataPoint + exemplars pmetric.ExemplarSlice } func newMetricFamily(metricName string, mc scrape.MetricMetadataStore, logger *zap.Logger) *metricFamily { @@ -79,12 +80,6 @@ func (mf *metricFamily) includesMetric(metricName string) bool { return metricName == mf.name } -func (mf *metricFamily) getGroupKey(ls labels.Labels) uint64 { - bytes := make([]byte, 0, 2048) - hash, _ := ls.HashWithoutLabels(bytes, getSortedNotUsefulLabels(mf.mtype)...) - return hash -} - func (mg *metricGroup) sortPoints() { sort.Slice(mg.complexValue, func(i, j int) bool { return mg.complexValue[i].boundary < mg.complexValue[j].boundary @@ -92,25 +87,28 @@ func (mg *metricGroup) sortPoints() { } func (mg *metricGroup) toDistributionPoint(dest pmetric.HistogramDataPointSlice) { - if !mg.hasCount || len(mg.complexValue) == 0 { + if !mg.hasCount { return } mg.sortPoints() - // for OCAgent Proto, the bounds won't include +inf - // TODO: (@odeke-em) should we also check OpenTelemetry Pdata for bucket bounds? 
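// The rewritten bucket handling below drops a trailing +Inf bound and derives
// the final bucket from the total count, de-cumulating the remaining buckets.
// Using the "histogram with startTimestamp from _created" test added later in
// this diff: cumulative buckets {le=0.75: 33, le=2.75: 55, le=+Inf: 66} with a
// count of 66 become ExplicitBounds [0.75, 2.75] and BucketCounts [33, 22, 11].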
- bounds := make([]float64, len(mg.complexValue)-1) - bucketCounts := make([]uint64, len(mg.complexValue)) + bucketCount := len(mg.complexValue) + 1 + // if the final bucket is +Inf, we ignore it + if bucketCount > 1 && mg.complexValue[bucketCount-2].boundary == math.Inf(1) { + bucketCount-- + } + + // for OTLP the bounds won't include +inf + bounds := make([]float64, bucketCount-1) + bucketCounts := make([]uint64, bucketCount) + var adjustedCount float64 pointIsStale := value.IsStaleNaN(mg.sum) || value.IsStaleNaN(mg.count) + for i := 0; i < bucketCount-1; i++ { + bounds[i] = mg.complexValue[i].boundary + adjustedCount = mg.complexValue[i].value - for i := 0; i < len(mg.complexValue); i++ { - if i != len(mg.complexValue)-1 { - // not need to add +inf as bound to oc proto - bounds[i] = mg.complexValue[i].boundary - } - adjustedCount := mg.complexValue[i].value // Buckets still need to be sent to know to set them as stale, // but a staleness NaN converted to uint64 would be an extremely large number. // Setting to 0 instead. @@ -122,6 +120,15 @@ func (mg *metricGroup) toDistributionPoint(dest pmetric.HistogramDataPointSlice) bucketCounts[i] = uint64(adjustedCount) } + // Add the final bucket based on the total count + adjustedCount = mg.count + if pointIsStale { + adjustedCount = 0 + } else if bucketCount > 1 { + adjustedCount -= mg.complexValue[bucketCount-2].value + } + bucketCounts[bucketCount-1] = uint64(adjustedCount) + point := dest.AppendEmpty() if pointIsStale { @@ -138,9 +145,24 @@ func (mg *metricGroup) toDistributionPoint(dest pmetric.HistogramDataPointSlice) // The timestamp MUST be in retrieved from milliseconds and converted to nanoseconds. tsNanos := timestampFromMs(mg.ts) - point.SetStartTimestamp(tsNanos) // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp + if mg.created != 0 { + point.SetStartTimestamp(timestampFromFloat64(mg.created)) + } else { + // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp + point.SetStartTimestamp(tsNanos) + } point.SetTimestamp(tsNanos) populateAttributes(pmetric.MetricTypeHistogram, mg.ls, point.Attributes()) + mg.setExemplars(point.Exemplars()) +} + +func (mg *metricGroup) setExemplars(exemplars pmetric.ExemplarSlice) { + if mg == nil { + return + } + if mg.exemplars.Len() > 0 { + mg.exemplars.MoveAndAppendTo(exemplars) + } } func (mg *metricGroup) toSummaryPoint(dest pmetric.SummaryDataPointSlice) { @@ -183,7 +205,12 @@ func (mg *metricGroup) toSummaryPoint(dest pmetric.SummaryDataPointSlice) { // The timestamp MUST be in retrieved from milliseconds and converted to nanoseconds. tsNanos := timestampFromMs(mg.ts) point.SetTimestamp(tsNanos) - point.SetStartTimestamp(tsNanos) // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp + if mg.created != 0 { + point.SetStartTimestamp(timestampFromFloat64(mg.created)) + } else { + // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp + point.SetStartTimestamp(tsNanos) + } populateAttributes(pmetric.MetricTypeSummary, mg.ls, point.Attributes()) } @@ -191,8 +218,13 @@ func (mg *metricGroup) toNumberDataPoint(dest pmetric.NumberDataPointSlice) { tsNanos := timestampFromMs(mg.ts) point := dest.AppendEmpty() // gauge/undefined types have no start time. 
- if mg.family.mtype == pmetric.MetricTypeSum { - point.SetStartTimestamp(tsNanos) // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp + if mg.mtype == pmetric.MetricTypeSum { + if mg.created != 0 { + point.SetStartTimestamp(timestampFromFloat64(mg.created)) + } else { + // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp + point.SetStartTimestamp(tsNanos) + } } point.SetTimestamp(tsNanos) if value.IsStaleNaN(mg.value) { @@ -201,6 +233,7 @@ func (mg *metricGroup) toNumberDataPoint(dest pmetric.NumberDataPointSlice) { point.SetDoubleValue(mg.value) } populateAttributes(pmetric.MetricTypeGauge, mg.ls, point.Attributes()) + mg.setExemplars(point.Exemplars()) } func populateAttributes(mType pmetric.MetricType, ls labels.Labels, dest pcommon.Map) { @@ -226,9 +259,10 @@ func (mf *metricFamily) loadMetricGroupOrCreate(groupKey uint64, ls labels.Label mg, ok := mf.groups[groupKey] if !ok { mg = &metricGroup{ - family: mf, - ts: ts, - ls: ls, + mtype: mf.mtype, + ts: ts, + ls: ls, + exemplars: pmetric.NewExemplarSlice(), } mf.groups[groupKey] = mg // maintaining data insertion order is helpful to generate stable/reproducible metric output @@ -237,9 +271,8 @@ func (mf *metricFamily) loadMetricGroupOrCreate(groupKey uint64, ls labels.Label return mg } -func (mf *metricFamily) Add(metricName string, ls labels.Labels, t int64, v float64) error { - groupKey := mf.getGroupKey(ls) - mg := mf.loadMetricGroupOrCreate(groupKey, ls, t) +func (mf *metricFamily) addSeries(seriesRef uint64, metricName string, ls labels.Labels, t int64, v float64) error { + mg := mf.loadMetricGroupOrCreate(seriesRef, ls, t) if mg.ts != t { return fmt.Errorf("inconsistent timestamps on metric points for metric %v", metricName) } @@ -254,6 +287,8 @@ func (mf *metricFamily) Add(metricName string, ls labels.Labels, t int64, v floa mg.ts = t mg.count = v mg.hasCount = true + case strings.HasSuffix(metricName, metricSuffixCreated): + mg.created = v default: boundary, err := getBoundary(mf.mtype, ls) if err != nil { @@ -261,6 +296,14 @@ func (mf *metricFamily) Add(metricName string, ls labels.Labels, t int64, v floa } mg.complexValue = append(mg.complexValue, &dataPoint{value: v, boundary: boundary}) } + case pmetric.MetricTypeSum: + if strings.HasSuffix(metricName, metricSuffixCreated) { + mg.created = v + } else { + mg.value = v + } + case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge, pmetric.MetricTypeExponentialHistogram: + fallthrough default: mg.value = v } @@ -268,13 +311,18 @@ func (mf *metricFamily) Add(metricName string, ls labels.Labels, t int64, v floa return nil } -func (mf *metricFamily) appendMetric(metrics pmetric.MetricSlice) { +func (mf *metricFamily) appendMetric(metrics pmetric.MetricSlice, trimSuffixes bool) { metric := pmetric.NewMetric() - metric.SetName(mf.name) + // Trims type and unit suffixes from metric name + name := mf.name + if trimSuffixes { + name = prometheus.TrimPromSuffixes(name, mf.mtype, mf.metadata.Unit) + } + metric.SetName(name) metric.SetDescription(mf.metadata.Help) - metric.SetUnit(mf.metadata.Unit) + metric.SetUnit(prometheus.UnitWordToUCUM(mf.metadata.Unit)) - pointCount := 0 + var pointCount int switch mf.mtype { case pmetric.MetricTypeHistogram: @@ -304,6 +352,8 @@ func (mf *metricFamily) appendMetric(metrics pmetric.MetricSlice) { } pointCount = sdpL.Len() + case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge, pmetric.MetricTypeExponentialHistogram: + fallthrough default: // Everything else should be set to a Gauge. 
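// To illustrate the trimming and unit translation introduced in appendMetric
// above (assuming the usual behaviour of the upstream prometheus translator
// package): with trimSuffixes enabled, a scraped counter family named
// request_duration_seconds_total with metadata unit "seconds" would be emitted
// as an OTLP metric named request_duration, and UnitWordToUCUM would record
// its unit as "s".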
gauge := metric.SetEmptyGauge() gdpL := gauge.DataPoints() @@ -319,3 +369,58 @@ func (mf *metricFamily) appendMetric(metrics pmetric.MetricSlice) { metric.MoveTo(metrics.AppendEmpty()) } + +func (mf *metricFamily) addExemplar(seriesRef uint64, e exemplar.Exemplar) { + mg := mf.groups[seriesRef] + if mg == nil { + return + } + es := mg.exemplars + convertExemplar(e, es.AppendEmpty()) +} + +func convertExemplar(pe exemplar.Exemplar, e pmetric.Exemplar) { + e.SetTimestamp(timestampFromMs(pe.Ts)) + e.SetDoubleValue(pe.Value) + e.FilteredAttributes().EnsureCapacity(len(pe.Labels)) + for _, lb := range pe.Labels { + switch strings.ToLower(lb.Name) { + case traceIDKey: + var tid [16]byte + err := decodeAndCopyToLowerBytes(tid[:], []byte(lb.Value)) + if err == nil { + e.SetTraceID(tid) + } else { + e.FilteredAttributes().PutStr(lb.Name, lb.Value) + } + case spanIDKey: + var sid [8]byte + err := decodeAndCopyToLowerBytes(sid[:], []byte(lb.Value)) + if err == nil { + e.SetSpanID(sid) + } else { + e.FilteredAttributes().PutStr(lb.Name, lb.Value) + } + default: + e.FilteredAttributes().PutStr(lb.Name, lb.Value) + } + } +} + +/* + decodeAndCopyToLowerBytes copies src to dst on lower bytes instead of higher + +1. If len(src) > len(dst) -> copy first len(dst) bytes as it is. Example -> src = []byte{0xab,0xcd,0xef,0xgh,0xij}, dst = [2]byte, result dst = [2]byte{0xab, 0xcd} +2. If len(src) = len(dst) -> copy src to dst as it is +3. If len(src) < len(dst) -> prepend required 0s and then add src to dst. Example -> src = []byte{0xab, 0xcd}, dst = [8]byte, result dst = [8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xcd} +*/ +func decodeAndCopyToLowerBytes(dst []byte, src []byte) error { + var err error + decodedLen := hex.DecodedLen(len(src)) + if decodedLen >= len(dst) { + _, err = hex.Decode(dst, src[:hex.EncodedLen(len(dst))]) + } else { + _, err = hex.Decode(dst[len(dst)-decodedLen:], src) + } + return err +} diff --git a/component/otelcol/receiver/prometheus/internal/metricfamily_test.go b/component/otelcol/receiver/prometheus/internal/metricfamily_test.go index 4dc336c5c4fa..10c0f9579480 100644 --- a/component/otelcol/receiver/prometheus/internal/metricfamily_test.go +++ b/component/otelcol/receiver/prometheus/internal/metricfamily_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package internal @@ -69,6 +58,12 @@ var mc = testMetadataStore{ Help: "This is some help for a histogram", Unit: "ms", }, + "histogram_with_created": scrape.MetricMetadata{ + Metric: "hg", + Type: textparse.MetricTypeHistogram, + Help: "This is some help for a histogram", + Unit: "ms", + }, "histogram_stale": scrape.MetricMetadata{ Metric: "hg_stale", Type: textparse.MetricTypeHistogram, @@ -81,6 +76,12 @@ var mc = testMetadataStore{ Help: "This is some help for a summary", Unit: "ms", }, + "summary_with_created": scrape.MetricMetadata{ + Metric: "s", + Type: textparse.MetricTypeSummary, + Help: "This is some help for a summary", + Unit: "ms", + }, "summary_stale": scrape.MetricMetadata{ Metric: "s_stale", Type: textparse.MetricTypeSummary, @@ -137,6 +138,49 @@ func TestMetricGroupData_toDistributionUnitTest(t *testing.T) { return point }, }, + { + name: "histogram with startTimestamp from _created", + metricName: "histogram_with_created", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A"}), + scrapes: []*scrape{ + {at: 11, value: 66, metric: "histogram_with_created_count"}, + {at: 11, value: 1004.78, metric: "histogram_with_created_sum"}, + {at: 11, value: 600.78, metric: "histogram_with_created_created"}, + { + at: 11, + value: 33, + metric: "histogram_with_created_bucket", + extraLabel: labels.Label{Name: "le", Value: "0.75"}, + }, + { + at: 11, + value: 55, + metric: "histogram_with_created_bucket", + extraLabel: labels.Label{Name: "le", Value: "2.75"}, + }, + { + at: 11, + value: 66, + metric: "histogram_with_created_bucket", + extraLabel: labels.Label{Name: "le", Value: "+Inf"}}, + }, + want: func() pmetric.HistogramDataPoint { + point := pmetric.NewHistogramDataPoint() + point.SetCount(66) + point.SetSum(1004.78) + + // the time in milliseconds -> nanoseconds. + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) + point.SetStartTimestamp(timestampFromFloat64(600.78)) + + point.ExplicitBounds().FromRaw([]float64{0.75, 2.75}) + point.BucketCounts().FromRaw([]uint64{33, 22, 11}) + attributes := point.Attributes() + attributes.PutStr("a", "A") + return point + }, + }, { name: "histogram that is stale", metricName: "histogram_stale", @@ -174,6 +218,28 @@ func TestMetricGroupData_toDistributionUnitTest(t *testing.T) { }, wantErr: true, }, + { + name: "histogram without buckets", + metricName: "histogram", + intervalStartTimeMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 11, value: 66, metric: "histogram_count"}, + {at: 11, value: 1004.78, metric: "histogram_sum"}, + }, + want: func() pmetric.HistogramDataPoint { + point := pmetric.NewHistogramDataPoint() + point.SetCount(66) + point.SetSum(1004.78) + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. 
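Stepping back from the test table above: the expected bucket counts `{33, 22, 11}` come from converting Prometheus's cumulative `le` buckets (33 at `le="0.75"`, 55 at `le="2.75"`, 66 at `le="+Inf"`) into the per-bucket counts OTLP histograms use. A minimal sketch of that arithmetic, with `toDisjoint` as a hypothetical helper rather than receiver code:

```go
package main

import "fmt"

// toDisjoint converts cumulative Prometheus bucket counts into the
// per-bucket counts expected by pmetric histogram data points.
func toDisjoint(cumulative []uint64) []uint64 {
	out := make([]uint64, len(cumulative))
	var prev uint64
	for i, c := range cumulative {
		out[i] = c - prev
		prev = c
	}
	return out
}

func main() {
	fmt.Println(toDisjoint([]uint64{33, 55, 66})) // [33 22 11]
}
```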
+ point.BucketCounts().FromRaw([]uint64{66}) + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, } for _, tt := range tests { @@ -187,7 +253,8 @@ func TestMetricGroupData_toDistributionUnitTest(t *testing.T) { } else { lbls = tt.labels.Copy() } - err := mp.Add(tv.metric, lbls, tv.at, tv.value) + sRef, _ := getSeriesRef(nil, lbls, mp.mtype) + err := mp.addSeries(sRef, tv.metric, lbls, tv.at, tv.value) if tt.wantErr { if i != 0 { require.Error(t, err) @@ -202,11 +269,9 @@ func TestMetricGroupData_toDistributionUnitTest(t *testing.T) { } require.Len(t, mp.groups, 1) - groupKey := mp.getGroupKey(tt.labels.Copy()) - require.NotNil(t, mp.groups[groupKey]) sl := pmetric.NewMetricSlice() - mp.appendMetric(sl) + mp.appendMetric(sl, false) require.Equal(t, 1, sl.Len(), "Exactly one metric expected") metric := sl.At(0) @@ -308,6 +373,79 @@ func TestMetricGroupData_toSummaryUnitTest(t *testing.T) { return point }, }, + { + name: "summary_with_created", + labelsScrapes: []*labelsScrapes{ + { + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 10, metric: "summary_with_created_count"}, + {at: 14, value: 15, metric: "summary_with_created_sum"}, + {at: 14, value: 150, metric: "summary_with_created_created"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.0", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 8, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.75", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 33.7, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.50", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 27, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.90", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 56, metric: "value"}, + }, + }, + { + labels: labels.FromMap(map[string]string{"a": "A", "quantile": "0.99", "b": "B"}), + scrapes: []*scrape{ + {at: 14, value: 82, metric: "value"}, + }, + }, + }, + want: func() pmetric.SummaryDataPoint { + point := pmetric.NewSummaryDataPoint() + point.SetCount(10) + point.SetSum(15) + qtL := point.QuantileValues() + qn0 := qtL.AppendEmpty() + qn0.SetQuantile(0) + qn0.SetValue(8) + qn50 := qtL.AppendEmpty() + qn50.SetQuantile(.5) + qn50.SetValue(27) + qn75 := qtL.AppendEmpty() + qn75.SetQuantile(.75) + qn75.SetValue(33.7) + qn90 := qtL.AppendEmpty() + qn90.SetQuantile(.9) + qn90.SetValue(56) + qn99 := qtL.AppendEmpty() + qn99.SetQuantile(.99) + qn99.SetValue(82) + + // the time in milliseconds -> nanoseconds. 
+ point.SetTimestamp(pcommon.Timestamp(14 * time.Millisecond)) + point.SetStartTimestamp(timestampFromFloat64(150)) + + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, { name: "summary_stale", labelsScrapes: []*labelsScrapes{ @@ -400,7 +538,9 @@ func TestMetricGroupData_toSummaryUnitTest(t *testing.T) { mp := newMetricFamily(tt.name, mc, zap.NewNop()) for _, lbs := range tt.labelsScrapes { for i, scrape := range lbs.scrapes { - err := mp.Add(scrape.metric, lbs.labels.Copy(), scrape.at, scrape.value) + lb := lbs.labels.Copy() + sRef, _ := getSeriesRef(nil, lb, mp.mtype) + err := mp.addSeries(sRef, scrape.metric, lb, scrape.at, scrape.value) if tt.wantErr { // The first scrape won't have an error if i != 0 { @@ -417,11 +557,9 @@ func TestMetricGroupData_toSummaryUnitTest(t *testing.T) { } require.Len(t, mp.groups, 1) - groupKey := mp.getGroupKey(tt.labelsScrapes[0].labels.Copy()) - require.NotNil(t, mp.groups[groupKey]) sl := pmetric.NewMetricSlice() - mp.appendMetric(sl) + mp.appendMetric(sl, false) require.Equal(t, 1, sl.Len(), "Exactly one metric expected") metric := sl.At(0) @@ -451,6 +589,29 @@ func TestMetricGroupData_toNumberDataUnitTest(t *testing.T) { intervalStartTimestampMs int64 want func() pmetric.NumberDataPoint }{ + { + metricKind: "counter", + name: "counter:: startTimestampMs from _created", + intervalStartTimestampMs: 11, + labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}), + scrapes: []*scrape{ + {at: 13, value: 33.7, metric: "value"}, + {at: 13, value: 150, metric: "value_created"}, + }, + want: func() pmetric.NumberDataPoint { + point := pmetric.NewNumberDataPoint() + point.SetDoubleValue(33.7) + + // the time in milliseconds -> nanoseconds. + point.SetTimestamp(pcommon.Timestamp(13 * time.Millisecond)) + point.SetStartTimestamp(timestampFromFloat64(150)) + + attributes := point.Attributes() + attributes.PutStr("a", "A") + attributes.PutStr("b", "B") + return point + }, + }, { metricKind: "counter", name: "counter:: startTimestampMs of 11", @@ -496,15 +657,15 @@ func TestMetricGroupData_toNumberDataUnitTest(t *testing.T) { t.Run(tt.name, func(t *testing.T) { mp := newMetricFamily(tt.metricKind, mc, zap.NewNop()) for _, tv := range tt.scrapes { - require.NoError(t, mp.Add(tv.metric, tt.labels.Copy(), tv.at, tv.value)) + lb := tt.labels.Copy() + sRef, _ := getSeriesRef(nil, lb, mp.mtype) + require.NoError(t, mp.addSeries(sRef, tv.metric, lb, tv.at, tv.value)) } require.Len(t, mp.groups, 1) - groupKey := mp.getGroupKey(tt.labels.Copy()) - require.NotNil(t, mp.groups[groupKey]) sl := pmetric.NewMetricSlice() - mp.appendMetric(sl) + mp.appendMetric(sl, false) require.Equal(t, 1, sl.Len(), "Exactly one metric expected") metric := sl.At(0) diff --git a/component/otelcol/receiver/prometheus/internal/metrics_adjuster.go b/component/otelcol/receiver/prometheus/internal/metrics_adjuster.go index 1c0df5817a5b..a483b9588cff 100644 --- a/component/otelcol/receiver/prometheus/internal/metrics_adjuster.go +++ b/component/otelcol/receiver/prometheus/internal/metrics_adjuster.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal" @@ -19,16 +8,13 @@ import ( "sync" "time" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" semconv "go.opentelemetry.io/collector/semconv/v1.6.1" "go.uber.org/zap" -) -// The code in this file has been heavily inspired by Otel Collector: -// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/prometheusreceiver/internal/metrics_adjuster.go -// In case of issues or changes check the file against the Collector to see if it was also updated. + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil" +) // Notes on garbage collection (gc): // @@ -133,7 +119,7 @@ func (tsm *timeseriesMap) get(metric pmetric.Metric, kv pcommon.Map) (*timeserie return tsi, ok } -// Create a unique timeseries signature consisting of the metric name and label values. +// Create a unique string signature for attributes values sorted by attribute keys. func getAttributesSignature(m pcommon.Map) [16]byte { clearedMap := pcommon.NewMap() m.Range(func(k string, attrValue pcommon.Value) bool { @@ -238,8 +224,6 @@ func (jm *JobsMap) get(job, instance string) *timeseriesMap { return tsm2 } -// MetricsAdjuster adjusts the start time of metrics when converting between -// Prometheus and OTel. type MetricsAdjuster interface { AdjustMetrics(metrics pmetric.Metrics) error } @@ -248,21 +232,23 @@ type MetricsAdjuster interface { // and provides AdjustMetricSlice, which takes a sequence of metrics and adjust their start times based on // the initial points. type initialPointAdjuster struct { - jobsMap *JobsMap - logger *zap.Logger + jobsMap *JobsMap + logger *zap.Logger + useCreatedMetric bool } // NewInitialPointAdjuster returns a new MetricsAdjuster that adjust metrics' start times based on the initial received points. -func NewInitialPointAdjuster(logger *zap.Logger, gcInterval time.Duration) MetricsAdjuster { +func NewInitialPointAdjuster(logger *zap.Logger, gcInterval time.Duration, useCreatedMetric bool) MetricsAdjuster { return &initialPointAdjuster{ - jobsMap: NewJobsMap(gcInterval), - logger: logger, + jobsMap: NewJobsMap(gcInterval), + logger: logger, + useCreatedMetric: useCreatedMetric, } } // AdjustMetrics takes a sequence of metrics and adjust their start times based on the initial and // previous points in the timeseriesMap. -func (ma *initialPointAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { +func (a *initialPointAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { // By contract metrics will have at least 1 data point, so for sure will have at least one ResourceMetrics. 
job, found := metrics.ResourceMetrics().At(0).Resource().Attributes().Get(semconv.AttributeServiceName) @@ -274,7 +260,7 @@ func (ma *initialPointAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { if !found { return errors.New("adjusting metrics without instance") } - tsm := ma.jobsMap.get(job.Str(), instance.Str()) + tsm := a.jobsMap.get(job.Str(), instance.Str()) // The lock on the relevant timeseriesMap is held throughout the adjustment process to ensure that // nothing else can modify the data used for adjustment. @@ -291,17 +277,20 @@ func (ma *initialPointAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { // gauges don't need to be adjusted so no additional processing is necessary case pmetric.MetricTypeHistogram: - adjustMetricHistogram(tsm, metric) + a.adjustMetricHistogram(tsm, metric) case pmetric.MetricTypeSummary: - adjustMetricSummary(tsm, metric) + a.adjustMetricSummary(tsm, metric) case pmetric.MetricTypeSum: - adjustMetricSum(tsm, metric) + a.adjustMetricSum(tsm, metric) + + case pmetric.MetricTypeEmpty, pmetric.MetricTypeExponentialHistogram: + fallthrough default: // this shouldn't happen - ma.logger.Info("Adjust - skipping unexpected point", zap.String("type", dataType.String())) + a.logger.Info("Adjust - skipping unexpected point", zap.String("type", dataType.String())) } } } @@ -309,7 +298,7 @@ func (ma *initialPointAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { return nil } -func adjustMetricHistogram(tsm *timeseriesMap, current pmetric.Metric) { +func (a *initialPointAdjuster) adjustMetricHistogram(tsm *timeseriesMap, current pmetric.Metric) { histogram := current.Histogram() if histogram.AggregationTemporality() != pmetric.AggregationTemporalityCumulative { // Only dealing with CumulativeDistributions. @@ -319,6 +308,15 @@ func adjustMetricHistogram(tsm *timeseriesMap, current pmetric.Metric) { currentPoints := histogram.DataPoints() for i := 0; i < currentPoints.Len(); i++ { currentDist := currentPoints.At(i) + + // start timestamp was set from _created + if a.useCreatedMetric && + !currentDist.Flags().NoRecordedValue() && + currentDist.StartTimestamp() < currentDist.Timestamp() { + + continue + } + tsi, found := tsm.get(current, currentDist.Attributes()) if !found { // initialize everything. @@ -349,10 +347,19 @@ func adjustMetricHistogram(tsm *timeseriesMap, current pmetric.Metric) { } } -func adjustMetricSum(tsm *timeseriesMap, current pmetric.Metric) { +func (a *initialPointAdjuster) adjustMetricSum(tsm *timeseriesMap, current pmetric.Metric) { currentPoints := current.Sum().DataPoints() for i := 0; i < currentPoints.Len(); i++ { currentSum := currentPoints.At(i) + + // start timestamp was set from _created + if a.useCreatedMetric && + !currentSum.Flags().NoRecordedValue() && + currentSum.StartTimestamp() < currentSum.Timestamp() { + + continue + } + tsi, found := tsm.get(current, currentSum.Attributes()) if !found { // initialize everything. 
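For context on `getAttributesSignature` (reworked in the hunk further up): the adjuster keys its timeseries map on a signature that must be identical for equal attribute sets regardless of insertion order, which is why a cleared `pcommon.Map` is built and hashed (via the `pkg/pdatautil` import shown above). The plain-map sketch below only demonstrates that property under those assumptions; it is not the actual implementation.

```go
package main

import (
	"crypto/md5"
	"fmt"
	"sort"
)

// attributesSignature illustrates the property the adjuster relies on:
// the same attribute set hashes to the same value regardless of insertion
// order, and empty values are dropped (mirroring the cleared map above).
func attributesSignature(attrs map[string]string) [16]byte {
	keys := make([]string, 0, len(attrs))
	for k, v := range attrs {
		if v == "" {
			continue
		}
		keys = append(keys, k)
	}
	sort.Strings(keys)
	h := md5.New()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write([]byte{0})
		h.Write([]byte(attrs[k]))
		h.Write([]byte{0})
	}
	var sig [16]byte
	copy(sig[:], h.Sum(nil))
	return sig
}

func main() {
	a := map[string]string{"pool": "heap", "area": "old", "unused": ""}
	b := map[string]string{"area": "old", "pool": "heap"}
	fmt.Println(attributesSignature(a) == attributesSignature(b)) // true
}
```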
@@ -380,11 +387,20 @@ func adjustMetricSum(tsm *timeseriesMap, current pmetric.Metric) { } } -func adjustMetricSummary(tsm *timeseriesMap, current pmetric.Metric) { +func (a *initialPointAdjuster) adjustMetricSummary(tsm *timeseriesMap, current pmetric.Metric) { currentPoints := current.Summary().DataPoints() for i := 0; i < currentPoints.Len(); i++ { currentSummary := currentPoints.At(i) + + // start timestamp was set from _created + if a.useCreatedMetric && + !currentSummary.Flags().NoRecordedValue() && + currentSummary.StartTimestamp() < currentSummary.Timestamp() { + + continue + } + tsi, found := tsm.get(current, currentSummary.Attributes()) if !found { // initialize everything. diff --git a/component/otelcol/receiver/prometheus/internal/metrics_adjuster_test.go b/component/otelcol/receiver/prometheus/internal/metrics_adjuster_test.go index 2504bc721226..df38dea9e968 100644 --- a/component/otelcol/receiver/prometheus/internal/metrics_adjuster_test.go +++ b/component/otelcol/receiver/prometheus/internal/metrics_adjuster_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal @@ -19,7 +8,7 @@ import ( "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" semconv "go.opentelemetry.io/collector/semconv/v1.8.0" "go.uber.org/zap" @@ -79,7 +68,7 @@ func TestGauge(t *testing.T) { adjusted: metrics(gaugeMetric(gauge1, doublePoint(k1v1k2v2, t3, t3, 55))), }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestSum(t *testing.T) { @@ -110,7 +99,7 @@ func TestSum(t *testing.T) { adjusted: metrics(sumMetric(sum1, doublePoint(k1v1k2v2, t3, t5, 72))), }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestSummaryNoCount(t *testing.T) { @@ -137,7 +126,7 @@ func TestSummaryNoCount(t *testing.T) { }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestSummaryFlagNoRecordedValue(t *testing.T) { @@ -154,7 +143,7 @@ func TestSummaryFlagNoRecordedValue(t *testing.T) { }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestSummary(t *testing.T) { @@ -197,7 +186,7 @@ func TestSummary(t *testing.T) { }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestHistogram(t *testing.T) 
{ @@ -220,7 +209,7 @@ func TestHistogram(t *testing.T) { adjusted: metrics(histogramMetric(histogram1, histogramPoint(k1v1k2v2, t3, t4, bounds0, []uint64{7, 4, 2, 12}))), }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestHistogramFlagNoRecordedValue(t *testing.T) { @@ -237,7 +226,7 @@ func TestHistogramFlagNoRecordedValue(t *testing.T) { }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestHistogramFlagNoRecordedValueFirstObservation(t *testing.T) { @@ -254,7 +243,7 @@ func TestHistogramFlagNoRecordedValueFirstObservation(t *testing.T) { }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestSummaryFlagNoRecordedValueFirstObservation(t *testing.T) { @@ -271,7 +260,7 @@ func TestSummaryFlagNoRecordedValueFirstObservation(t *testing.T) { }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestGaugeFlagNoRecordedValueFirstObservation(t *testing.T) { @@ -288,7 +277,7 @@ func TestGaugeFlagNoRecordedValueFirstObservation(t *testing.T) { }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestSumFlagNoRecordedValueFirstObservation(t *testing.T) { @@ -305,7 +294,7 @@ func TestSumFlagNoRecordedValueFirstObservation(t *testing.T) { }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestMultiMetrics(t *testing.T) { @@ -369,7 +358,7 @@ func TestMultiMetrics(t *testing.T) { ), }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestNewDataPointsAdded(t *testing.T) { @@ -431,7 +420,7 @@ func TestNewDataPointsAdded(t *testing.T) { ), }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestMultiTimeseries(t *testing.T) { @@ -490,7 +479,7 @@ func TestMultiTimeseries(t *testing.T) { ), }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestEmptyLabels(t *testing.T) { @@ -516,7 +505,7 @@ func TestEmptyLabels(t *testing.T) { adjusted: metrics(sumMetric(sum1, doublePoint(k1vEmptyk2vEmptyk3vEmpty, t1, t3, 88))), }, } - runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute), "job", "0", script) + runScript(t, NewInitialPointAdjuster(zap.NewNop(), time.Minute, true), "job", "0", script) } func TestTsGC(t *testing.T) { @@ -570,7 +559,7 @@ func TestTsGC(t *testing.T) { }, } - ma := NewInitialPointAdjuster(zap.NewNop(), time.Minute) + ma := NewInitialPointAdjuster(zap.NewNop(), time.Minute, true) // run round 1 runScript(t, ma, "job", "0", script1) @@ -630,7 
+619,7 @@ func TestJobGC(t *testing.T) { } gcInterval := 10 * time.Millisecond - ma := NewInitialPointAdjuster(zap.NewNop(), gcInterval) + ma := NewInitialPointAdjuster(zap.NewNop(), gcInterval, true) // run job 1, round 1 - all entries marked runScript(t, ma, "job1", "0", job1Script1) @@ -654,13 +643,6 @@ type metricsAdjusterTest struct { adjusted pmetric.Metrics } -func marshalMetric(t *testing.T, m pmetric.Metrics) string { - jm := &pmetric.JSONMarshaler{} - bytes, err := jm.MarshalMetrics(m) - assert.NoError(t, err) - return string(bytes) -} - func runScript(t *testing.T, ma MetricsAdjuster, job, instance string, tests []*metricsAdjusterTest) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { @@ -674,7 +656,26 @@ func runScript(t *testing.T, ma MetricsAdjuster, job, instance string, tests []* // Add the instance/job to the expected metrics as well. test.adjusted.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceInstanceID, instance) test.adjusted.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceName, job) - require.JSONEq(t, marshalMetric(t, test.adjusted), marshalMetric(t, adjusted)) + assert.EqualValues(t, test.adjusted, adjusted) }) } } + +func BenchmarkGetAttributesSignature(b *testing.B) { + attrs := pcommon.NewMap() + attrs.PutStr("key1", "some-random-test-value-1") + attrs.PutStr("key2", "some-random-test-value-2") + attrs.PutStr("key6", "some-random-test-value-6") + attrs.PutStr("key3", "some-random-test-value-3") + attrs.PutStr("key4", "some-random-test-value-4") + attrs.PutStr("key5", "some-random-test-value-5") + attrs.PutStr("key7", "some-random-test-value-7") + attrs.PutStr("key8", "some-random-test-value-8") + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + getAttributesSignature(attrs) + } +} diff --git a/component/otelcol/receiver/prometheus/internal/metricsutil_test.go b/component/otelcol/receiver/prometheus/internal/metricsutil_test.go index ea29c0e61171..4ba25cfe846e 100644 --- a/component/otelcol/receiver/prometheus/internal/metricsutil_test.go +++ b/component/otelcol/receiver/prometheus/internal/metricsutil_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal diff --git a/component/otelcol/receiver/prometheus/internal/prom_to_otlp.go b/component/otelcol/receiver/prometheus/internal/prom_to_otlp.go index 9b14cd9d053c..1b0d00a589ea 100644 --- a/component/otelcol/receiver/prometheus/internal/prom_to_otlp.go +++ b/component/otelcol/receiver/prometheus/internal/prom_to_otlp.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal" diff --git a/component/otelcol/receiver/prometheus/internal/prom_to_otlp_test.go b/component/otelcol/receiver/prometheus/internal/prom_to_otlp_test.go index 63f167ca5363..a532637e1e2d 100644 --- a/component/otelcol/receiver/prometheus/internal/prom_to_otlp_test.go +++ b/component/otelcol/receiver/prometheus/internal/prom_to_otlp_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal @@ -284,7 +273,6 @@ func TestCreateNodeAndResourcePromToOTLP(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got := CreateResource(tt.job, tt.instance, tt.sdLabels) require.Equal(t, tt.want.Attributes().AsRaw(), got.Attributes().AsRaw()) - require.Equal(t, tt.want.DroppedAttributesCount(), got.DroppedAttributesCount()) }) } } diff --git a/component/otelcol/receiver/prometheus/internal/staleness_end_to_end_test.go b/component/otelcol/receiver/prometheus/internal/staleness_end_to_end_test.go new file mode 100644 index 000000000000..224c3cd4e1f0 --- /dev/null +++ b/component/otelcol/receiver/prometheus/internal/staleness_end_to_end_test.go @@ -0,0 +1,241 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal_test + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + "time" + + "go.uber.org/atomic" + + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/prompb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/confmap/provider/fileprovider" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/otelcol" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/batchprocessor" + "go.opentelemetry.io/collector/receiver" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver" +) + +// Test that staleness markers are emitted for timeseries that intermittently disappear. 
+// This test runs the entire collector and end-to-end scrapes then checks with the +// Prometheus remotewrite exporter that staleness markers are emitted per timeseries. +// See https://github.com/open-telemetry/opentelemetry-collector/issues/3413 +func TestStalenessMarkersEndToEnd(t *testing.T) { + if testing.Short() { + t.Skip("This test can take a long time") + } + + ctx, cancel := context.WithCancel(context.Background()) + + // 1. Setup the server that sends series that intermittently appear and disappear. + n := &atomic.Uint64{} + scrapeServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + // Increment the scrape count atomically per scrape. + i := n.Add(1) + + select { + case <-ctx.Done(): + return + default: + } + + // Alternate metrics per scrape so that every one of + // them will be reported as stale. + if i%2 == 0 { + fmt.Fprintf(rw, ` +# HELP jvm_memory_bytes_used Used bytes of a given JVM memory area. +# TYPE jvm_memory_bytes_used gauge +jvm_memory_bytes_used{area="heap"} %.1f`, float64(i)) + } else { + fmt.Fprintf(rw, ` +# HELP jvm_memory_pool_bytes_used Used bytes of a given JVM memory pool. +# TYPE jvm_memory_pool_bytes_used gauge +jvm_memory_pool_bytes_used{pool="CodeHeap 'non-nmethods'"} %.1f`, float64(i)) + } + })) + defer scrapeServer.Close() + + serverURL, err := url.Parse(scrapeServer.URL) + require.NoError(t, err) + + // 2. Set up the Prometheus RemoteWrite endpoint. + prweUploads := make(chan *prompb.WriteRequest) + prweServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + // Snappy decode the uploads. + payload, rerr := io.ReadAll(req.Body) + require.NoError(t, rerr) + + recv := make([]byte, len(payload)) + decoded, derr := snappy.Decode(recv, payload) + require.NoError(t, derr) + + writeReq := new(prompb.WriteRequest) + require.NoError(t, proto.Unmarshal(decoded, writeReq)) + + select { + case <-ctx.Done(): + return + case prweUploads <- writeReq: + } + })) + defer prweServer.Close() + + // 3. Set the OpenTelemetry Prometheus receiver. + cfg := fmt.Sprintf(` +receivers: + prometheus: + config: + scrape_configs: + - job_name: 'test' + scrape_interval: 100ms + static_configs: + - targets: [%q] + +processors: + batch: +exporters: + prometheusremotewrite: + endpoint: %q + tls: + insecure: true + +service: + pipelines: + metrics: + receivers: [prometheus] + processors: [batch] + exporters: [prometheusremotewrite]`, serverURL.Host, prweServer.URL) + + confFile, err := os.CreateTemp(os.TempDir(), "conf-") + require.Nil(t, err) + defer os.Remove(confFile.Name()) + _, err = confFile.Write([]byte(cfg)) + require.Nil(t, err) + // 4. Run the OpenTelemetry Collector. 
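Before the collector wiring that follows this step, a short aside on what step 6 ultimately asserts: Prometheus marks a vanished series by writing a staleness marker, a specific NaN bit pattern, so it survives the remote-write round trip and can be told apart from an ordinary NaN with `value.IsStaleNaN`. A minimal sketch using the same `model/value` package the test imports:

```go
package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/value"
)

func main() {
	// The staleness marker is a dedicated NaN bit pattern, distinguishable
	// from an ordinary NaN produced by math.NaN().
	stale := math.Float64frombits(value.StaleNaN)
	fmt.Println(math.IsNaN(stale))            // true
	fmt.Println(value.IsStaleNaN(stale))      // true
	fmt.Println(value.IsStaleNaN(math.NaN())) // false
}
```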
+	receivers, err := receiver.MakeFactoryMap(prometheusreceiver.NewFactory())
+	require.Nil(t, err)
+	exporters, err := exporter.MakeFactoryMap(prometheusremotewriteexporter.NewFactory())
+	require.Nil(t, err)
+	processors, err := processor.MakeFactoryMap(batchprocessor.NewFactory())
+	require.Nil(t, err)
+
+	factories := otelcol.Factories{
+		Receivers:  receivers,
+		Exporters:  exporters,
+		Processors: processors,
+	}
+
+	fmp := fileprovider.New()
+	configProvider, err := otelcol.NewConfigProvider(
+		otelcol.ConfigProviderSettings{
+			ResolverSettings: confmap.ResolverSettings{
+				URIs:      []string{confFile.Name()},
+				Providers: map[string]confmap.Provider{fmp.Scheme(): fmp},
+			},
+		})
+	require.NoError(t, err)
+
+	appSettings := otelcol.CollectorSettings{
+		Factories:      factories,
+		ConfigProvider: configProvider,
+		BuildInfo: component.BuildInfo{
+			Command:     "otelcol",
+			Description: "OpenTelemetry Collector",
+			Version:     "tests",
+		},
+		LoggingOptions: []zap.Option{
+			// Turn off the verbose logging from the collector.
+			zap.WrapCore(func(zapcore.Core) zapcore.Core {
+				return zapcore.NewNopCore()
+			}),
+		},
+	}
+
+	app, err := otelcol.NewCollector(appSettings)
+	require.Nil(t, err)
+
+	go func() {
+		assert.NoError(t, app.Run(context.Background()))
+	}()
+	defer app.Shutdown()
+
+	// Wait until the collector has actually started.
+	for notYetStarted := true; notYetStarted; {
+		state := app.GetState()
+		switch state {
+		case otelcol.StateRunning, otelcol.StateClosed, otelcol.StateClosing:
+			notYetStarted = false
+		case otelcol.StateStarting:
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+
+	// 5. Let's wait on 10 fetches.
+	var wReqL []*prompb.WriteRequest
+	for i := 0; i < 10; i++ {
+		wReqL = append(wReqL, <-prweUploads)
+	}
+	defer cancel()
+
+	// 6. Assert that we encounter the stale markers aka special NaNs for the various time series.
+	staleMarkerCount := 0
+	totalSamples := 0
+	require.True(t, len(wReqL) > 0, "Expecting at least one WriteRequest")
+	for i, wReq := range wReqL {
+		name := fmt.Sprintf("WriteRequest#%d", i)
+		require.True(t, len(wReq.Timeseries) > 0, "Expecting at least 1 timeSeries for:: "+name)
+		for j, ts := range wReq.Timeseries {
+			fullName := fmt.Sprintf("%s/TimeSeries#%d", name, j)
+			assert.True(t, len(ts.Samples) > 0, "Expected at least 1 Sample in:: "+fullName)
+
+			// We are strictly counting series directly included in the scrapes, and no
+			// internal timeseries like "up" nor "scrape_seconds" etc.
+			metricName := ""
+			for _, label := range ts.Labels {
+				if label.Name == "__name__" {
+					metricName = label.Value
+				}
+			}
+			if !strings.HasPrefix(metricName, "jvm") {
+				continue
+			}
+
+			for _, sample := range ts.Samples {
+				totalSamples++
+				if value.IsStaleNaN(sample.Value) {
+					staleMarkerCount++
+				}
+			}
+		}
+	}
+
+	require.True(t, totalSamples > 0, "Expected at least 1 sample")
+	// On every alternate scrape the prior scrape will be reported as stale.
+	// Expect at least:
+	// * The first scrape will NOT return stale markers
+	// * (N-1 / alternatives) = ((10-1) / 2) = ~40% chance of stale markers being emitted.
+ chance := float64(staleMarkerCount) / float64(totalSamples) + require.True(t, chance >= 0.4, fmt.Sprintf("Expected at least one stale marker: %.3f", chance)) +} diff --git a/component/otelcol/receiver/prometheus/internal/starttimemetricadjuster.go b/component/otelcol/receiver/prometheus/internal/starttimemetricadjuster.go index a3169049e132..9195136e7841 100644 --- a/component/otelcol/receiver/prometheus/internal/starttimemetricadjuster.go +++ b/component/otelcol/receiver/prometheus/internal/starttimemetricadjuster.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal" @@ -79,6 +68,9 @@ func (stma *startTimeMetricAdjuster) AdjustMetrics(metrics pmetric.Metrics) erro dp.SetStartTimestamp(startTimeTs) } + case pmetric.MetricTypeEmpty, pmetric.MetricTypeExponentialHistogram: + fallthrough + default: stma.logger.Warn("Unknown metric type", zap.String("type", metric.Type().String())) } @@ -110,6 +102,8 @@ func (stma *startTimeMetricAdjuster) getStartTime(metrics pmetric.Metrics) (floa } return metric.Sum().DataPoints().At(0).DoubleValue(), nil + case pmetric.MetricTypeEmpty, pmetric.MetricTypeHistogram, pmetric.MetricTypeExponentialHistogram, pmetric.MetricTypeSummary: + fallthrough default: return 0, errUnsupportedTypeStartTimeMetric } diff --git a/component/otelcol/receiver/prometheus/internal/starttimemetricadjuster_test.go b/component/otelcol/receiver/prometheus/internal/starttimemetricadjuster_test.go index 0d4e1c66a277..89e4b10f8e5f 100644 --- a/component/otelcol/receiver/prometheus/internal/starttimemetricadjuster_test.go +++ b/component/otelcol/receiver/prometheus/internal/starttimemetricadjuster_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package internal @@ -150,6 +139,7 @@ func TestStartTimeMetricMatch(t *testing.T) { for l := 0; l < dps.Len(); l++ { assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) } + case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge, pmetric.MetricTypeExponentialHistogram: } } } diff --git a/component/otelcol/receiver/prometheus/internal/transaction.go b/component/otelcol/receiver/prometheus/internal/transaction.go index d942e2947d57..6b59674c199d 100644 --- a/component/otelcol/receiver/prometheus/internal/transaction.go +++ b/component/otelcol/receiver/prometheus/internal/transaction.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal" @@ -28,6 +17,7 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -37,20 +27,36 @@ import ( ) const ( - targetMetricName = "target_info" + targetMetricName = "target_info" + scopeMetricName = "otel_scope_info" + scopeNameLabel = "otel_scope_name" + scopeVersionLabel = "otel_scope_version" + receiverName = "otelcol/prometheusreceiver" ) type transaction struct { - isNew bool - ctx context.Context - families map[string]*metricFamily - mc scrape.MetricMetadataStore - sink consumer.Metrics - externalLabels labels.Labels - nodeResource pcommon.Resource - logger *zap.Logger - metricAdjuster MetricsAdjuster - obsrecv *receiverhelper.ObsReport + isNew bool + trimSuffixes bool + ctx context.Context + families map[scopeID]map[string]*metricFamily + mc scrape.MetricMetadataStore + sink consumer.Metrics + externalLabels labels.Labels + nodeResource pcommon.Resource + scopeAttributes map[scopeID]pcommon.Map + logger *zap.Logger + buildInfo component.BuildInfo + metricAdjuster MetricsAdjuster + obsrecv *receiverhelper.ObsReport + // Used as buffer to calculate series ref hash. 
+ bufBytes []byte +} + +var emptyScopeID scopeID + +type scopeID struct { + name string + version string } func newTransaction( @@ -59,22 +65,27 @@ func newTransaction( sink consumer.Metrics, externalLabels labels.Labels, settings receiver.CreateSettings, - obsrecv *receiverhelper.ObsReport) *transaction { + obsrecv *receiverhelper.ObsReport, + trimSuffixes bool) *transaction { return &transaction{ - ctx: ctx, - families: make(map[string]*metricFamily), - isNew: true, - sink: sink, - metricAdjuster: metricAdjuster, - externalLabels: externalLabels, - logger: settings.Logger, - obsrecv: obsrecv, + ctx: ctx, + families: make(map[scopeID]map[string]*metricFamily), + isNew: true, + trimSuffixes: trimSuffixes, + sink: sink, + metricAdjuster: metricAdjuster, + externalLabels: externalLabels, + logger: settings.Logger, + buildInfo: settings.BuildInfo, + obsrecv: obsrecv, + bufBytes: make([]byte, 0, 1024), + scopeAttributes: make(map[scopeID]pcommon.Map), } } // Append always returns 0 to disable label caching. -func (t *transaction) Append(ref storage.SeriesRef, ls labels.Labels, atMs int64, val float64) (storage.SeriesRef, error) { +func (t *transaction) Append(_ storage.SeriesRef, ls labels.Labels, atMs int64, val float64) (storage.SeriesRef, error) { select { case <-t.ctx.Done(): return 0, errTransactionAborted @@ -123,27 +134,87 @@ func (t *transaction) Append(ref storage.SeriesRef, ls labels.Labels, atMs int64 // For the `target_info` metric we need to convert it to resource attributes. if metricName == targetMetricName { - return 0, t.AddTargetInfo(ls) + t.AddTargetInfo(ls) + return 0, nil + } + + // For the `otel_scope_info` metric we need to convert it to scope attributes. + if metricName == scopeMetricName { + t.addScopeInfo(ls) + return 0, nil + } + + curMF := t.getOrCreateMetricFamily(getScopeID(ls), metricName) + err := curMF.addSeries(t.getSeriesRef(ls, curMF.mtype), metricName, ls, atMs, val) + if err != nil { + t.logger.Warn("failed to add datapoint", zap.Error(err), zap.String("metric_name", metricName), zap.Any("labels", ls)) } - curMF, ok := t.families[metricName] + return 0, nil // never return errors, as that fails the whole scrape +} + +func (t *transaction) getOrCreateMetricFamily(scope scopeID, mn string) *metricFamily { + _, ok := t.families[scope] + if !ok { + t.families[scope] = make(map[string]*metricFamily) + } + curMf, ok := t.families[scope][mn] if !ok { - familyName := normalizeMetricName(metricName) - if mf, ok := t.families[familyName]; ok && mf.includesMetric(metricName) { - curMF = mf + fn := mn + if _, ok := t.mc.GetMetadata(mn); !ok { + fn = normalizeMetricName(mn) + } + if mf, ok := t.families[scope][fn]; ok && mf.includesMetric(mn) { + curMf = mf } else { - curMF = newMetricFamily(metricName, t.mc, t.logger) - t.families[curMF.name] = curMF + curMf = newMetricFamily(mn, t.mc, t.logger) + t.families[scope][curMf.name] = curMf + } + } + return curMf +} + +func (t *transaction) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + select { + case <-t.ctx.Done(): + return 0, errTransactionAborted + default: + } + + if t.isNew { + if err := t.initTransaction(l); err != nil { + return 0, err } } - return 0, curMF.Add(metricName, ls, atMs, val) + l = l.WithoutEmpty() + + if dupLabel, hasDup := l.HasDuplicateLabelNames(); hasDup { + return 0, fmt.Errorf("invalid sample: non-unique label names: %q", dupLabel) + } + + mn := l.Get(model.MetricNameLabel) + if mn == "" { + return 0, errMetricNameNotFound + } + + mf := 
t.getOrCreateMetricFamily(getScopeID(l), mn) + mf.addExemplar(t.getSeriesRef(l, mf.mtype), e) + + return 0, nil } -func (t *transaction) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { +func (t *transaction) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { + //TODO: implement this func return 0, nil } +func (t *transaction) getSeriesRef(ls labels.Labels, mtype pmetric.MetricType) uint64 { + var hash uint64 + hash, t.bufBytes = getSeriesRef(t.bufBytes, ls, mtype) + return hash +} + // getMetrics returns all metrics to the given slice. // The only error returned by this function is errNoDataToBuild. func (t *transaction) getMetrics(resource pcommon.Resource) (pmetric.Metrics, error) { @@ -154,15 +225,47 @@ func (t *transaction) getMetrics(resource pcommon.Resource) (pmetric.Metrics, er md := pmetric.NewMetrics() rms := md.ResourceMetrics().AppendEmpty() resource.CopyTo(rms.Resource()) - metrics := rms.ScopeMetrics().AppendEmpty().Metrics() - for _, mf := range t.families { - mf.appendMetric(metrics) + for scope, mfs := range t.families { + ils := rms.ScopeMetrics().AppendEmpty() + // If metrics don't include otel_scope_name or otel_scope_version + // labels, use the receiver name and version. + if scope == emptyScopeID { + ils.Scope().SetName(receiverName) + ils.Scope().SetVersion(t.buildInfo.Version) + } else { + // Otherwise, use the scope that was provided with the metrics. + ils.Scope().SetName(scope.name) + ils.Scope().SetVersion(scope.version) + // If we got an otel_scope_info metric for that scope, get scope + // attributes from it. + attributes, ok := t.scopeAttributes[scope] + if ok { + attributes.CopyTo(ils.Scope().Attributes()) + } + } + metrics := ils.Metrics() + for _, mf := range mfs { + mf.appendMetric(metrics, t.trimSuffixes) + } } return md, nil } +func getScopeID(ls labels.Labels) scopeID { + var scope scopeID + for _, lbl := range ls { + if lbl.Name == scopeNameLabel { + scope.name = lbl.Value + } + if lbl.Name == scopeVersionLabel { + scope.version = lbl.Value + } + } + return scope +} + func (t *transaction) initTransaction(labels labels.Labels) error { target, ok := scrape.TargetFromContext(t.ctx) if !ok { @@ -213,26 +316,41 @@ func (t *transaction) Rollback() error { return nil } -func (t *transaction) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { +func (t *transaction) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { //TODO: implement this func return 0, nil } -func (t *transaction) AppendHistogram(ref storage.SeriesRef, l labels.Labels, ts int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { - //TODO: implement this func - return 0, nil -} - -func (t *transaction) AddTargetInfo(labels labels.Labels) error { +func (t *transaction) AddTargetInfo(labels labels.Labels) { attrs := t.nodeResource.Attributes() - for _, lbl := range labels { if lbl.Name == model.JobLabel || lbl.Name == model.InstanceLabel || lbl.Name == model.MetricNameLabel { continue } + attrs.PutStr(lbl.Name, lbl.Value) + } +} +func (t *transaction) addScopeInfo(labels labels.Labels) { + attrs := pcommon.NewMap() + scope := scopeID{} + for _, lbl := range labels { + if lbl.Name == model.JobLabel || lbl.Name == model.InstanceLabel || lbl.Name == model.MetricNameLabel { + continue + } + if lbl.Name == 
scopeNameLabel { + scope.name = lbl.Value + continue + } + if lbl.Name == scopeVersionLabel { + scope.version = lbl.Value + continue + } attrs.PutStr(lbl.Name, lbl.Value) } + t.scopeAttributes[scope] = attrs +} - return nil +func getSeriesRef(bytes []byte, ls labels.Labels, mtype pmetric.MetricType) (uint64, []byte) { + return ls.HashWithoutLabels(bytes, getSortedNotUsefulLabels(mtype)...) } diff --git a/component/otelcol/receiver/prometheus/internal/transaction_test.go b/component/otelcol/receiver/prometheus/internal/transaction_test.go index 279093185a95..f8e7fb286cdb 100644 --- a/component/otelcol/receiver/prometheus/internal/transaction_test.go +++ b/component/otelcol/receiver/prometheus/internal/transaction_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal @@ -21,6 +10,7 @@ import ( "time" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/scrape" @@ -32,6 +22,8 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/receiverhelper" "go.opentelemetry.io/collector/receiver/receivertest" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" ) const ( @@ -61,24 +53,24 @@ var ( ) func TestTransactionCommitWithoutAdding(t *testing.T) { - tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t)) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) assert.NoError(t, tr.Commit()) } func TestTransactionRollbackDoesNothing(t *testing.T) { - tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t)) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) assert.NoError(t, tr.Rollback()) } func TestTransactionUpdateMetadataDoesNothing(t *testing.T) { - tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t)) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) _, err := tr.UpdateMetadata(0, labels.New(), metadata.Metadata{}) assert.NoError(t, err) } func TestTransactionAppendNoTarget(t *testing.T) { badLabels := labels.FromStrings(model.MetricNameLabel, "counter_test") - tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t)) + 
tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) _, err := tr.Append(0, badLabels, time.Now().Unix()*1000, 1.0) assert.Error(t, err) } @@ -88,7 +80,7 @@ func TestTransactionAppendNoMetricName(t *testing.T) { model.InstanceLabel: "localhost:8080", model.JobLabel: "test2", }) - tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t)) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) _, err := tr.Append(0, jobNotFoundLb, time.Now().Unix()*1000, 1.0) assert.ErrorIs(t, err, errMetricNameNotFound) @@ -96,7 +88,7 @@ func TestTransactionAppendNoMetricName(t *testing.T) { } func TestTransactionAppendEmptyMetricName(t *testing.T) { - tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t)) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, consumertest.NewNop(), nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) _, err := tr.Append(0, labels.FromMap(map[string]string{ model.InstanceLabel: "localhost:8080", model.JobLabel: "test2", @@ -107,7 +99,7 @@ func TestTransactionAppendEmptyMetricName(t *testing.T) { func TestTransactionAppendResource(t *testing.T) { sink := new(consumertest.MetricsSink) - tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t)) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) _, err := tr.Append(0, labels.FromMap(map[string]string{ model.InstanceLabel: "localhost:8080", model.JobLabel: "test", @@ -128,6 +120,28 @@ func TestTransactionAppendResource(t *testing.T) { require.Equal(t, expectedResource, gotResource) } +func TestReceiverVersionAndNameAreAttached(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) + _, err := tr.Append(0, labels.FromMap(map[string]string{ + model.InstanceLabel: "localhost:8080", + model.JobLabel: "test", + model.MetricNameLabel: "counter_test", + }), time.Now().Unix()*1000, 1.0) + assert.NoError(t, err) + assert.NoError(t, tr.Commit()) + + expectedResource := CreateResource("test", "localhost:8080", labels.FromStrings(model.SchemeLabel, "http")) + mds := sink.AllMetrics() + require.Len(t, mds, 1) + gotResource := mds[0].ResourceMetrics().At(0).Resource() + require.Equal(t, expectedResource, gotResource) + + gotScope := mds[0].ResourceMetrics().At(0).ScopeMetrics().At(0).Scope() + require.Equal(t, receiverName, gotScope.Name()) + require.Equal(t, component.NewDefaultBuildInfo().Version, gotScope.Version()) +} + func TestTransactionCommitErrorWhenAdjusterError(t *testing.T) { goodLabels := labels.FromMap(map[string]string{ model.InstanceLabel: "localhost:8080", @@ -136,7 +150,7 @@ func TestTransactionCommitErrorWhenAdjusterError(t *testing.T) { }) sink := new(consumertest.MetricsSink) adjusterErr := errors.New("adjuster error") - tr := newTransaction(scrapeCtx, &errorAdjuster{err: adjusterErr}, sink, nil, receivertest.NewNopCreateSettings(), 
nopObsRecv(t)) + tr := newTransaction(scrapeCtx, &errorAdjuster{err: adjusterErr}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) _, err := tr.Append(0, goodLabels, time.Now().Unix()*1000, 1.0) assert.NoError(t, err) assert.ErrorIs(t, tr.Commit(), adjusterErr) @@ -145,7 +159,7 @@ func TestTransactionCommitErrorWhenAdjusterError(t *testing.T) { // Ensure that we reject duplicate label keys. See https://github.com/open-telemetry/wg-prometheus/issues/44. func TestTransactionAppendDuplicateLabels(t *testing.T) { sink := new(consumertest.MetricsSink) - tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t)) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) dupLabels := labels.FromStrings( model.InstanceLabel, "0.0.0.0:8855", @@ -163,7 +177,18 @@ func TestTransactionAppendDuplicateLabels(t *testing.T) { func TestTransactionAppendHistogramNoLe(t *testing.T) { sink := new(consumertest.MetricsSink) - tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t)) + receiverSettings := receivertest.NewNopCreateSettings() + core, observedLogs := observer.New(zap.InfoLevel) + receiverSettings.Logger = zap.New(core) + tr := newTransaction( + scrapeCtx, + &startTimeAdjuster{startTime: startTimestamp}, + sink, + nil, + receiverSettings, + nopObsRecv(t), + false, + ) goodLabels := labels.FromStrings( model.InstanceLabel, "0.0.0.0:8855", @@ -172,12 +197,28 @@ func TestTransactionAppendHistogramNoLe(t *testing.T) { ) _, err := tr.Append(0, goodLabels, 1917, 1.0) - require.ErrorIs(t, err, errEmptyLeLabel) + require.NoError(t, err) + assert.Equal(t, 1, observedLogs.Len()) + assert.Equal(t, 1, observedLogs.FilterMessage("failed to add datapoint").Len()) + + assert.NoError(t, tr.Commit()) + assert.Len(t, sink.AllMetrics(), 0) } func TestTransactionAppendSummaryNoQuantile(t *testing.T) { sink := new(consumertest.MetricsSink) - tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t)) + receiverSettings := receivertest.NewNopCreateSettings() + core, observedLogs := observer.New(zap.InfoLevel) + receiverSettings.Logger = zap.New(core) + tr := newTransaction( + scrapeCtx, + &startTimeAdjuster{startTime: startTimestamp}, + sink, + nil, + receiverSettings, + nopObsRecv(t), + false, + ) goodLabels := labels.FromStrings( model.InstanceLabel, "0.0.0.0:8855", @@ -186,18 +227,139 @@ func TestTransactionAppendSummaryNoQuantile(t *testing.T) { ) _, err := tr.Append(0, goodLabels, 1917, 1.0) - require.ErrorIs(t, err, errEmptyQuantileLabel) + require.NoError(t, err) + assert.Equal(t, 1, observedLogs.Len()) + assert.Equal(t, 1, observedLogs.FilterMessage("failed to add datapoint").Len()) + + assert.NoError(t, tr.Commit()) + assert.Len(t, sink.AllMetrics(), 0) +} + +func TestTransactionAppendValidAndInvalid(t *testing.T) { + sink := new(consumertest.MetricsSink) + receiverSettings := receivertest.NewNopCreateSettings() + core, observedLogs := observer.New(zap.InfoLevel) + receiverSettings.Logger = zap.New(core) + tr := newTransaction( + scrapeCtx, + &startTimeAdjuster{startTime: startTimestamp}, + sink, + nil, + receiverSettings, + nopObsRecv(t), + false, + ) + + // a valid counter + _, err := tr.Append(0, labels.FromMap(map[string]string{ + model.InstanceLabel: 
"localhost:8080", + model.JobLabel: "test", + model.MetricNameLabel: "counter_test", + }), time.Now().Unix()*1000, 1.0) + assert.NoError(t, err) + + // summary without quantiles, should be ignored + summarylabels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "summary_test", + ) + + _, err = tr.Append(0, summarylabels, 1917, 1.0) + require.NoError(t, err) + + assert.Equal(t, 1, observedLogs.Len()) + assert.Equal(t, 1, observedLogs.FilterMessage("failed to add datapoint").Len()) + + assert.NoError(t, tr.Commit()) + expectedResource := CreateResource("test", "localhost:8080", labels.FromStrings(model.SchemeLabel, "http")) + mds := sink.AllMetrics() + require.Len(t, mds, 1) + gotResource := mds[0].ResourceMetrics().At(0).Resource() + require.Equal(t, expectedResource, gotResource) + require.Equal(t, 1, mds[0].MetricCount()) +} + +func TestAppendExemplarWithNoMetricName(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) + + labels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + ) + + _, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0}) + assert.Equal(t, errMetricNameNotFound, err) +} + +func TestAppendExemplarWithEmptyMetricName(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) + + labels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "", + ) + _, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0}) + assert.Equal(t, errMetricNameNotFound, err) +} + +func TestAppendExemplarWithDuplicateLabels(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) + + labels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "", + "a", "b", + "a", "c", + ) + _, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0}) + require.Error(t, err) + assert.Contains(t, err.Error(), `invalid sample: non-unique label names: "a"`) +} + +func TestAppendExemplarWithoutAddingMetric(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) + + labels := labels.FromStrings( + model.InstanceLabel, "0.0.0.0:8855", + model.JobLabel, "test", + model.MetricNameLabel, "counter_test", + "a", "b", + ) + _, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0}) + assert.NoError(t, err) +} + +func TestAppendExemplarWithNoLabels(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) + + _, err := tr.AppendExemplar(0, nil, exemplar.Exemplar{Value: 0}) + assert.Equal(t, errNoJobInstance, err) +} + +func TestAppendExemplarWithEmptyLabelArray(t *testing.T) { + sink := new(consumertest.MetricsSink) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, 
receivertest.NewNopCreateSettings(), nopObsRecv(t), false) + + _, err := tr.AppendExemplar(0, []labels.Label{}, exemplar.Exemplar{Value: 0}) + assert.Equal(t, errNoJobInstance, err) } func nopObsRecv(t *testing.T) *receiverhelper.ObsReport { - res, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ + obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: component.NewID("prometheus"), Transport: transport, ReceiverCreateSettings: receivertest.NewNopCreateSettings(), }) - - assert.NoError(t, err) - return res + require.NoError(t, err) + return obsrecv } func TestMetricBuilderCounters(t *testing.T) { @@ -207,7 +369,58 @@ func TestMetricBuilderCounters(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("counter_test", 100, "foo", "bar"), + createDataPoint("counter_test", 100, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("counter_test") + sum := m0.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + sum.SetIsMonotonic(true) + pt0 := sum.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(startTimestamp) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "single-item-with-exemplars", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint( + "counter_test", + 100, + []exemplar.Exemplar{ + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: model.MetricNameLabel, Value: "counter_test"}, {Name: model.JobLabel, Value: "job"}, {Name: model.InstanceLabel, Value: "instance"}, {Name: "foo", Value: "bar"}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: ""}, {Name: "span_id", Value: ""}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "10a47365b8aa04e08291fab9deca84db6170"}, {Name: "span_id", Value: "719cee4a669fd7d109ff"}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "174137cab66dc880"}, {Name: "span_id", Value: "dfa4597a9d"}}, + }, + }, + "foo", "bar"), }, }, }, @@ -225,6 +438,33 @@ func TestMetricBuilderCounters(t *testing.T) { pt0.SetTimestamp(tsNanos) pt0.Attributes().PutStr("foo", "bar") + e0 := pt0.Exemplars().AppendEmpty() + e0.SetTimestamp(timestampFromMs(1663113420863)) + e0.SetDoubleValue(1) + e0.FilteredAttributes().PutStr(model.MetricNameLabel, "counter_test") + e0.FilteredAttributes().PutStr(model.JobLabel, "job") + e0.FilteredAttributes().PutStr(model.InstanceLabel, "instance") + e0.FilteredAttributes().PutStr("foo", "bar") + + e1 := pt0.Exemplars().AppendEmpty() + e1.SetTimestamp(timestampFromMs(1663113420863)) + e1.SetDoubleValue(1) + e1.FilteredAttributes().PutStr("foo", "bar") + + e2 := pt0.Exemplars().AppendEmpty() + e2.SetTimestamp(timestampFromMs(1663113420863)) + e2.SetDoubleValue(1) + e2.FilteredAttributes().PutStr("foo", "bar") + e2.SetTraceID([16]byte{0x10, 0xa4, 0x73, 0x65, 0xb8, 0xaa, 0x04, 0xe0, 0x82, 0x91, 0xfa, 0xb9, 0xde, 0xca, 0x84, 0xdb}) + e2.SetSpanID([8]byte{0x71, 0x9c, 0xee, 0x4a, 0x66, 0x9f, 0xd7, 0xd1}) + + e3 := pt0.Exemplars().AppendEmpty() + e3.SetTimestamp(timestampFromMs(1663113420863)) + 
e3.SetDoubleValue(1) + e3.FilteredAttributes().PutStr("foo", "bar") + e3.SetTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x41, 0x37, 0xca, 0xb6, 0x6d, 0xc8, 0x80}) + e3.SetSpanID([8]byte{0x00, 0x00, 0x00, 0xdf, 0xa4, 0x59, 0x7a, 0x9d}) + return []pmetric.Metrics{md0} }, }, @@ -233,8 +473,8 @@ func TestMetricBuilderCounters(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("counter_test", 150, "foo", "bar"), - createDataPoint("counter_test", 25, "foo", "other"), + createDataPoint("counter_test", 150, nil, "foo", "bar"), + createDataPoint("counter_test", 25, nil, "foo", "other"), }, }, }, @@ -266,9 +506,9 @@ func TestMetricBuilderCounters(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("counter_test", 150, "foo", "bar"), - createDataPoint("counter_test", 25, "foo", "other"), - createDataPoint("counter_test2", 100, "foo", "bar"), + createDataPoint("counter_test", 150, nil, "foo", "bar"), + createDataPoint("counter_test", 25, nil, "foo", "other"), + createDataPoint("counter_test2", 100, nil, "foo", "bar"), }, }, }, @@ -311,7 +551,7 @@ func TestMetricBuilderCounters(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("poor_name_count", 100, "foo", "bar"), + createDataPoint("poor_name_count", 100, nil, "foo", "bar"), }, }, }, @@ -348,12 +588,77 @@ func TestMetricBuilderGauges(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("gauge_test", 100, "foo", "bar"), + createDataPoint("gauge_test", 100, nil, "foo", "bar"), + }, + }, + { + pts: []*testDataPoint{ + createDataPoint("gauge_test", 90, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("gauge_test") + gauge0 := m0.SetEmptyGauge() + pt0 := gauge0.DataPoints().AppendEmpty() + pt0.SetDoubleValue(100.0) + pt0.SetStartTimestamp(0) + pt0.SetTimestamp(tsNanos) + pt0.Attributes().PutStr("foo", "bar") + + md1 := pmetric.NewMetrics() + mL1 := md1.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m1 := mL1.AppendEmpty() + m1.SetName("gauge_test") + gauge1 := m1.SetEmptyGauge() + pt1 := gauge1.DataPoints().AppendEmpty() + pt1.SetDoubleValue(90.0) + pt1.SetStartTimestamp(0) + pt1.SetTimestamp(tsPlusIntervalNanos) + pt1.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0, md1} + }, + }, + { + name: "one-gauge-with-exemplars", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint( + "gauge_test", + 100, + []exemplar.Exemplar{ + { + Value: 2, + Ts: 1663350815890, + Labels: []labels.Label{{Name: model.MetricNameLabel, Value: "counter_test"}, {Name: model.JobLabel, Value: "job"}, {Name: model.InstanceLabel, Value: "instance"}, {Name: "foo", Value: "bar"}}, + }, + { + Value: 2, + Ts: 1663350815890, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: ""}, {Name: "span_id", Value: ""}}, + }, + { + Value: 2, + Ts: 1663350815890, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "10a47365b8aa04e08291fab9deca84db6170"}, {Name: "span_id", Value: "719cee4a669fd7d109ff"}}, + }, + { + Value: 2, + Ts: 1663350815890, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "174137cab66dc880"}, {Name: "span_id", Value: "dfa4597a9d"}}, + }, + }, + "foo", "bar"), }, }, { pts: []*testDataPoint{ - 
createDataPoint("gauge_test", 90, "foo", "bar"), + createDataPoint("gauge_test", 90, nil, "foo", "bar"), }, }, }, @@ -369,6 +674,33 @@ func TestMetricBuilderGauges(t *testing.T) { pt0.SetTimestamp(tsNanos) pt0.Attributes().PutStr("foo", "bar") + e0 := pt0.Exemplars().AppendEmpty() + e0.SetTimestamp(timestampFromMs(1663350815890)) + e0.SetDoubleValue(2) + e0.FilteredAttributes().PutStr(model.MetricNameLabel, "counter_test") + e0.FilteredAttributes().PutStr(model.JobLabel, "job") + e0.FilteredAttributes().PutStr(model.InstanceLabel, "instance") + e0.FilteredAttributes().PutStr("foo", "bar") + + e1 := pt0.Exemplars().AppendEmpty() + e1.SetTimestamp(timestampFromMs(1663350815890)) + e1.SetDoubleValue(2) + e1.FilteredAttributes().PutStr("foo", "bar") + + e2 := pt0.Exemplars().AppendEmpty() + e2.SetTimestamp(timestampFromMs(1663350815890)) + e2.SetDoubleValue(2) + e2.FilteredAttributes().PutStr("foo", "bar") + e2.SetTraceID([16]byte{0x10, 0xa4, 0x73, 0x65, 0xb8, 0xaa, 0x04, 0xe0, 0x82, 0x91, 0xfa, 0xb9, 0xde, 0xca, 0x84, 0xdb}) + e2.SetSpanID([8]byte{0x71, 0x9c, 0xee, 0x4a, 0x66, 0x9f, 0xd7, 0xd1}) + + e3 := pt0.Exemplars().AppendEmpty() + e3.SetTimestamp(timestampFromMs(1663350815890)) + e3.SetDoubleValue(2) + e3.FilteredAttributes().PutStr("foo", "bar") + e3.SetTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x41, 0x37, 0xca, 0xb6, 0x6d, 0xc8, 0x80}) + e3.SetSpanID([8]byte{0x00, 0x00, 0x00, 0xdf, 0xa4, 0x59, 0x7a, 0x9d}) + md1 := pmetric.NewMetrics() mL1 := md1.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() m1 := mL1.AppendEmpty() @@ -388,8 +720,8 @@ func TestMetricBuilderGauges(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("gauge_test", 100, "foo", "bar"), - createDataPoint("gauge_test", 200, "bar", "foo"), + createDataPoint("gauge_test", 100, nil, "foo", "bar"), + createDataPoint("gauge_test", 200, nil, "bar", "foo"), }, }, }, @@ -421,13 +753,13 @@ func TestMetricBuilderGauges(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("gauge_test", 100, "foo", "bar"), - createDataPoint("gauge_test", 200, "bar", "foo"), + createDataPoint("gauge_test", 100, nil, "foo", "bar"), + createDataPoint("gauge_test", 200, nil, "bar", "foo"), }, }, { pts: []*testDataPoint{ - createDataPoint("gauge_test", 20, "foo", "bar"), + createDataPoint("gauge_test", 20, nil, "foo", "bar"), }, }, }, @@ -479,7 +811,7 @@ func TestMetricBuilderUntyped(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("unknown_test", 100, "foo", "bar"), + createDataPoint("unknown_test", 100, nil, "foo", "bar"), }, }, }, @@ -503,9 +835,9 @@ func TestMetricBuilderUntyped(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("something_not_exists", 100, "foo", "bar"), - createDataPoint("theother_not_exists", 200, "foo", "bar"), - createDataPoint("theother_not_exists", 300, "bar", "foo"), + createDataPoint("something_not_exists", 100, nil, "foo", "bar"), + createDataPoint("theother_not_exists", 200, nil, "foo", "bar"), + createDataPoint("theother_not_exists", 300, nil, "bar", "foo"), }, }, }, @@ -541,7 +873,7 @@ func TestMetricBuilderUntyped(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("some_count", 100, "foo", "bar"), + createDataPoint("some_count", 100, nil, "foo", "bar"), }, }, }, @@ -575,11 +907,73 @@ func TestMetricBuilderHistogram(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - 
createDataPoint("hist_test_bucket", 1, "foo", "bar", "le", "10"), - createDataPoint("hist_test_bucket", 2, "foo", "bar", "le", "20"), - createDataPoint("hist_test_bucket", 10, "foo", "bar", "le", "+inf"), - createDataPoint("hist_test_sum", 99, "foo", "bar"), - createDataPoint("hist_test_count", 10, "foo", "bar"), + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + }, + }, + }, + wants: func() []pmetric.Metrics { + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(10) + pt0.SetSum(99) + pt0.ExplicitBounds().FromRaw([]float64{10, 20}) + pt0.BucketCounts().FromRaw([]uint64{1, 1, 8}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} + }, + }, + { + name: "single item with exemplars", + inputs: []*testScrapedPage{ + { + pts: []*testDataPoint{ + createDataPoint( + "hist_test_bucket", + 1, + []exemplar.Exemplar{ + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: model.MetricNameLabel, Value: "counter_test"}, {Name: model.JobLabel, Value: "job"}, {Name: model.InstanceLabel, Value: "instance"}, {Name: "foo", Value: "bar"}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: ""}, {Name: "span_id", Value: ""}, {Name: "le", Value: "20"}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "10a47365b8aa04e08291fab9deca84db6170"}, {Name: "traceid", Value: "e3688e1aa2961786"}, {Name: "span_id", Value: "719cee4a669fd7d109ff"}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "174137cab66dc880"}, {Name: "span_id", Value: "dfa4597a9d"}}, + }, + { + Value: 1, + Ts: 1663113420863, + Labels: []labels.Label{{Name: "foo", Value: "bar"}, {Name: "trace_id", Value: "174137cab66dc88"}, {Name: "span_id", Value: "dfa4597a9"}}, + }, + }, + "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), }, }, }, @@ -599,6 +993,42 @@ func TestMetricBuilderHistogram(t *testing.T) { pt0.SetStartTimestamp(startTimestamp) pt0.Attributes().PutStr("foo", "bar") + e0 := pt0.Exemplars().AppendEmpty() + e0.SetTimestamp(timestampFromMs(1663113420863)) + e0.SetDoubleValue(1) + e0.FilteredAttributes().PutStr(model.MetricNameLabel, "counter_test") + e0.FilteredAttributes().PutStr(model.JobLabel, "job") + e0.FilteredAttributes().PutStr(model.InstanceLabel, "instance") + e0.FilteredAttributes().PutStr("foo", "bar") + + e1 := pt0.Exemplars().AppendEmpty() + e1.SetTimestamp(timestampFromMs(1663113420863)) + e1.SetDoubleValue(1) + e1.FilteredAttributes().PutStr("foo", "bar") + e1.FilteredAttributes().PutStr("le", "20") + + e2 := 
pt0.Exemplars().AppendEmpty() + e2.SetTimestamp(timestampFromMs(1663113420863)) + e2.SetDoubleValue(1) + e2.FilteredAttributes().PutStr("foo", "bar") + e2.FilteredAttributes().PutStr("traceid", "e3688e1aa2961786") + e2.SetTraceID([16]byte{0x10, 0xa4, 0x73, 0x65, 0xb8, 0xaa, 0x04, 0xe0, 0x82, 0x91, 0xfa, 0xb9, 0xde, 0xca, 0x84, 0xdb}) + e2.SetSpanID([8]byte{0x71, 0x9c, 0xee, 0x4a, 0x66, 0x9f, 0xd7, 0xd1}) + + e3 := pt0.Exemplars().AppendEmpty() + e3.SetTimestamp(timestampFromMs(1663113420863)) + e3.SetDoubleValue(1) + e3.FilteredAttributes().PutStr("foo", "bar") + e3.SetTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x41, 0x37, 0xca, 0xb6, 0x6d, 0xc8, 0x80}) + e3.SetSpanID([8]byte{0x00, 0x00, 0x00, 0xdf, 0xa4, 0x59, 0x7a, 0x9d}) + + e4 := pt0.Exemplars().AppendEmpty() + e4.SetTimestamp(timestampFromMs(1663113420863)) + e4.SetDoubleValue(1) + e4.FilteredAttributes().PutStr("foo", "bar") + e4.FilteredAttributes().PutStr("trace_id", "174137cab66dc88") + e4.FilteredAttributes().PutStr("span_id", "dfa4597a9") + return []pmetric.Metrics{md0} }, }, @@ -607,16 +1037,16 @@ func TestMetricBuilderHistogram(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("hist_test_bucket", 1, "foo", "bar", "le", "10"), - createDataPoint("hist_test_bucket", 2, "foo", "bar", "le", "20"), - createDataPoint("hist_test_bucket", 10, "foo", "bar", "le", "+inf"), - createDataPoint("hist_test_sum", 99, "foo", "bar"), - createDataPoint("hist_test_count", 10, "foo", "bar"), - createDataPoint("hist_test_bucket", 1, "key2", "v2", "le", "10"), - createDataPoint("hist_test_bucket", 2, "key2", "v2", "le", "20"), - createDataPoint("hist_test_bucket", 3, "key2", "v2", "le", "+inf"), - createDataPoint("hist_test_sum", 50, "key2", "v2"), - createDataPoint("hist_test_count", 3, "key2", "v2"), + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + createDataPoint("hist_test_bucket", 1, nil, "key2", "v2", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "key2", "v2", "le", "20"), + createDataPoint("hist_test_bucket", 3, nil, "key2", "v2", "le", "+inf"), + createDataPoint("hist_test_sum", 50, nil, "key2", "v2"), + createDataPoint("hist_test_count", 3, nil, "key2", "v2"), }, }, }, @@ -653,21 +1083,21 @@ func TestMetricBuilderHistogram(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("hist_test_bucket", 1, "foo", "bar", "le", "10"), - createDataPoint("hist_test_bucket", 2, "foo", "bar", "le", "20"), - createDataPoint("hist_test_bucket", 10, "foo", "bar", "le", "+inf"), - createDataPoint("hist_test_sum", 99, "foo", "bar"), - createDataPoint("hist_test_count", 10, "foo", "bar"), - createDataPoint("hist_test_bucket", 1, "key2", "v2", "le", "10"), - createDataPoint("hist_test_bucket", 2, "key2", "v2", "le", "20"), - createDataPoint("hist_test_bucket", 3, "key2", "v2", "le", "+inf"), - createDataPoint("hist_test_sum", 50, "key2", "v2"), - createDataPoint("hist_test_count", 3, "key2", "v2"), - createDataPoint("hist_test2_bucket", 1, "foo", "bar", "le", "10"), - createDataPoint("hist_test2_bucket", 2, "foo", "bar", "le", "20"), - createDataPoint("hist_test2_bucket", 3, "foo", "bar", "le", "+inf"), - createDataPoint("hist_test2_sum", 50, "foo", "bar"), - 
createDataPoint("hist_test2_count", 3, "foo", "bar"), + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), + createDataPoint("hist_test_bucket", 1, nil, "key2", "v2", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "key2", "v2", "le", "20"), + createDataPoint("hist_test_bucket", 3, nil, "key2", "v2", "le", "+inf"), + createDataPoint("hist_test_sum", 50, nil, "key2", "v2"), + createDataPoint("hist_test_count", 3, nil, "key2", "v2"), + createDataPoint("hist_test2_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test2_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test2_bucket", 3, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test2_sum", 50, nil, "foo", "bar"), + createDataPoint("hist_test2_count", 3, nil, "foo", "bar"), }, }, }, @@ -717,11 +1147,11 @@ func TestMetricBuilderHistogram(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("hist_test_bucket", 10, "foo", "bar", "le", "+inf"), - createDataPoint("hist_test_bucket", 1, "foo", "bar", "le", "10"), - createDataPoint("hist_test_bucket", 2, "foo", "bar", "le", "20"), - createDataPoint("hist_test_sum", 99, "foo", "bar"), - createDataPoint("hist_test_count", 10, "foo", "bar"), + createDataPoint("hist_test_bucket", 10, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), }, }, }, @@ -745,14 +1175,14 @@ func TestMetricBuilderHistogram(t *testing.T) { }, }, { - // this won't likely happen in real env, as prometheus won't generate histogram with less than 3 buckets + // this won't likely happen in real env, as prometheus wont generate histogram with less than 3 buckets name: "only-one-bucket", inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("hist_test_bucket", 3, "foo", "bar", "le", "+inf"), - createDataPoint("hist_test_count", 3, "foo", "bar"), - createDataPoint("hist_test_sum", 100, "foo", "bar"), + createDataPoint("hist_test_bucket", 3, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_count", 3, nil, "foo", "bar"), + createDataPoint("hist_test_sum", 100, nil, "foo", "bar"), }, }, }, @@ -775,14 +1205,14 @@ func TestMetricBuilderHistogram(t *testing.T) { }, }, { - // this won't likely happen in real env, as prometheus won't generate histogram with less than 3 buckets + // this won't likely happen in real env, as prometheus wont generate histogram with less than 3 buckets name: "only-one-bucket-noninf", inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("hist_test_bucket", 3, "foo", "bar", "le", "20"), - createDataPoint("hist_test_count", 3, "foo", "bar"), - createDataPoint("hist_test_sum", 100, "foo", "bar"), + createDataPoint("hist_test_bucket", 3, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_count", 3, nil, "foo", "bar"), + createDataPoint("hist_test_sum", 100, nil, "foo", "bar"), }, }, }, @@ -796,7 +1226,8 @@ func TestMetricBuilderHistogram(t *testing.T) { pt0 := hist0.DataPoints().AppendEmpty() pt0.SetCount(3) pt0.SetSum(100) - 
pt0.BucketCounts().FromRaw([]uint64{3}) + pt0.BucketCounts().FromRaw([]uint64{3, 0}) + pt0.ExplicitBounds().FromRaw([]float64{20}) pt0.SetTimestamp(tsNanos) pt0.SetStartTimestamp(startTimestamp) pt0.Attributes().PutStr("foo", "bar") @@ -809,10 +1240,10 @@ func TestMetricBuilderHistogram(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("hist_test_bucket", 1, "foo", "bar", "le", "10"), - createDataPoint("hist_test_bucket", 2, "foo", "bar", "le", "20"), - createDataPoint("hist_test_bucket", 3, "foo", "bar", "le", "+inf"), - createDataPoint("hist_test_count", 3, "foo", "bar"), + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 3, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_count", 3, nil, "foo", "bar"), }, }, }, @@ -839,13 +1270,27 @@ func TestMetricBuilderHistogram(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("hist_test_sum", 99), - createDataPoint("hist_test_count", 10), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), + createDataPoint("hist_test_count", 10, nil, "foo", "bar"), }, }, }, wants: func() []pmetric.Metrics { - return []pmetric.Metrics{pmetric.NewMetrics()} + md0 := pmetric.NewMetrics() + mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + m0 := mL0.AppendEmpty() + m0.SetName("hist_test") + hist0 := m0.SetEmptyHistogram() + hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + pt0 := hist0.DataPoints().AppendEmpty() + pt0.SetCount(10) + pt0.SetSum(99) + pt0.BucketCounts().FromRaw([]uint64{10}) + pt0.SetTimestamp(tsNanos) + pt0.SetStartTimestamp(startTimestamp) + pt0.Attributes().PutStr("foo", "bar") + + return []pmetric.Metrics{md0} }, }, { @@ -853,10 +1298,10 @@ func TestMetricBuilderHistogram(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("hist_test_bucket", 1, "foo", "bar", "le", "10"), - createDataPoint("hist_test_bucket", 2, "foo", "bar", "le", "20"), - createDataPoint("hist_test_bucket", 3, "foo", "bar", "le", "+inf"), - createDataPoint("hist_test_sum", 99, "foo", "bar"), + createDataPoint("hist_test_bucket", 1, nil, "foo", "bar", "le", "10"), + createDataPoint("hist_test_bucket", 2, nil, "foo", "bar", "le", "20"), + createDataPoint("hist_test_bucket", 3, nil, "foo", "bar", "le", "+inf"), + createDataPoint("hist_test_sum", 99, nil, "foo", "bar"), }, }, }, @@ -880,7 +1325,7 @@ func TestMetricBuilderSummary(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("summary_test", 5, "foo", "bar", "quantile", "1"), + createDataPoint("summary_test", 5, nil, "foo", "bar", "quantile", "1"), }, }, }, @@ -893,10 +1338,10 @@ func TestMetricBuilderSummary(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("summary_test", 1, "foo", "bar", "quantile", "0.5"), - createDataPoint("summary_test", 2, "foo", "bar", "quantile", "0.75"), - createDataPoint("summary_test", 5, "foo", "bar", "quantile", "1"), - createDataPoint("summary_test_sum", 500, "foo", "bar"), + createDataPoint("summary_test", 1, nil, "foo", "bar", "quantile", "0.5"), + createDataPoint("summary_test", 2, nil, "foo", "bar", "quantile", "0.75"), + createDataPoint("summary_test", 5, nil, "foo", "bar", "quantile", "1"), + createDataPoint("summary_test_sum", 500, nil, "foo", "bar"), }, }, }, @@ -909,10 +1354,10 @@ func 
TestMetricBuilderSummary(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("summary_test", 1, "foo", "bar", "quantile", "0.5"), - createDataPoint("summary_test", 2, "foo", "bar", "quantile", "0.75"), - createDataPoint("summary_test", 5, "foo", "bar", "quantile", "1"), - createDataPoint("summary_test_count", 500, "foo", "bar"), + createDataPoint("summary_test", 1, nil, "foo", "bar", "quantile", "0.5"), + createDataPoint("summary_test", 2, nil, "foo", "bar", "quantile", "0.75"), + createDataPoint("summary_test", 5, nil, "foo", "bar", "quantile", "1"), + createDataPoint("summary_test_count", 500, nil, "foo", "bar"), }, }, }, @@ -938,7 +1383,6 @@ func TestMetricBuilderSummary(t *testing.T) { q100 := qvL.AppendEmpty() q100.SetQuantile(1) q100.SetValue(5.0) - return []pmetric.Metrics{md0} }, }, @@ -947,8 +1391,8 @@ func TestMetricBuilderSummary(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("summary_test_sum", 100, "foo", "bar"), - createDataPoint("summary_test_count", 500, "foo", "bar"), + createDataPoint("summary_test_sum", 100, nil, "foo", "bar"), + createDataPoint("summary_test_count", 500, nil, "foo", "bar"), }, }, }, @@ -973,11 +1417,11 @@ func TestMetricBuilderSummary(t *testing.T) { inputs: []*testScrapedPage{ { pts: []*testDataPoint{ - createDataPoint("summary_test", 1, "foo", "bar", "quantile", "0.5"), - createDataPoint("summary_test", 2, "foo", "bar", "quantile", "0.75"), - createDataPoint("summary_test", 5, "foo", "bar", "quantile", "1"), - createDataPoint("summary_test_sum", 100, "foo", "bar"), - createDataPoint("summary_test_count", 500, "foo", "bar"), + createDataPoint("summary_test", 1, nil, "foo", "bar", "quantile", "0.5"), + createDataPoint("summary_test", 2, nil, "foo", "bar", "quantile", "0.75"), + createDataPoint("summary_test", 5, nil, "foo", "bar", "quantile", "1"), + createDataPoint("summary_test_sum", 100, nil, "foo", "bar"), + createDataPoint("summary_test_count", 500, nil, "foo", "bar"), }, }, }, @@ -1028,12 +1472,17 @@ func (tt buildTestData) run(t *testing.T) { st := ts for i, page := range tt.inputs { sink := new(consumertest.MetricsSink) - tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t)) + tr := newTransaction(scrapeCtx, &startTimeAdjuster{startTime: startTimestamp}, sink, nil, receivertest.NewNopCreateSettings(), nopObsRecv(t), false) for _, pt := range page.pts { // set ts for testing pt.t = st _, err := tr.Append(0, pt.lb, pt.t, pt.v) assert.NoError(t, err) + + for _, e := range pt.exemplars { + _, err := tr.AppendExemplar(0, pt.lb, e) + assert.NoError(t, err) + } } assert.NoError(t, tr.Commit()) mds := sink.AllMetrics() @@ -1084,6 +1533,7 @@ func (s *startTimeAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { for l := 0; l < dps.Len(); l++ { dps.At(l).SetStartTimestamp(s.startTime) } + case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge, pmetric.MetricTypeExponentialHistogram: } } } @@ -1092,16 +1542,17 @@ func (s *startTimeAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { } type testDataPoint struct { - lb labels.Labels - t int64 - v float64 + lb labels.Labels + t int64 + v float64 + exemplars []exemplar.Exemplar } type testScrapedPage struct { pts []*testDataPoint } -func createDataPoint(mname string, value float64, tagPairs ...string) *testDataPoint { +func createDataPoint(mname string, value float64, es []exemplar.Exemplar, tagPairs ...string) *testDataPoint { var lbls 
[]string lbls = append(lbls, tagPairs...) lbls = append(lbls, model.MetricNameLabel, mname) @@ -1109,9 +1560,10 @@ func createDataPoint(mname string, value float64, tagPairs ...string) *testDataP lbls = append(lbls, model.InstanceLabel, "instance") return &testDataPoint{ - lb: labels.FromStrings(lbls...), - t: ts, - v: value, + lb: labels.FromStrings(lbls...), + t: ts, + v: value, + exemplars: es, } } diff --git a/component/otelcol/receiver/prometheus/internal/util.go b/component/otelcol/receiver/prometheus/internal/util.go index 3f8633a7b31f..405a181f47a5 100644 --- a/component/otelcol/receiver/prometheus/internal/util.go +++ b/component/otelcol/receiver/prometheus/internal/util.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal" @@ -33,6 +22,7 @@ const ( metricsSuffixSum = "_sum" metricSuffixTotal = "_total" metricSuffixInfo = "_info" + metricSuffixCreated = "_created" startTimeMetricName = "process_start_time_seconds" scrapeUpMetricName = "up" @@ -41,11 +31,11 @@ const ( ) var ( - trimmableSuffixes = []string{metricsSuffixBucket, metricsSuffixCount, metricsSuffixSum, metricSuffixTotal, metricSuffixInfo} + trimmableSuffixes = []string{metricsSuffixBucket, metricsSuffixCount, metricsSuffixSum, metricSuffixTotal, metricSuffixInfo, metricSuffixCreated} errNoDataToBuild = errors.New("there's no data to build") errNoBoundaryLabel = errors.New("given metricType has no 'le' or 'quantile' label") - errEmptyQuantileLabel = errors.New("'quantile' label on summary metric missing is empty") - errEmptyLeLabel = errors.New("'le' label on histogram metric id missing or empty") + errEmptyQuantileLabel = errors.New("'quantile' label on summary metric is missing or empty") + errEmptyLeLabel = errors.New("'le' label on histogram metric is missing or empty") errMetricNameNotFound = errors.New("metricName not found from labels") errTransactionAborted = errors.New("transaction aborted") errNoJobInstance = errors.New("job or instance cannot be found from labels") @@ -66,6 +56,8 @@ func getSortedNotUsefulLabels(mType pmetric.MetricType) []string { return notUsefulLabelsHistogram case pmetric.MetricTypeSummary: return notUsefulLabelsSummary + case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge, pmetric.MetricTypeSum, pmetric.MetricTypeExponentialHistogram: + fallthrough default: return notUsefulLabelsOther } @@ -82,7 +74,7 @@ func timestampFromMs(timeAtMs int64) pcommon.Timestamp { } func getBoundary(metricType pmetric.MetricType, labels labels.Labels) (float64, error) { - val := "" + var val string switch metricType { case pmetric.MetricTypeHistogram: val = labels.Get(model.BucketLabel) @@ -94,6 +86,8 @@ func getBoundary(metricType pmetric.MetricType, labels labels.Labels) (float64, if val == "" { return 0, errEmptyQuantileLabel } + case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge, 
pmetric.MetricTypeSum, pmetric.MetricTypeExponentialHistogram: + fallthrough default: return 0, errNoBoundaryLabel } @@ -120,6 +114,8 @@ func convToMetricType(metricType textparse.MetricType) (pmetric.MetricType, bool return pmetric.MetricTypeSummary, true case textparse.MetricTypeInfo, textparse.MetricTypeStateset: return pmetric.MetricTypeSum, false + case textparse.MetricTypeGaugeHistogram: + fallthrough default: // including: textparse.MetricTypeGaugeHistogram return pmetric.MetricTypeEmpty, false diff --git a/component/otelcol/receiver/prometheus/internal/util_test.go b/component/otelcol/receiver/prometheus/internal/util_test.go index a9d23c3dbb09..3bea1ac42471 100644 --- a/component/otelcol/receiver/prometheus/internal/util_test.go +++ b/component/otelcol/receiver/prometheus/internal/util_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal" @@ -111,7 +100,7 @@ func TestConvToMetricType(t *testing.T) { wantMonotonic: false, }, { - name: "textparse.metric_gauge_histogram", + name: "textparse.metric_gauge_hostogram", mtype: textparse.MetricTypeGaugeHistogram, want: pmetric.MetricTypeEmpty, wantMonotonic: false, diff --git a/component/otelcol/receiver/prometheus/prometheus.go b/component/otelcol/receiver/prometheus/prometheus.go index 7928f504287b..f1b4d8a7d422 100644 --- a/component/otelcol/receiver/prometheus/prometheus.go +++ b/component/otelcol/receiver/prometheus/prometheus.go @@ -15,21 +15,19 @@ import ( "github.com/grafana/agent/component/otelcol/receiver/prometheus/internal" "github.com/grafana/agent/pkg/build" "github.com/grafana/agent/pkg/util/zapadapter" - otel_service "github.com/grafana/agent/service/otel" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" otelcomponent "go.opentelemetry.io/collector/component" otelreceiver "go.opentelemetry.io/collector/receiver" - "go.opentelemetry.io/otel/metric/noop" - "go.opentelemetry.io/otel/trace" + metricNoop "go.opentelemetry.io/otel/metric/noop" + traceNoop "go.opentelemetry.io/otel/trace/noop" ) func init() { component.Register(component.Registration{ - Name: "otelcol.receiver.prometheus", - Args: Arguments{}, - Exports: Exports{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.receiver.prometheus", + Args: Arguments{}, + Exports: Exports{}, Build: func(o component.Options, a component.Arguments) (component.Component, error) { return New(o, a.(Arguments)) @@ -102,15 +100,26 @@ func (c *Component) Update(newConfig component.Arguments) error { useStartTimeMetric = false startTimeMetricRegex *regexp.Regexp + // Start time for Summary, Histogram and Sum metrics can be retrieved from `_created` metrics. + useCreatedMetric = false + + // Trimming the metric suffixes is used to remove the metric type and the unit at the end of the metric name.
+ // To trim the unit, the opentelemetry code uses the MetricMetadataStore which is currently not supported by the agent. + // When supported, this could be added as an arg. + trimMetricSuffixes = false + gcInterval = 5 * time.Minute ) settings := otelreceiver.CreateSettings{ + + ID: otelcomponent.NewID(otelcomponent.Type(c.opts.ID)), + TelemetrySettings: otelcomponent.TelemetrySettings{ Logger: zapadapter.New(c.opts.Logger), // TODO(tpaschalis): expose tracing and logging statistics. - TracerProvider: trace.NewNoopTracerProvider(), - MeterProvider: noop.NewMeterProvider(), + TracerProvider: traceNoop.NewTracerProvider(), + MeterProvider: metricNoop.NewMeterProvider(), ReportComponentStatus: func(*otelcomponent.StatusEvent) error { return nil @@ -131,8 +140,9 @@ func (c *Component) Update(newConfig component.Arguments) error { gcInterval, useStartTimeMetric, startTimeMetricRegex, - otelcomponent.NewID(otelcomponent.Type(c.opts.ID)), + useCreatedMetric, labels.Labels{}, + trimMetricSuffixes, ) if err != nil { return err diff --git a/component/otelcol/receiver/prometheus/prometheus_test.go b/component/otelcol/receiver/prometheus/prometheus_test.go index 3002916eb64a..3877bad7c990 100644 --- a/component/otelcol/receiver/prometheus/prometheus_test.go +++ b/component/otelcol/receiver/prometheus/prometheus_test.go @@ -13,6 +13,7 @@ import ( "github.com/grafana/agent/pkg/util" "github.com/grafana/river" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/scrape" "github.com/stretchr/testify/require" @@ -59,16 +60,33 @@ func Test(t *testing.T) { {Name: model.JobLabel, Value: "testJob"}, {Name: model.InstanceLabel, Value: "otelcol.receiver.prometheus"}, {Name: "foo", Value: "bar"}, + {Name: model.MetricNameLabel, Value: "otel_scope_info"}, + {Name: "otel_scope_name", Value: "go.opentelemetry.io.contrib.instrumentation.net.http.otelhttp"}, + {Name: "otel_scope_version", Value: "v0.24.0"}, } ts := time.Now().Unix() v := 100. 
+ exemplarLabels := labels.Labels{ + {Name: model.MetricNameLabel, Value: "testMetric"}, + {Name: "trace_id", Value: "123456789abcdef0123456789abcdef0"}, + {Name: "span_id", Value: "123456789abcdef0"}, + } + exemplar := exemplar.Exemplar{ + Value: 2, + Ts: ts, + HasTs: true, + Labels: exemplarLabels, + } + ctx := context.Background() ctx = scrape.ContextWithMetricMetadataStore(ctx, flowprometheus.NoopMetadataStore{}) ctx = scrape.ContextWithTarget(ctx, &scrape.Target{}) app := exports.Receiver.Appender(ctx) _, err := app.Append(0, l, ts, v) require.NoError(t, err) + _, err = app.AppendExemplar(0, l, exemplar) + require.NoError(t, err) require.NoError(t, app.Commit()) }() @@ -79,6 +97,14 @@ func Test(t *testing.T) { case m := <-metricCh: require.Equal(t, 1, m.MetricCount()) require.Equal(t, "testMetric", m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Name()) + require.Equal(t, "go.opentelemetry.io.contrib.instrumentation.net.http.otelhttp", m.ResourceMetrics().At(0).ScopeMetrics().At(0).Scope().Name()) + require.Equal(t, "v0.24.0", m.ResourceMetrics().At(0).ScopeMetrics().At(0).Scope().Version()) + require.Equal(t, "Gauge", m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Type().String()) + require.Equal(t, 1, m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().Len()) + require.Equal(t, 1, m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Exemplars().Len()) + require.Equal(t, "123456789abcdef0123456789abcdef0", m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Exemplars().At(0).TraceID().String()) + require.Equal(t, "123456789abcdef0", m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Exemplars().At(0).SpanID().String()) + require.Equal(t, 2.0, m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Exemplars().At(0).DoubleValue()) } } diff --git a/component/otelcol/receiver/vcenter/vcenter.go b/component/otelcol/receiver/vcenter/vcenter.go new file mode 100644 index 000000000000..346110da1ecd --- /dev/null +++ b/component/otelcol/receiver/vcenter/vcenter.go @@ -0,0 +1,329 @@ +// Package vcenter provides an otelcol.receiver.vcenter component. 
+package vcenter + +import ( + "fmt" + "net/url" + + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/otelcol" + "github.com/grafana/agent/component/otelcol/receiver" + "github.com/grafana/river/rivertypes" + "github.com/mitchellh/mapstructure" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver" + otelcomponent "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configopaque" + otelextension "go.opentelemetry.io/collector/extension" +) + +func init() { + component.Register(component.Registration{ + Name: "otelcol.receiver.vcenter", + Args: Arguments{}, + + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { + fact := vcenterreceiver.NewFactory() + return receiver.New(opts, fact, args.(Arguments)) + }, + }) +} + +type MetricConfig struct { + Enabled bool `river:"enabled,attr"` +} + +func (r *MetricConfig) Convert() map[string]interface{} { + if r == nil { + return nil + } + + return map[string]interface{}{ + "enabled": r.Enabled, + } +} + +type MetricsConfig struct { + VcenterClusterCPUEffective MetricConfig `river:"vcenter.cluster.cpu.effective,block,optional"` + VcenterClusterCPULimit MetricConfig `river:"vcenter.cluster.cpu.limit,block,optional"` + VcenterClusterHostCount MetricConfig `river:"vcenter.cluster.host.count,block,optional"` + VcenterClusterMemoryEffective MetricConfig `river:"vcenter.cluster.memory.effective,block,optional"` + VcenterClusterMemoryLimit MetricConfig `river:"vcenter.cluster.memory.limit,block,optional"` + VcenterClusterMemoryUsed MetricConfig `river:"vcenter.cluster.memory.used,block,optional"` + VcenterClusterVMCount MetricConfig `river:"vcenter.cluster.vm.count,block,optional"` + VcenterDatastoreDiskUsage MetricConfig `river:"vcenter.datastore.disk.usage,block,optional"` + VcenterDatastoreDiskUtilization MetricConfig `river:"vcenter.datastore.disk.utilization,block,optional"` + VcenterHostCPUUsage MetricConfig `river:"vcenter.host.cpu.usage,block,optional"` + VcenterHostCPUUtilization MetricConfig `river:"vcenter.host.cpu.utilization,block,optional"` + VcenterHostDiskLatencyAvg MetricConfig `river:"vcenter.host.disk.latency.avg,block,optional"` + VcenterHostDiskLatencyMax MetricConfig `river:"vcenter.host.disk.latency.max,block,optional"` + VcenterHostDiskThroughput MetricConfig `river:"vcenter.host.disk.throughput,block,optional"` + VcenterHostMemoryUsage MetricConfig `river:"vcenter.host.memory.usage,block,optional"` + VcenterHostMemoryUtilization MetricConfig `river:"vcenter.host.memory.utilization,block,optional"` + VcenterHostNetworkPacketCount MetricConfig `river:"vcenter.host.network.packet.count,block,optional"` + VcenterHostNetworkPacketErrors MetricConfig `river:"vcenter.host.network.packet.errors,block,optional"` + VcenterHostNetworkThroughput MetricConfig `river:"vcenter.host.network.throughput,block,optional"` + VcenterHostNetworkUsage MetricConfig `river:"vcenter.host.network.usage,block,optional"` + VcenterResourcePoolCPUShares MetricConfig `river:"vcenter.resource_pool.cpu.shares,block,optional"` + VcenterResourcePoolCPUUsage MetricConfig `river:"vcenter.resource_pool.cpu.usage,block,optional"` + VcenterResourcePoolMemoryShares MetricConfig `river:"vcenter.resource_pool.memory.shares,block,optional"` + VcenterResourcePoolMemoryUsage MetricConfig `river:"vcenter.resource_pool.memory.usage,block,optional"` + VcenterVMCPUUsage MetricConfig `river:"vcenter.vm.cpu.usage,block,optional"` + VcenterVMCPUUtilization 
MetricConfig `river:"vcenter.vm.cpu.utilization,block,optional"` + VcenterVMDiskLatencyAvg MetricConfig `river:"vcenter.vm.disk.latency.avg,block,optional"` + VcenterVMDiskLatencyMax MetricConfig `river:"vcenter.vm.disk.latency.max,block,optional"` + VcenterVMDiskThroughput MetricConfig `river:"vcenter.vm.disk.throughput,block,optional"` + VcenterVMDiskUsage MetricConfig `river:"vcenter.vm.disk.usage,block,optional"` + VcenterVMDiskUtilization MetricConfig `river:"vcenter.vm.disk.utilization,block,optional"` + VcenterVMMemoryBallooned MetricConfig `river:"vcenter.vm.memory.ballooned,block,optional"` + VcenterVMMemorySwapped MetricConfig `river:"vcenter.vm.memory.swapped,block,optional"` + VcenterVMMemorySwappedSsd MetricConfig `river:"vcenter.vm.memory.swapped_ssd,block,optional"` + VcenterVMMemoryUsage MetricConfig `river:"vcenter.vm.memory.usage,block,optional"` + VcenterVMMemoryUtilization MetricConfig `river:"vcenter.vm.memory.utilization,block,optional"` + VcenterVMNetworkPacketCount MetricConfig `river:"vcenter.vm.network.packet.count,block,optional"` + VcenterVMNetworkThroughput MetricConfig `river:"vcenter.vm.network.throughput,block,optional"` + VcenterVMNetworkUsage MetricConfig `river:"vcenter.vm.network.usage,block,optional"` +} + +func (args *MetricsConfig) Convert() map[string]interface{} { + if args == nil { + return nil + } + + return map[string]interface{}{ + "vcenter.cluster.cpu.effective": args.VcenterClusterCPUEffective.Convert(), + "vcenter.cluster.cpu.limit": args.VcenterClusterCPULimit.Convert(), + "vcenter.cluster.host.count": args.VcenterClusterHostCount.Convert(), + "vcenter.cluster.memory.effective": args.VcenterClusterMemoryEffective.Convert(), + "vcenter.cluster.memory.limit": args.VcenterClusterMemoryLimit.Convert(), + "vcenter.cluster.memory.used": args.VcenterClusterMemoryUsed.Convert(), + "vcenter.cluster.vm.count": args.VcenterClusterVMCount.Convert(), + "vcenter.datastore.disk.usage": args.VcenterDatastoreDiskUsage.Convert(), + "vcenter.datastore.disk.utilization": args.VcenterDatastoreDiskUtilization.Convert(), + "vcenter.host.cpu.usage": args.VcenterHostCPUUsage.Convert(), + "vcenter.host.cpu.utilization": args.VcenterHostCPUUtilization.Convert(), + "vcenter.host.disk.latency.avg": args.VcenterHostDiskLatencyAvg.Convert(), + "vcenter.host.disk.latency.max": args.VcenterHostDiskLatencyMax.Convert(), + "vcenter.host.disk.throughput": args.VcenterHostDiskThroughput.Convert(), + "vcenter.host.memory.usage": args.VcenterHostMemoryUsage.Convert(), + "vcenter.host.memory.utilization": args.VcenterHostMemoryUtilization.Convert(), + "vcenter.host.network.packet.count": args.VcenterHostNetworkPacketCount.Convert(), + "vcenter.host.network.packet.errors": args.VcenterHostNetworkPacketErrors.Convert(), + "vcenter.host.network.throughput": args.VcenterHostNetworkThroughput.Convert(), + "vcenter.host.network.usage": args.VcenterHostNetworkUsage.Convert(), + "vcenter.resource_pool.cpu.shares": args.VcenterResourcePoolCPUShares.Convert(), + "vcenter.resource_pool.cpu.usage": args.VcenterResourcePoolCPUUsage.Convert(), + "vcenter.resource_pool.memory.shares": args.VcenterResourcePoolMemoryShares.Convert(), + "vcenter.resource_pool.memory.usage": args.VcenterResourcePoolMemoryUsage.Convert(), + "vcenter.vm.cpu.usage": args.VcenterVMCPUUsage.Convert(), + "vcenter.vm.cpu.utilization": args.VcenterVMCPUUtilization.Convert(), + "vcenter.vm.disk.latency.avg": args.VcenterVMDiskLatencyAvg.Convert(), + "vcenter.vm.disk.latency.max": args.VcenterVMDiskLatencyMax.Convert(), + 
"vcenter.vm.disk.throughput": args.VcenterVMDiskThroughput.Convert(), + "vcenter.vm.disk.usage": args.VcenterVMDiskUsage.Convert(), + "vcenter.vm.disk.utilization": args.VcenterVMDiskUtilization.Convert(), + "vcenter.vm.memory.ballooned": args.VcenterVMMemoryBallooned.Convert(), + "vcenter.vm.memory.swapped": args.VcenterVMMemorySwapped.Convert(), + "vcenter.vm.memory.swapped_ssd": args.VcenterVMMemorySwappedSsd.Convert(), + "vcenter.vm.memory.usage": args.VcenterVMMemoryUsage.Convert(), + "vcenter.vm.memory.utilization": args.VcenterVMMemoryUtilization.Convert(), + "vcenter.vm.network.packet.count": args.VcenterVMNetworkPacketCount.Convert(), + "vcenter.vm.network.throughput": args.VcenterVMNetworkThroughput.Convert(), + "vcenter.vm.network.usage": args.VcenterVMNetworkUsage.Convert()} +} + +type ResourceAttributeConfig struct { + Enabled bool `river:"enabled,attr"` +} + +func (r *ResourceAttributeConfig) Convert() map[string]interface{} { + if r == nil { + return nil + } + + return map[string]interface{}{ + "enabled": r.Enabled, + } +} + +type ResourceAttributesConfig struct { + VcenterClusterName ResourceAttributeConfig `river:"vcenter.cluster.name,block,optional"` + VcenterDatastoreName ResourceAttributeConfig `river:"vcenter.datastore.name,block,optional"` + VcenterHostName ResourceAttributeConfig `river:"vcenter.host.name,block,optional"` + VcenterResourcePoolInventoryPath ResourceAttributeConfig `river:"vcenter.resource_pool.inventory_path,block,optional"` + VcenterResourcePoolName ResourceAttributeConfig `river:"vcenter.resource_pool.name,block,optional"` + VcenterVMID ResourceAttributeConfig `river:"vcenter.vm.id,block,optional"` + VcenterVMName ResourceAttributeConfig `river:"vcenter.vm.name,block,optional"` +} + +func (args *ResourceAttributesConfig) Convert() map[string]interface{} { + if args == nil { + return nil + } + + res := map[string]interface{}{ + "vcenter.cluster.name": args.VcenterClusterName.Convert(), + "vcenter.datastore.name": args.VcenterDatastoreName.Convert(), + "vcenter.host.name": args.VcenterHostName.Convert(), + "vcenter.resource_pool.inventory_path": args.VcenterResourcePoolInventoryPath.Convert(), + "vcenter.resource_pool.name": args.VcenterResourcePoolName.Convert(), + "vcenter.vm.id": args.VcenterVMID.Convert(), + "vcenter.vm.name": args.VcenterVMName.Convert(), + } + + return res +} + +type MetricsBuilderConfig struct { + Metrics MetricsConfig `river:"metrics,block,optional"` + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +func (args *MetricsBuilderConfig) Convert() map[string]interface{} { + if args == nil { + return nil + } + + res := map[string]interface{}{ + "metrics": args.Metrics.Convert(), + "resource_attributes": args.ResourceAttributes.Convert(), + } + + return res +} + +// Arguments configures the otelcol.receiver.vcenter component. +type Arguments struct { + Endpoint string `river:"endpoint,attr"` + Username string `river:"username,attr"` + Password rivertypes.Secret `river:"password,attr"` + + MetricsBuilderConfig MetricsBuilderConfig `river:",squash"` + + ScraperControllerArguments otelcol.ScraperControllerArguments `river:",squash"` + TLS otelcol.TLSClientArguments `river:"tls,block,optional"` + + // DebugMetrics configures component internal metrics. Optional. + DebugMetrics otelcol.DebugMetricsArguments `river:"debug_metrics,block,optional"` + + // Output configures where to send received data. Required. 
+ Output *otelcol.ConsumerArguments `river:"output,block"` +} + +var _ receiver.Arguments = Arguments{} + +var ( + // DefaultArguments holds default values for Arguments. + DefaultArguments = Arguments{ + ScraperControllerArguments: otelcol.DefaultScraperControllerArguments, + MetricsBuilderConfig: MetricsBuilderConfig{ + Metrics: MetricsConfig{ + VcenterClusterCPUEffective: MetricConfig{Enabled: true}, + VcenterClusterCPULimit: MetricConfig{Enabled: true}, + VcenterClusterHostCount: MetricConfig{Enabled: true}, + VcenterClusterMemoryEffective: MetricConfig{Enabled: true}, + VcenterClusterMemoryLimit: MetricConfig{Enabled: true}, + VcenterClusterMemoryUsed: MetricConfig{Enabled: true}, + VcenterClusterVMCount: MetricConfig{Enabled: true}, + VcenterDatastoreDiskUsage: MetricConfig{Enabled: true}, + VcenterDatastoreDiskUtilization: MetricConfig{Enabled: true}, + VcenterHostCPUUsage: MetricConfig{Enabled: true}, + VcenterHostCPUUtilization: MetricConfig{Enabled: true}, + VcenterHostDiskLatencyAvg: MetricConfig{Enabled: true}, + VcenterHostDiskLatencyMax: MetricConfig{Enabled: true}, + VcenterHostDiskThroughput: MetricConfig{Enabled: true}, + VcenterHostMemoryUsage: MetricConfig{Enabled: true}, + VcenterHostMemoryUtilization: MetricConfig{Enabled: true}, + VcenterHostNetworkPacketCount: MetricConfig{Enabled: true}, + VcenterHostNetworkPacketErrors: MetricConfig{Enabled: true}, + VcenterHostNetworkThroughput: MetricConfig{Enabled: true}, + VcenterHostNetworkUsage: MetricConfig{Enabled: true}, + VcenterResourcePoolCPUShares: MetricConfig{Enabled: true}, + VcenterResourcePoolCPUUsage: MetricConfig{Enabled: true}, + VcenterResourcePoolMemoryShares: MetricConfig{Enabled: true}, + VcenterResourcePoolMemoryUsage: MetricConfig{Enabled: true}, + VcenterVMCPUUsage: MetricConfig{Enabled: true}, + VcenterVMCPUUtilization: MetricConfig{Enabled: true}, + VcenterVMDiskLatencyAvg: MetricConfig{Enabled: true}, + VcenterVMDiskLatencyMax: MetricConfig{Enabled: true}, + VcenterVMDiskThroughput: MetricConfig{Enabled: true}, + VcenterVMDiskUsage: MetricConfig{Enabled: true}, + VcenterVMDiskUtilization: MetricConfig{Enabled: true}, + VcenterVMMemoryBallooned: MetricConfig{Enabled: true}, + VcenterVMMemorySwapped: MetricConfig{Enabled: true}, + VcenterVMMemorySwappedSsd: MetricConfig{Enabled: true}, + VcenterVMMemoryUsage: MetricConfig{Enabled: true}, + VcenterVMMemoryUtilization: MetricConfig{Enabled: false}, + VcenterVMNetworkPacketCount: MetricConfig{Enabled: true}, + VcenterVMNetworkThroughput: MetricConfig{Enabled: true}, + VcenterVMNetworkUsage: MetricConfig{Enabled: true}, + }, + ResourceAttributes: ResourceAttributesConfig{ + VcenterClusterName: ResourceAttributeConfig{Enabled: true}, + VcenterDatastoreName: ResourceAttributeConfig{Enabled: true}, + VcenterHostName: ResourceAttributeConfig{Enabled: true}, + VcenterResourcePoolInventoryPath: ResourceAttributeConfig{Enabled: true}, + VcenterResourcePoolName: ResourceAttributeConfig{Enabled: true}, + VcenterVMID: ResourceAttributeConfig{Enabled: true}, + VcenterVMName: ResourceAttributeConfig{Enabled: true}, + }, + }, + } +) + +// SetToDefault implements river.Defaulter. +func (args *Arguments) SetToDefault() { + *args = DefaultArguments +} + +// Convert implements receiver.Arguments. 
+func (args Arguments) Convert() (otelcomponent.Config, error) { + cfg := args.MetricsBuilderConfig.Convert() + + var result vcenterreceiver.Config + err := mapstructure.Decode(cfg, &result) + + if err != nil { + return nil, err + } + + result.Endpoint = args.Endpoint + result.Username = args.Username + result.Password = configopaque.String(args.Password) + result.TLSClientSetting = *args.TLS.Convert() + result.ScraperControllerSettings = *args.ScraperControllerArguments.Convert() + + return &result, nil +} + +// Validate checks to see if the supplied config will work for the receiver +func (args Arguments) Validate() error { + res, err := url.Parse(args.Endpoint) + if err != nil { + return fmt.Errorf("unable to parse url %s: %w", args.Endpoint, err) + } + + if res.Scheme != "http" && res.Scheme != "https" { + return fmt.Errorf("url scheme must be http or https") + } + return nil +} + +// Extensions implements receiver.Arguments. +func (args Arguments) Extensions() map[otelcomponent.ID]otelextension.Extension { + return nil +} + +// Exporters implements receiver.Arguments. +func (args Arguments) Exporters() map[otelcomponent.DataType]map[otelcomponent.ID]otelcomponent.Component { + return nil +} + +// NextConsumers implements receiver.Arguments. +func (args Arguments) NextConsumers() *otelcol.ConsumerArguments { + return args.Output +} + +// DebugMetricsConfig implements receiver.Arguments. +func (args Arguments) DebugMetricsConfig() otelcol.DebugMetricsArguments { + return args.DebugMetrics +} diff --git a/component/otelcol/receiver/vcenter/vcenter_test.go b/component/otelcol/receiver/vcenter/vcenter_test.go new file mode 100644 index 000000000000..bcc6896dc4b2 --- /dev/null +++ b/component/otelcol/receiver/vcenter/vcenter_test.go @@ -0,0 +1,226 @@ +package vcenter + +import ( + "testing" + "time" + + "github.com/grafana/river" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver" + "github.com/stretchr/testify/require" +) + +func TestArguments_UnmarshalRiver(t *testing.T) { + in := ` + endpoint = "http://localhost:1234" + username = "user" + password = "pass" + collection_interval = "2m" + + resource_attributes { + vcenter.cluster.name { + enabled = true + } + vcenter.datastore.name { + enabled = true + } + vcenter.host.name { + enabled = true + } + vcenter.resource_pool.inventory_path { + enabled = false + } + vcenter.resource_pool.name { + enabled = true + } + vcenter.vm.name { + enabled = true + } + } + + metrics { + vcenter.cluster.cpu.effective { + enabled = false + } + vcenter.cluster.cpu.limit { + enabled = true + } + vcenter.cluster.host.count { + enabled = true + } + vcenter.cluster.memory.effective { + enabled = true + } + vcenter.cluster.memory.limit { + enabled = true + } + vcenter.cluster.memory.used { + enabled = true + } + vcenter.cluster.vm.count { + enabled = true + } + vcenter.datastore.disk.usage { + enabled = true + } + vcenter.datastore.disk.utilization { + enabled = true + } + vcenter.host.cpu.usage { + enabled = true + } + vcenter.host.cpu.utilization { + enabled = true + } + vcenter.host.disk.latency.avg { + enabled = true + } + vcenter.host.disk.latency.max { + enabled = true + } + vcenter.host.disk.throughput { + enabled = true + } + vcenter.host.memory.usage { + enabled = true + } + vcenter.host.memory.utilization { + enabled = true + } + vcenter.host.network.packet.count { + enabled = true + } + vcenter.host.network.packet.errors { + enabled = true + } + vcenter.host.network.throughput { + enabled = true + } + 
vcenter.host.network.usage { + enabled = true + } + vcenter.resource_pool.cpu.shares { + enabled = true + } + vcenter.resource_pool.cpu.usage { + enabled = true + } + vcenter.resource_pool.memory.shares { + enabled = true + } + vcenter.resource_pool.memory.usage { + enabled = true + } + vcenter.vm.cpu.usage { + enabled = true + } + vcenter.vm.cpu.utilization { + enabled = true + } + vcenter.vm.disk.latency.avg { + enabled = true + } + vcenter.vm.disk.latency.max { + enabled = true + } + vcenter.vm.disk.throughput { + enabled = true + } + vcenter.vm.disk.usage { + enabled = true + } + vcenter.vm.disk.utilization { + enabled = true + } + vcenter.vm.memory.ballooned { + enabled = true + } + vcenter.vm.memory.swapped { + enabled = true + } + vcenter.vm.memory.swapped_ssd { + enabled = true + } + vcenter.vm.memory.usage { + enabled = true + } + vcenter.vm.network.packet.count { + enabled = true + } + vcenter.vm.network.throughput { + enabled = true + } + vcenter.vm.network.usage { + enabled = true + } + } + + output { /* no-op */ } + ` + + var args Arguments + require.NoError(t, river.Unmarshal([]byte(in), &args)) + args.Convert() + ext, err := args.Convert() + require.NoError(t, err) + otelArgs, ok := (ext).(*vcenterreceiver.Config) + + require.True(t, ok) + + require.Equal(t, "user", otelArgs.Username) + require.Equal(t, "pass", string(otelArgs.Password)) + require.Equal(t, "http://localhost:1234", otelArgs.Endpoint) + + require.Equal(t, 2*time.Minute, otelArgs.ScraperControllerSettings.CollectionInterval) + require.Equal(t, time.Second, otelArgs.ScraperControllerSettings.InitialDelay) + require.Equal(t, 0*time.Second, otelArgs.ScraperControllerSettings.Timeout) + + // Verify ResourceAttributesConfig fields + require.True(t, otelArgs.ResourceAttributes.VcenterClusterName.Enabled) + require.True(t, otelArgs.ResourceAttributes.VcenterDatastoreName.Enabled) + require.True(t, otelArgs.ResourceAttributes.VcenterHostName.Enabled) + require.False(t, otelArgs.ResourceAttributes.VcenterResourcePoolInventoryPath.Enabled) + require.True(t, otelArgs.ResourceAttributes.VcenterResourcePoolName.Enabled) + require.True(t, otelArgs.ResourceAttributes.VcenterVMName.Enabled) + require.True(t, otelArgs.ResourceAttributes.VcenterVMID.Enabled) + + // Verify MetricsConfig fields + require.False(t, otelArgs.Metrics.VcenterClusterCPUEffective.Enabled) + require.True(t, otelArgs.Metrics.VcenterClusterCPULimit.Enabled) + require.True(t, otelArgs.Metrics.VcenterClusterHostCount.Enabled) + require.True(t, otelArgs.Metrics.VcenterClusterMemoryEffective.Enabled) + require.True(t, otelArgs.Metrics.VcenterClusterMemoryLimit.Enabled) + require.True(t, otelArgs.Metrics.VcenterClusterMemoryUsed.Enabled) + require.True(t, otelArgs.Metrics.VcenterClusterVMCount.Enabled) + require.True(t, otelArgs.Metrics.VcenterDatastoreDiskUsage.Enabled) + require.True(t, otelArgs.Metrics.VcenterDatastoreDiskUtilization.Enabled) + require.True(t, otelArgs.Metrics.VcenterHostCPUUsage.Enabled) + require.True(t, otelArgs.Metrics.VcenterHostCPUUtilization.Enabled) + require.True(t, otelArgs.Metrics.VcenterHostDiskLatencyAvg.Enabled) + require.True(t, otelArgs.Metrics.VcenterHostDiskLatencyMax.Enabled) + require.True(t, otelArgs.Metrics.VcenterHostDiskThroughput.Enabled) + require.True(t, otelArgs.Metrics.VcenterHostMemoryUsage.Enabled) + require.True(t, otelArgs.Metrics.VcenterHostMemoryUtilization.Enabled) + require.True(t, otelArgs.Metrics.VcenterHostNetworkPacketCount.Enabled) + require.True(t, 
otelArgs.Metrics.VcenterHostNetworkPacketErrors.Enabled) + require.True(t, otelArgs.Metrics.VcenterHostNetworkThroughput.Enabled) + require.True(t, otelArgs.Metrics.VcenterHostNetworkUsage.Enabled) + require.True(t, otelArgs.Metrics.VcenterResourcePoolCPUShares.Enabled) + require.True(t, otelArgs.Metrics.VcenterResourcePoolCPUUsage.Enabled) + require.True(t, otelArgs.Metrics.VcenterResourcePoolMemoryShares.Enabled) + require.True(t, otelArgs.Metrics.VcenterResourcePoolMemoryUsage.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMCPUUsage.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMCPUUtilization.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMDiskLatencyAvg.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMDiskLatencyMax.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMDiskThroughput.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMDiskUsage.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMDiskUtilization.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMMemoryBallooned.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMMemorySwapped.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMMemorySwappedSsd.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMMemoryUsage.Enabled) + require.False(t, otelArgs.Metrics.VcenterVMMemoryUtilization.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMNetworkPacketCount.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMNetworkThroughput.Enabled) + require.True(t, otelArgs.Metrics.VcenterVMNetworkUsage.Enabled) +} diff --git a/component/otelcol/receiver/zipkin/zipkin.go b/component/otelcol/receiver/zipkin/zipkin.go index 50ac8fb23cb7..1727d38a0d05 100644 --- a/component/otelcol/receiver/zipkin/zipkin.go +++ b/component/otelcol/receiver/zipkin/zipkin.go @@ -5,7 +5,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/receiver" - otel_service "github.com/grafana/agent/service/otel" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" @@ -13,9 +12,8 @@ import ( func init() { component.Register(component.Registration{ - Name: "otelcol.receiver.zipkin", - Args: Arguments{}, - NeedsServices: []string{otel_service.ServiceName}, + Name: "otelcol.receiver.zipkin", + Args: Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { fact := zipkinreceiver.NewFactory() diff --git a/component/prometheus/exporter/agent/agent.go b/component/prometheus/exporter/agent/agent.go index 6d8064d2771b..5a02005c92b1 100644 --- a/component/prometheus/exporter/agent/agent.go +++ b/component/prometheus/exporter/agent/agent.go @@ -9,11 +9,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.agent", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "agent"), + Name: "prometheus.exporter.agent", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "agent"), }) } diff --git a/component/prometheus/exporter/apache/apache.go b/component/prometheus/exporter/apache/apache.go index 802792f545bd..4ba9d8166832 100644 --- a/component/prometheus/exporter/apache/apache.go +++ b/component/prometheus/exporter/apache/apache.go @@ -9,11 +9,11 @@ import ( func init() { component.Register(component.Registration{ - Name: 
"prometheus.exporter.apache", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "apache"), + Name: "prometheus.exporter.apache", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "apache"), }) } diff --git a/component/prometheus/exporter/azure/azure.go b/component/prometheus/exporter/azure/azure.go index 0b2d8483e6a9..fa51a1ac01b0 100644 --- a/component/prometheus/exporter/azure/azure.go +++ b/component/prometheus/exporter/azure/azure.go @@ -9,11 +9,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.azure", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "azure"), + Name: "prometheus.exporter.azure", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "azure"), }) } @@ -35,6 +35,8 @@ type Arguments struct { MetricNameTemplate string `river:"metric_name_template,attr,optional"` MetricHelpTemplate string `river:"metric_help_template,attr,optional"` AzureCloudEnvironment string `river:"azure_cloud_environment,attr,optional"` + ValidateDimensions bool `river:"validate_dimensions,attr,optional"` + Regions []string `river:"regions,attr,optional"` } var DefaultArguments = Arguments{ @@ -43,6 +45,10 @@ var DefaultArguments = Arguments{ MetricHelpTemplate: "Azure metric {metric} for {type} with aggregation {aggregation} as {unit}", IncludedResourceTags: []string{"owner"}, AzureCloudEnvironment: "azurecloud", + // Dimensions do not always apply to all metrics for a service, which requires you to configure multiple exporters + // to fully monitor a service which is tedious. Turning off validation eliminates this complexity. The underlying + // sdk will only give back the dimensions which are valid for particular metrics. + ValidateDimensions: false, } // SetToDefault implements river.Defaulter. @@ -72,5 +78,7 @@ func (a *Arguments) Convert() *azure_exporter.Config { MetricNameTemplate: a.MetricNameTemplate, MetricHelpTemplate: a.MetricHelpTemplate, AzureCloudEnvironment: a.AzureCloudEnvironment, + ValidateDimensions: a.ValidateDimensions, + Regions: a.Regions, } } diff --git a/component/prometheus/exporter/blackbox/blackbox.go b/component/prometheus/exporter/blackbox/blackbox.go index 223ac8cb8837..62c3981e82a0 100644 --- a/component/prometheus/exporter/blackbox/blackbox.go +++ b/component/prometheus/exporter/blackbox/blackbox.go @@ -19,11 +19,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.blackbox", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.NewWithTargetBuilder(createExporter, "blackbox", buildBlackboxTargets), + Name: "prometheus.exporter.blackbox", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.NewWithTargetBuilder(createExporter, "blackbox", buildBlackboxTargets), }) } @@ -67,7 +67,7 @@ var DefaultArguments = Arguments{ // BlackboxTarget defines a target to be used by the exporter. 
type BlackboxTarget struct { - Name string `river:",label"` + Name string `river:"name,attr"` Target string `river:"address,attr"` Module string `river:"module,attr,optional"` Labels map[string]string `river:"labels,attr,optional"` @@ -106,10 +106,14 @@ func (a *Arguments) Validate() error { return errors.New("config and config_file are mutually exclusive") } + if a.ConfigFile == "" && a.Config.Value == "" { + return errors.New("config or config_file must be set") + } + var blackboxConfig blackbox_config.Config err := yaml.UnmarshalStrict([]byte(a.Config.Value), &blackboxConfig) if err != nil { - return fmt.Errorf("invalid backbox_exporter config: %s", err) + return fmt.Errorf("invalid blackbox_exporter config: %s", err) } return nil diff --git a/component/prometheus/exporter/blackbox/blackbox_test.go b/component/prometheus/exporter/blackbox/blackbox_test.go index 1b3e169a3661..3016935ee0c2 100644 --- a/component/prometheus/exporter/blackbox/blackbox_test.go +++ b/component/prometheus/exporter/blackbox/blackbox_test.go @@ -16,11 +16,13 @@ import ( func TestUnmarshalRiver(t *testing.T) { riverCfg := ` config_file = "modules.yml" - target "target_a" { + target { + name = "target_a" address = "http://example.com" module = "http_2xx" } - target "target_b" { + target { + name = "target-b" address = "http://grafana.com" module = "http_2xx" } @@ -35,7 +37,7 @@ func TestUnmarshalRiver(t *testing.T) { require.Contains(t, "target_a", args.Targets[0].Name) require.Contains(t, "http://example.com", args.Targets[0].Target) require.Contains(t, "http_2xx", args.Targets[0].Module) - require.Contains(t, "target_b", args.Targets[1].Name) + require.Contains(t, "target-b", args.Targets[1].Name) require.Contains(t, "http://grafana.com", args.Targets[1].Target) require.Contains(t, "http_2xx", args.Targets[1].Module) } @@ -44,11 +46,13 @@ func TestUnmarshalRiverWithInlineConfig(t *testing.T) { riverCfg := ` config = "{ modules: { http_2xx: { prober: http, timeout: 5s } } }" - target "target_a" { + target { + name = "target_a" address = "http://example.com" module = "http_2xx" } - target "target_b" { + target { + name = "target-b" address = "http://grafana.com" module = "http_2xx" } @@ -68,7 +72,7 @@ func TestUnmarshalRiverWithInlineConfig(t *testing.T) { require.Contains(t, "target_a", args.Targets[0].Name) require.Contains(t, "http://example.com", args.Targets[0].Target) require.Contains(t, "http_2xx", args.Targets[0].Module) - require.Contains(t, "target_b", args.Targets[1].Name) + require.Contains(t, "target-b", args.Targets[1].Name) require.Contains(t, "http://grafana.com", args.Targets[1].Target) require.Contains(t, "http_2xx", args.Targets[1].Module) } @@ -77,11 +81,13 @@ func TestUnmarshalRiverWithInlineConfigYaml(t *testing.T) { riverCfg := ` config = "modules:\n http_2xx:\n prober: http\n timeout: 5s\n" - target "target_a" { + target { + name = "target_a" address = "http://example.com" module = "http_2xx" } - target "target_b" { + target { + name = "target-b" address = "http://grafana.com" module = "http_2xx" } @@ -101,12 +107,12 @@ func TestUnmarshalRiverWithInlineConfigYaml(t *testing.T) { require.Contains(t, "target_a", args.Targets[0].Name) require.Contains(t, "http://example.com", args.Targets[0].Target) require.Contains(t, "http_2xx", args.Targets[0].Module) - require.Contains(t, "target_b", args.Targets[1].Name) + require.Contains(t, "target-b", args.Targets[1].Name) require.Contains(t, "http://grafana.com", args.Targets[1].Target) require.Contains(t, "http_2xx", args.Targets[1].Module) } -func 
TestUnmarshalRiverWithInvalidInlineConfig(t *testing.T) { +func TestUnmarshalRiverWithInvalidConfig(t *testing.T) { var tests = []struct { testname string cfg string @@ -117,24 +123,26 @@ func TestUnmarshalRiverWithInvalidInlineConfig(t *testing.T) { ` config = "{ modules: { http_2xx: { prober: http, timeout: 5s }" - target "target_a" { + target { + name = "target_a" address = "http://example.com" module = "http_2xx" } `, - `invalid backbox_exporter config: yaml: line 1: did not find expected ',' or '}'`, + `invalid blackbox_exporter config: yaml: line 1: did not find expected ',' or '}'`, }, { "Invalid property", ` config = "{ module: { http_2xx: { prober: http, timeout: 5s } } }" - target "target_a" { + target { + name = "target_a" address = "http://example.com" module = "http_2xx" } `, - "invalid backbox_exporter config: yaml: unmarshal errors:\n line 1: field module not found in type config.plain", + "invalid blackbox_exporter config: yaml: unmarshal errors:\n line 1: field module not found in type config.plain", }, { "Define config and config_file", @@ -142,13 +150,35 @@ func TestUnmarshalRiverWithInvalidInlineConfig(t *testing.T) { config_file = "config" config = "{ modules: { http_2xx: { prober: http, timeout: 5s } } }" - target "target_a" { + target { + name = "target-a" address = "http://example.com" module = "http_2xx" } `, `config and config_file are mutually exclusive`, }, + { + "Define neither config nor config_file", + ` + target { + name = "target-a" + address = "http://example.com" + module = "http_2xx" + } + `, + `config or config_file must be set`, + }, + { + "Specify label for target block instead of name attribute", + ` + target "target_a" { + address = "http://example.com" + module = "http_2xx" + } + `, + `2:4: block "target" does not support specifying labels`, + }, } for _, tt := range tests { t.Run(tt.testname, func(t *testing.T) { diff --git a/component/prometheus/exporter/cadvisor/cadvisor.go b/component/prometheus/exporter/cadvisor/cadvisor.go index 79542dbf0087..2e19f82cf469 100644 --- a/component/prometheus/exporter/cadvisor/cadvisor.go +++ b/component/prometheus/exporter/cadvisor/cadvisor.go @@ -11,11 +11,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.cadvisor", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "cadvisor"), + Name: "prometheus.exporter.cadvisor", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "cadvisor"), }) } @@ -44,7 +44,8 @@ var DefaultArguments = Arguments{ DockerTLSKey: "key.pem", DockerTLSCA: "ca.pem", - DockerOnly: false, + DockerOnly: false, + DisableRootCgroupStats: false, } // Arguments configures the prometheus.exporter.cadvisor component. @@ -66,6 +67,7 @@ type Arguments struct { DockerTLSKey string `river:"docker_tls_key,attr,optional"` DockerTLSCA string `river:"docker_tls_ca,attr,optional"` DockerOnly bool `river:"docker_only,attr,optional"` + DisableRootCgroupStats bool `river:"disable_root_cgroup_stats,attr,optional"` } // SetToDefault implements river.Defaulter. 
@@ -103,6 +105,7 @@ func (a *Arguments) Convert() *cadvisor.Config { DockerTLSKey: a.DockerTLSKey, DockerTLSCA: a.DockerTLSCA, DockerOnly: a.DockerOnly, + DisableRootCgroupStats: a.DisableRootCgroupStats, } return cfg diff --git a/component/prometheus/exporter/cloudwatch/cloudwatch.go b/component/prometheus/exporter/cloudwatch/cloudwatch.go index 6d8dc2b6d83c..aa957ce56562 100644 --- a/component/prometheus/exporter/cloudwatch/cloudwatch.go +++ b/component/prometheus/exporter/cloudwatch/cloudwatch.go @@ -11,11 +11,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.cloudwatch", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "cloudwatch"), + Name: "prometheus.exporter.cloudwatch", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "cloudwatch"), }) } diff --git a/component/prometheus/exporter/consul/consul.go b/component/prometheus/exporter/consul/consul.go index 862dc7042017..ce301f8ddc6b 100644 --- a/component/prometheus/exporter/consul/consul.go +++ b/component/prometheus/exporter/consul/consul.go @@ -11,11 +11,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.consul", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "consul"), + Name: "prometheus.exporter.consul", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "consul"), }) } diff --git a/component/prometheus/exporter/dnsmasq/dnsmasq.go b/component/prometheus/exporter/dnsmasq/dnsmasq.go index 5daaf17df4c0..f856fc4bd7df 100644 --- a/component/prometheus/exporter/dnsmasq/dnsmasq.go +++ b/component/prometheus/exporter/dnsmasq/dnsmasq.go @@ -9,11 +9,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.dnsmasq", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "dnsmasq"), + Name: "prometheus.exporter.dnsmasq", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "dnsmasq"), }) } diff --git a/component/prometheus/exporter/elasticsearch/elasticsearch.go b/component/prometheus/exporter/elasticsearch/elasticsearch.go index 84904e6e3fee..e09ed24c98d7 100644 --- a/component/prometheus/exporter/elasticsearch/elasticsearch.go +++ b/component/prometheus/exporter/elasticsearch/elasticsearch.go @@ -4,6 +4,7 @@ import ( "time" "github.com/grafana/agent/component" + commonCfg "github.com/grafana/agent/component/common/config" "github.com/grafana/agent/component/prometheus/exporter" "github.com/grafana/agent/pkg/integrations" "github.com/grafana/agent/pkg/integrations/elasticsearch_exporter" @@ -11,11 +12,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.elasticsearch", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "elasticsearch"), + Name: "prometheus.exporter.elasticsearch", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "elasticsearch"), }) } @@ -35,23 +36,24 @@ var DefaultArguments = Arguments{ } type Arguments struct { - Address string `river:"address,attr,optional"` - Timeout time.Duration `river:"timeout,attr,optional"` - AllNodes bool 
`river:"all,attr,optional"` - Node string `river:"node,attr,optional"` - ExportIndices bool `river:"indices,attr,optional"` - ExportIndicesSettings bool `river:"indices_settings,attr,optional"` - ExportClusterSettings bool `river:"cluster_settings,attr,optional"` - ExportShards bool `river:"shards,attr,optional"` - IncludeAliases bool `river:"aliases,attr,optional"` - ExportSnapshots bool `river:"snapshots,attr,optional"` - ExportClusterInfoInterval time.Duration `river:"clusterinfo_interval,attr,optional"` - CA string `river:"ca,attr,optional"` - ClientPrivateKey string `river:"client_private_key,attr,optional"` - ClientCert string `river:"client_cert,attr,optional"` - InsecureSkipVerify bool `river:"ssl_skip_verify,attr,optional"` - ExportDataStreams bool `river:"data_stream,attr,optional"` - ExportSLM bool `river:"slm,attr,optional"` + Address string `river:"address,attr,optional"` + Timeout time.Duration `river:"timeout,attr,optional"` + AllNodes bool `river:"all,attr,optional"` + Node string `river:"node,attr,optional"` + ExportIndices bool `river:"indices,attr,optional"` + ExportIndicesSettings bool `river:"indices_settings,attr,optional"` + ExportClusterSettings bool `river:"cluster_settings,attr,optional"` + ExportShards bool `river:"shards,attr,optional"` + IncludeAliases bool `river:"aliases,attr,optional"` + ExportSnapshots bool `river:"snapshots,attr,optional"` + ExportClusterInfoInterval time.Duration `river:"clusterinfo_interval,attr,optional"` + CA string `river:"ca,attr,optional"` + ClientPrivateKey string `river:"client_private_key,attr,optional"` + ClientCert string `river:"client_cert,attr,optional"` + InsecureSkipVerify bool `river:"ssl_skip_verify,attr,optional"` + ExportDataStreams bool `river:"data_stream,attr,optional"` + ExportSLM bool `river:"slm,attr,optional"` + BasicAuth *commonCfg.BasicAuth `river:"basic_auth,block,optional"` } // SetToDefault implements river.Defaulter. 
@@ -78,5 +80,6 @@ func (a *Arguments) Convert() *elasticsearch_exporter.Config { InsecureSkipVerify: a.InsecureSkipVerify, ExportDataStreams: a.ExportDataStreams, ExportSLM: a.ExportSLM, + BasicAuth: a.BasicAuth.Convert(), } } diff --git a/component/prometheus/exporter/elasticsearch/elasticsearch_test.go b/component/prometheus/exporter/elasticsearch/elasticsearch_test.go index 3e87a5a98dc6..5c71a8ac712f 100644 --- a/component/prometheus/exporter/elasticsearch/elasticsearch_test.go +++ b/component/prometheus/exporter/elasticsearch/elasticsearch_test.go @@ -4,8 +4,11 @@ import ( "testing" "time" + commonCfg "github.com/grafana/agent/component/common/config" "github.com/grafana/agent/pkg/integrations/elasticsearch_exporter" "github.com/grafana/river" + "github.com/grafana/river/rivertypes" + promCfg "github.com/prometheus/common/config" "github.com/stretchr/testify/require" ) @@ -27,6 +30,10 @@ func TestRiverUnmarshal(t *testing.T) { ssl_skip_verify = true data_stream = true slm = true + basic_auth { + username = "username" + password = "pass" + } ` var args Arguments @@ -50,6 +57,10 @@ func TestRiverUnmarshal(t *testing.T) { InsecureSkipVerify: true, ExportDataStreams: true, ExportSLM: true, + BasicAuth: &commonCfg.BasicAuth{ + Username: "username", + Password: rivertypes.Secret("pass"), + }, } require.Equal(t, expected, args) @@ -73,6 +84,10 @@ func TestConvert(t *testing.T) { ssl_skip_verify = true data_stream = true slm = true + basic_auth { + username = "username" + password = "pass" + } ` var args Arguments err := river.Unmarshal([]byte(riverConfig), &args) @@ -97,6 +112,10 @@ func TestConvert(t *testing.T) { InsecureSkipVerify: true, ExportDataStreams: true, ExportSLM: true, + BasicAuth: &promCfg.BasicAuth{ + Username: "username", + Password: promCfg.Secret("pass"), + }, } require.Equal(t, expected, *res) } diff --git a/component/prometheus/exporter/gcp/gcp.go b/component/prometheus/exporter/gcp/gcp.go index b5581b7884c4..0147b72819b3 100644 --- a/component/prometheus/exporter/gcp/gcp.go +++ b/component/prometheus/exporter/gcp/gcp.go @@ -11,11 +11,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.gcp", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "gcp"), + Name: "prometheus.exporter.gcp", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "gcp"), }) } diff --git a/component/prometheus/exporter/github/github.go b/component/prometheus/exporter/github/github.go index 354005b4e692..4d3dab5a0f2f 100644 --- a/component/prometheus/exporter/github/github.go +++ b/component/prometheus/exporter/github/github.go @@ -11,11 +11,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.github", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "github"), + Name: "prometheus.exporter.github", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "github"), }) } diff --git a/component/prometheus/exporter/kafka/kafka.go b/component/prometheus/exporter/kafka/kafka.go index 1fa01bf4d614..f68985b50d2c 100644 --- a/component/prometheus/exporter/kafka/kafka.go +++ b/component/prometheus/exporter/kafka/kafka.go @@ -9,7 +9,8 @@ import ( "github.com/grafana/agent/component/prometheus/exporter" "github.com/grafana/agent/pkg/integrations" 
"github.com/grafana/agent/pkg/integrations/kafka_exporter" - config_util "github.com/prometheus/common/config" + "github.com/grafana/river/rivertypes" + "github.com/prometheus/common/config" ) var DefaultArguments = Arguments{ @@ -24,37 +25,37 @@ var DefaultArguments = Arguments{ } type Arguments struct { - Instance string `river:"instance,attr,optional"` - KafkaURIs []string `river:"kafka_uris,attr,optional"` - UseSASL bool `river:"use_sasl,attr,optional"` - UseSASLHandshake bool `river:"use_sasl_handshake,attr,optional"` - SASLUsername string `river:"sasl_username,attr,optional"` - SASLPassword config_util.Secret `river:"sasl_password,attr,optional"` - SASLMechanism string `river:"sasl_mechanism,attr,optional"` - UseTLS bool `river:"use_tls,attr,optional"` - CAFile string `river:"ca_file,attr,optional"` - CertFile string `river:"cert_file,attr,optional"` - KeyFile string `river:"key_file,attr,optional"` - InsecureSkipVerify bool `river:"insecure_skip_verify,attr,optional"` - KafkaVersion string `river:"kafka_version,attr,optional"` - UseZooKeeperLag bool `river:"use_zookeeper_lag,attr,optional"` - ZookeeperURIs []string `river:"zookeeper_uris,attr,optional"` - ClusterName string `river:"kafka_cluster_name,attr,optional"` - MetadataRefreshInterval string `river:"metadata_refresh_interval,attr,optional"` - AllowConcurrent bool `river:"allow_concurrency,attr,optional"` - MaxOffsets int `river:"max_offsets,attr,optional"` - PruneIntervalSeconds int `river:"prune_interval_seconds,attr,optional"` - TopicsFilter string `river:"topics_filter_regex,attr,optional"` - GroupFilter string `river:"groups_filter_regex,attr,optional"` + Instance string `river:"instance,attr,optional"` + KafkaURIs []string `river:"kafka_uris,attr,optional"` + UseSASL bool `river:"use_sasl,attr,optional"` + UseSASLHandshake bool `river:"use_sasl_handshake,attr,optional"` + SASLUsername string `river:"sasl_username,attr,optional"` + SASLPassword rivertypes.Secret `river:"sasl_password,attr,optional"` + SASLMechanism string `river:"sasl_mechanism,attr,optional"` + UseTLS bool `river:"use_tls,attr,optional"` + CAFile string `river:"ca_file,attr,optional"` + CertFile string `river:"cert_file,attr,optional"` + KeyFile string `river:"key_file,attr,optional"` + InsecureSkipVerify bool `river:"insecure_skip_verify,attr,optional"` + KafkaVersion string `river:"kafka_version,attr,optional"` + UseZooKeeperLag bool `river:"use_zookeeper_lag,attr,optional"` + ZookeeperURIs []string `river:"zookeeper_uris,attr,optional"` + ClusterName string `river:"kafka_cluster_name,attr,optional"` + MetadataRefreshInterval string `river:"metadata_refresh_interval,attr,optional"` + AllowConcurrent bool `river:"allow_concurrency,attr,optional"` + MaxOffsets int `river:"max_offsets,attr,optional"` + PruneIntervalSeconds int `river:"prune_interval_seconds,attr,optional"` + TopicsFilter string `river:"topics_filter_regex,attr,optional"` + GroupFilter string `river:"groups_filter_regex,attr,optional"` } func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.kafka", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.NewWithTargetBuilder(createExporter, "kafka", customizeTarget), + Name: "prometheus.exporter.kafka", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.NewWithTargetBuilder(createExporter, "kafka", customizeTarget), }) } @@ -93,7 +94,7 @@ func (a *Arguments) Convert() *kafka_exporter.Config { UseSASL: a.UseSASL, UseSASLHandshake: 
a.UseSASLHandshake, SASLUsername: a.SASLUsername, - SASLPassword: a.SASLPassword, + SASLPassword: config.Secret(a.SASLPassword), SASLMechanism: a.SASLMechanism, UseTLS: a.UseTLS, CAFile: a.CAFile, diff --git a/component/prometheus/exporter/kafka/kafka_test.go b/component/prometheus/exporter/kafka/kafka_test.go index 26f321dc39fa..4209da21cb7d 100644 --- a/component/prometheus/exporter/kafka/kafka_test.go +++ b/component/prometheus/exporter/kafka/kafka_test.go @@ -107,3 +107,15 @@ func TestCustomizeTarget(t *testing.T) { require.Equal(t, 1, len(newTargets)) require.Equal(t, "example", newTargets[0]["instance"]) } + +func TestSASLPassword(t *testing.T) { // #6044 + var exampleRiverConfig = ` + kafka_uris = ["broker1"] + use_sasl = true + sasl_password = "foobar" + ` + + var args Arguments + err := river.Unmarshal([]byte(exampleRiverConfig), &args) + require.NoError(t, err) +} diff --git a/component/prometheus/exporter/memcached/memcached.go b/component/prometheus/exporter/memcached/memcached.go index de5213a273a2..09d5214855fc 100644 --- a/component/prometheus/exporter/memcached/memcached.go +++ b/component/prometheus/exporter/memcached/memcached.go @@ -12,11 +12,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.memcached", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "memcached"), + Name: "prometheus.exporter.memcached", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "memcached"), }) } diff --git a/component/prometheus/exporter/mongodb/mongodb.go b/component/prometheus/exporter/mongodb/mongodb.go index 6905865285bd..0c0064c5b5c1 100644 --- a/component/prometheus/exporter/mongodb/mongodb.go +++ b/component/prometheus/exporter/mongodb/mongodb.go @@ -11,11 +11,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.mongodb", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "mongodb"), + Name: "prometheus.exporter.mongodb", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "mongodb"), }) } diff --git a/component/prometheus/exporter/mssql/mssql.go b/component/prometheus/exporter/mssql/mssql.go index 819be78a8927..bef73f16a44c 100644 --- a/component/prometheus/exporter/mssql/mssql.go +++ b/component/prometheus/exporter/mssql/mssql.go @@ -2,23 +2,27 @@ package mssql import ( "errors" + "fmt" "time" + "github.com/burningalchemist/sql_exporter/config" "github.com/grafana/agent/component" "github.com/grafana/agent/component/prometheus/exporter" "github.com/grafana/agent/pkg/integrations" "github.com/grafana/agent/pkg/integrations/mssql" + "github.com/grafana/agent/pkg/util" "github.com/grafana/river/rivertypes" config_util "github.com/prometheus/common/config" + "gopkg.in/yaml.v2" ) func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.mssql", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "mssql"), + Name: "prometheus.exporter.mssql", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "mssql"), }) } @@ -36,10 +40,11 @@ var DefaultArguments = Arguments{ // Arguments controls the mssql exporter. 
type Arguments struct { - ConnectionString rivertypes.Secret `river:"connection_string,attr"` - MaxIdleConnections int `river:"max_idle_connections,attr,optional"` - MaxOpenConnections int `river:"max_open_connections,attr,optional"` - Timeout time.Duration `river:"timeout,attr,optional"` + ConnectionString rivertypes.Secret `river:"connection_string,attr"` + MaxIdleConnections int `river:"max_idle_connections,attr,optional"` + MaxOpenConnections int `river:"max_open_connections,attr,optional"` + Timeout time.Duration `river:"timeout,attr,optional"` + QueryConfig rivertypes.OptionalSecret `river:"query_config,attr,optional"` } // SetToDefault implements river.Defaulter. @@ -60,6 +65,13 @@ func (a *Arguments) Validate() error { if a.Timeout <= 0 { return errors.New("timeout must be positive") } + + var collectorConfig config.CollectorConfig + err := yaml.UnmarshalStrict([]byte(a.QueryConfig.Value), &collectorConfig) + if err != nil { + return fmt.Errorf("invalid query_config: %s", err) + } + return nil } @@ -69,5 +81,6 @@ func (a *Arguments) Convert() *mssql.Config { MaxIdleConnections: a.MaxIdleConnections, MaxOpenConnections: a.MaxOpenConnections, Timeout: a.Timeout, + QueryConfig: util.RawYAML(a.QueryConfig.Value), } } diff --git a/component/prometheus/exporter/mssql/mssql_test.go b/component/prometheus/exporter/mssql/mssql_test.go index b9a47ad3b776..4fad4a819780 100644 --- a/component/prometheus/exporter/mssql/mssql_test.go +++ b/component/prometheus/exporter/mssql/mssql_test.go @@ -4,11 +4,13 @@ import ( "testing" "time" + "github.com/burningalchemist/sql_exporter/config" "github.com/grafana/agent/pkg/integrations/mssql" "github.com/grafana/river" "github.com/grafana/river/rivertypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" ) func TestRiverUnmarshal(t *testing.T) { @@ -16,8 +18,7 @@ func TestRiverUnmarshal(t *testing.T) { connection_string = "sqlserver://user:pass@localhost:1433" max_idle_connections = 3 max_open_connections = 3 - timeout = "10s" - ` + timeout = "10s"` var args Arguments err := river.Unmarshal([]byte(riverConfig), &args) @@ -33,6 +34,64 @@ func TestRiverUnmarshal(t *testing.T) { require.Equal(t, expected, args) } +func TestRiverUnmarshalWithInlineQueryConfig(t *testing.T) { + riverConfig := ` + connection_string = "sqlserver://user:pass@localhost:1433" + max_idle_connections = 3 + max_open_connections = 3 + timeout = "10s" + query_config = "{ collector_name: mssql_standard, metrics: [ { metric_name: mssql_local_time_seconds, type: gauge, help: 'Local time in seconds since epoch (Unix time).', values: [ unix_time ], query: \"SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time\" } ] }"` + + var args Arguments + err := river.Unmarshal([]byte(riverConfig), &args) + require.NoError(t, err) + var collectorConfig config.CollectorConfig + err = yaml.UnmarshalStrict([]byte(args.QueryConfig.Value), &collectorConfig) + require.NoError(t, err) + + require.Equal(t, rivertypes.Secret("sqlserver://user:pass@localhost:1433"), args.ConnectionString) + require.Equal(t, 3, args.MaxIdleConnections) + require.Equal(t, 3, args.MaxOpenConnections) + require.Equal(t, 10*time.Second, args.Timeout) + require.Equal(t, "mssql_standard", collectorConfig.Name) + require.Equal(t, 1, len(collectorConfig.Metrics)) + require.Equal(t, "mssql_local_time_seconds", collectorConfig.Metrics[0].Name) + require.Equal(t, "gauge", collectorConfig.Metrics[0].TypeString) + require.Equal(t, "Local time in seconds since epoch (Unix 
time).", collectorConfig.Metrics[0].Help) + require.Equal(t, 1, len(collectorConfig.Metrics[0].Values)) + require.Contains(t, collectorConfig.Metrics[0].Values, "unix_time") + require.Equal(t, "SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time", collectorConfig.Metrics[0].QueryLiteral) +} + +func TestRiverUnmarshalWithInlineQueryConfigYaml(t *testing.T) { + riverConfig := ` + connection_string = "sqlserver://user:pass@localhost:1433" + max_idle_connections = 3 + max_open_connections = 3 + timeout = "10s" + query_config = "collector_name: mssql_standard\nmetrics:\n- metric_name: mssql_local_time_seconds\n type: gauge\n help: 'Local time in seconds since epoch (Unix time).'\n values: [unix_time]\n query: \"SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time\""` + + var args Arguments + err := river.Unmarshal([]byte(riverConfig), &args) + require.NoError(t, err) + var collectorConfig config.CollectorConfig + err = yaml.UnmarshalStrict([]byte(args.QueryConfig.Value), &collectorConfig) + require.NoError(t, err) + + require.Equal(t, rivertypes.Secret("sqlserver://user:pass@localhost:1433"), args.ConnectionString) + require.Equal(t, 3, args.MaxIdleConnections) + require.Equal(t, 3, args.MaxOpenConnections) + require.Equal(t, 10*time.Second, args.Timeout) + require.Equal(t, "mssql_standard", collectorConfig.Name) + require.Equal(t, 1, len(collectorConfig.Metrics)) + require.Equal(t, "mssql_local_time_seconds", collectorConfig.Metrics[0].Name) + require.Equal(t, "gauge", collectorConfig.Metrics[0].TypeString) + require.Equal(t, "Local time in seconds since epoch (Unix time).", collectorConfig.Metrics[0].Help) + require.Equal(t, 1, len(collectorConfig.Metrics[0].Values)) + require.Contains(t, collectorConfig.Metrics[0].Values, "unix_time") + require.Equal(t, "SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time", collectorConfig.Metrics[0].QueryLiteral) +} + func TestUnmarshalInvalid(t *testing.T) { invalidRiverConfig := ` connection_string = "sqlserver://user:pass@localhost:1433" @@ -44,6 +103,37 @@ func TestUnmarshalInvalid(t *testing.T) { var invalidArgs Arguments err := river.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) require.Error(t, err) + require.EqualError(t, err, "timeout must be positive") +} + +func TestUnmarshalInvalidQueryConfigYaml(t *testing.T) { + invalidRiverConfig := ` + connection_string = "sqlserver://user:pass@localhost:1433" + max_idle_connections = 1 + max_open_connections = 1 + timeout = "1s" + query_config = "{ collector_name: mssql_standard, metrics: [ { metric_name: mssql_local_time_seconds, type: gauge, help: 'Local time in seconds since epoch (Unix time).', values: [ unix_time ], query: \"SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time\" }" + ` + + var invalidArgs Arguments + err := river.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) + require.Error(t, err) + require.EqualError(t, err, "invalid query_config: yaml: line 1: did not find expected ',' or ']'") +} + +func TestUnmarshalInvalidProperty(t *testing.T) { + invalidRiverConfig := ` + connection_string = "sqlserver://user:pass@localhost:1433" + max_idle_connections = 1 + max_open_connections = 1 + timeout = "1s" + query_config = "collector_name: mssql_standard\nbad_param: true\nmetrics:\n- metric_name: mssql_local_time_seconds\n type: gauge\n help: 'Local time in seconds since epoch (Unix time).'\n values: [unix_time]\n query: \"SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time\"" + ` + + var invalidArgs Arguments + err := 
river.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) + require.Error(t, err) + require.EqualError(t, err, "invalid query_config: unknown fields in collector: bad_param") } func TestArgumentsValidate(t *testing.T) { @@ -89,6 +179,9 @@ func TestArgumentsValidate(t *testing.T) { MaxIdleConnections: 1, MaxOpenConnections: 1, Timeout: 10 * time.Second, + QueryConfig: rivertypes.OptionalSecret{ + Value: `{ collector_name: mssql_standard, metrics: [ { metric_name: mssql_local_time_seconds, type: gauge, help: 'Local time in seconds since epoch (Unix time).', values: [ unix_time ], query: "SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time" } ] }`, + }, }, wantErr: false, }, @@ -107,20 +200,31 @@ func TestArgumentsValidate(t *testing.T) { } func TestConvert(t *testing.T) { - riverConfig := ` - connection_string = "sqlserver://user:pass@localhost:1433" - ` - var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) - require.NoError(t, err) + strQueryConfig := `collector_name: mssql_standard +metrics: +- metric_name: mssql_local_time_seconds + type: gauge + help: 'Local time in seconds since epoch (Unix time).' + values: [unix_time] + query: "SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time"` + args := Arguments{ + ConnectionString: rivertypes.Secret("sqlserver://user:pass@localhost:1433"), + MaxIdleConnections: 1, + MaxOpenConnections: 1, + Timeout: 10 * time.Second, + QueryConfig: rivertypes.OptionalSecret{ + Value: strQueryConfig, + }, + } res := args.Convert() expected := mssql.Config{ ConnectionString: config_util.Secret("sqlserver://user:pass@localhost:1433"), - MaxIdleConnections: DefaultArguments.MaxIdleConnections, - MaxOpenConnections: DefaultArguments.MaxOpenConnections, - Timeout: DefaultArguments.Timeout, + MaxIdleConnections: 1, + MaxOpenConnections: 1, + Timeout: 10 * time.Second, + QueryConfig: []byte(strQueryConfig), } require.Equal(t, expected, *res) } diff --git a/component/prometheus/exporter/mysql/mysql.go b/component/prometheus/exporter/mysql/mysql.go index 90201a450a36..b23f3e170394 100644 --- a/component/prometheus/exporter/mysql/mysql.go +++ b/component/prometheus/exporter/mysql/mysql.go @@ -12,11 +12,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.mysql", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "mysql"), + Name: "prometheus.exporter.mysql", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "mysql"), }) } diff --git a/component/prometheus/exporter/oracledb/oracledb.go b/component/prometheus/exporter/oracledb/oracledb.go index de3fd28ac90b..60926d445fbb 100644 --- a/component/prometheus/exporter/oracledb/oracledb.go +++ b/component/prometheus/exporter/oracledb/oracledb.go @@ -15,11 +15,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.oracledb", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "oracledb"), + Name: "prometheus.exporter.oracledb", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "oracledb"), }) } diff --git a/component/prometheus/exporter/postgres/postgres.go b/component/prometheus/exporter/postgres/postgres.go index 97f940f910bf..9a3f170c1734 100644 --- a/component/prometheus/exporter/postgres/postgres.go +++ 
b/component/prometheus/exporter/postgres/postgres.go @@ -15,11 +15,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.postgres", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "postgres"), + Name: "prometheus.exporter.postgres", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "postgres"), }) } diff --git a/component/prometheus/exporter/process/process.go b/component/prometheus/exporter/process/process.go index 89aa613d1fae..6d8109d76fd8 100644 --- a/component/prometheus/exporter/process/process.go +++ b/component/prometheus/exporter/process/process.go @@ -10,11 +10,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.process", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createIntegration, "process"), + Name: "prometheus.exporter.process", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createIntegration, "process"), }) } diff --git a/component/prometheus/exporter/redis/redis.go b/component/prometheus/exporter/redis/redis.go index c6fc45822bf3..3522b07fb78e 100644 --- a/component/prometheus/exporter/redis/redis.go +++ b/component/prometheus/exporter/redis/redis.go @@ -15,11 +15,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.redis", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "redis"), + Name: "prometheus.exporter.redis", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "redis"), }) } diff --git a/component/prometheus/exporter/services.go b/component/prometheus/exporter/services.go deleted file mode 100644 index 272c1fde2590..000000000000 --- a/component/prometheus/exporter/services.go +++ /dev/null @@ -1,20 +0,0 @@ -package exporter - -import ( - "github.com/grafana/agent/service/http" - "golang.org/x/exp/maps" -) - -// RequiredServices returns the set of services needed by all -// prometheus.exporter components. Callers may optionally pass in additional -// services to add to the returned list. 
-func RequiredServices(additionalServices ...string) []string { - services := map[string]struct{}{ - http.ServiceName: {}, - } - for _, svc := range additionalServices { - services[svc] = struct{}{} - } - - return maps.Keys(services) -} diff --git a/component/prometheus/exporter/snmp/snmp.go b/component/prometheus/exporter/snmp/snmp.go index e34feff275db..a050c331a85b 100644 --- a/component/prometheus/exporter/snmp/snmp.go +++ b/component/prometheus/exporter/snmp/snmp.go @@ -17,11 +17,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.snmp", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.NewWithTargetBuilder(createExporter, "snmp", buildSNMPTargets), + Name: "prometheus.exporter.snmp", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.NewWithTargetBuilder(createExporter, "snmp", buildSNMPTargets), }) } diff --git a/component/prometheus/exporter/snowflake/snowflake.go b/component/prometheus/exporter/snowflake/snowflake.go index 4b2314036062..0da475d356d5 100644 --- a/component/prometheus/exporter/snowflake/snowflake.go +++ b/component/prometheus/exporter/snowflake/snowflake.go @@ -11,11 +11,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.snowflake", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "snowflake"), + Name: "prometheus.exporter.snowflake", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "snowflake"), }) } diff --git a/component/prometheus/exporter/squid/squid.go b/component/prometheus/exporter/squid/squid.go index 00449a8cba00..4af71e076a1c 100644 --- a/component/prometheus/exporter/squid/squid.go +++ b/component/prometheus/exporter/squid/squid.go @@ -13,11 +13,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.squid", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "squid"), + Name: "prometheus.exporter.squid", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "squid"), }) } diff --git a/component/prometheus/exporter/statsd/statsd.go b/component/prometheus/exporter/statsd/statsd.go index 10c308077e23..3d7b2c0dafa1 100644 --- a/component/prometheus/exporter/statsd/statsd.go +++ b/component/prometheus/exporter/statsd/statsd.go @@ -8,11 +8,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.statsd", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "statsd"), + Name: "prometheus.exporter.statsd", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "statsd"), }) } diff --git a/component/prometheus/exporter/unix/unix.go b/component/prometheus/exporter/unix/unix.go index e33fa9a63136..b1c3af6cb859 100644 --- a/component/prometheus/exporter/unix/unix.go +++ b/component/prometheus/exporter/unix/unix.go @@ -8,11 +8,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.unix", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "unix"), + Name: "prometheus.exporter.unix", + Args: Arguments{}, 
+ Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "unix"), }) } diff --git a/component/prometheus/exporter/vsphere/vsphere.go b/component/prometheus/exporter/vsphere/vsphere.go index d50894b5e446..e8cd625c3cdf 100644 --- a/component/prometheus/exporter/vsphere/vsphere.go +++ b/component/prometheus/exporter/vsphere/vsphere.go @@ -13,11 +13,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.vsphere", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "vsphere"), + Name: "prometheus.exporter.vsphere", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "vsphere"), }) } diff --git a/component/prometheus/exporter/windows/config.go b/component/prometheus/exporter/windows/config.go index 674d355011c5..cc4cb20e4b17 100644 --- a/component/prometheus/exporter/windows/config.go +++ b/component/prometheus/exporter/windows/config.go @@ -6,72 +6,6 @@ import ( windows_integration "github.com/grafana/agent/pkg/integrations/windows_exporter" ) -// DefaultArguments holds non-zero default options for Arguments when it is -// unmarshaled from YAML. -// -// Some defaults are populated from init functions in the github.com/grafana/agent/pkg/integrations/windows_exporter package. - -var DefaultArguments = Arguments{ - EnabledCollectors: strings.Split(windows_integration.DefaultConfig.EnabledCollectors, ","), - Dfsr: DfsrConfig{ - SourcesEnabled: strings.Split(windows_integration.DefaultConfig.Dfsr.SourcesEnabled, ","), - }, - Exchange: ExchangeConfig{ - EnabledList: strings.Split(windows_integration.DefaultConfig.Exchange.EnabledList, ","), - }, - IIS: IISConfig{ - AppBlackList: windows_integration.DefaultConfig.IIS.AppBlackList, - AppWhiteList: windows_integration.DefaultConfig.IIS.AppWhiteList, - SiteBlackList: windows_integration.DefaultConfig.IIS.SiteBlackList, - SiteWhiteList: windows_integration.DefaultConfig.IIS.SiteWhiteList, - AppInclude: windows_integration.DefaultConfig.IIS.AppInclude, - AppExclude: windows_integration.DefaultConfig.IIS.AppExclude, - SiteInclude: windows_integration.DefaultConfig.IIS.SiteInclude, - SiteExclude: windows_integration.DefaultConfig.IIS.SiteExclude, - }, - LogicalDisk: LogicalDiskConfig{ - BlackList: windows_integration.DefaultConfig.LogicalDisk.BlackList, - WhiteList: windows_integration.DefaultConfig.LogicalDisk.WhiteList, - Include: windows_integration.DefaultConfig.LogicalDisk.Include, - Exclude: windows_integration.DefaultConfig.LogicalDisk.Exclude, - }, - MSMQ: MSMQConfig{ - Where: windows_integration.DefaultConfig.MSMQ.Where, - }, - MSSQL: MSSQLConfig{ - EnabledClasses: strings.Split(windows_integration.DefaultConfig.MSSQL.EnabledClasses, ","), - }, - Network: NetworkConfig{ - BlackList: windows_integration.DefaultConfig.Network.BlackList, - WhiteList: windows_integration.DefaultConfig.Network.WhiteList, - Include: windows_integration.DefaultConfig.Network.Include, - Exclude: windows_integration.DefaultConfig.Network.Exclude, - }, - Process: ProcessConfig{ - BlackList: windows_integration.DefaultConfig.Process.BlackList, - WhiteList: windows_integration.DefaultConfig.Process.WhiteList, - Include: windows_integration.DefaultConfig.Process.Include, - Exclude: windows_integration.DefaultConfig.Process.Exclude, - }, - ScheduledTask: ScheduledTaskConfig{ - Include: windows_integration.DefaultConfig.ScheduledTask.Include, - Exclude: 
windows_integration.DefaultConfig.ScheduledTask.Exclude, - }, - Service: ServiceConfig{ - UseApi: windows_integration.DefaultConfig.Service.UseApi, - Where: windows_integration.DefaultConfig.Service.Where, - }, - SMTP: SMTPConfig{ - BlackList: windows_integration.DefaultConfig.SMTP.BlackList, - WhiteList: windows_integration.DefaultConfig.SMTP.WhiteList, - Include: windows_integration.DefaultConfig.SMTP.Include, - Exclude: windows_integration.DefaultConfig.SMTP.Exclude, - }, - TextFile: TextFileConfig{ - TextFileDirectory: windows_integration.DefaultConfig.TextFile.TextFileDirectory, - }, -} - // Arguments is used for controlling for this exporter. type Arguments struct { // Collectors to mark as enabled @@ -92,11 +26,6 @@ type Arguments struct { TextFile TextFileConfig `river:"text_file,block,optional"` } -// SetToDefault implements river.Defaulter. -func (a *Arguments) SetToDefault() { - *a = DefaultArguments -} - // Convert converts the component's Arguments to the integration's Config. func (a *Arguments) Convert() *windows_integration.Config { return &windows_integration.Config{ diff --git a/component/prometheus/exporter/windows/config_default_windows_test.go b/component/prometheus/exporter/windows/config_default_windows_test.go index c17f6e33fa60..9fddd1d635eb 100644 --- a/component/prometheus/exporter/windows/config_default_windows_test.go +++ b/component/prometheus/exporter/windows/config_default_windows_test.go @@ -1,10 +1,8 @@ package windows import ( - "strings" "testing" - windows_integration "github.com/grafana/agent/pkg/integrations/windows_exporter" "github.com/grafana/river" "github.com/stretchr/testify/require" ) @@ -14,26 +12,26 @@ func TestRiverUnmarshalWithDefaultConfig(t *testing.T) { err := river.Unmarshal([]byte(""), &args) require.NoError(t, err) - require.Equal(t, strings.Split(windows_integration.DefaultConfig.EnabledCollectors, ","), args.EnabledCollectors) - require.Equal(t, strings.Split(windows_integration.DefaultConfig.Dfsr.SourcesEnabled, ","), args.Dfsr.SourcesEnabled) - require.Equal(t, strings.Split(windows_integration.DefaultConfig.Exchange.EnabledList, ","), args.Exchange.EnabledList) - require.Equal(t, windows_integration.DefaultConfig.IIS.AppExclude, args.IIS.AppExclude) - require.Equal(t, windows_integration.DefaultConfig.IIS.AppInclude, args.IIS.AppInclude) - require.Equal(t, windows_integration.DefaultConfig.IIS.SiteExclude, args.IIS.SiteExclude) - require.Equal(t, windows_integration.DefaultConfig.IIS.SiteInclude, args.IIS.SiteInclude) - require.Equal(t, windows_integration.DefaultConfig.LogicalDisk.Exclude, args.LogicalDisk.Exclude) - require.Equal(t, windows_integration.DefaultConfig.LogicalDisk.Include, args.LogicalDisk.Include) - require.Equal(t, windows_integration.DefaultConfig.MSMQ.Where, args.MSMQ.Where) - require.Equal(t, strings.Split(windows_integration.DefaultConfig.MSSQL.EnabledClasses, ","), args.MSSQL.EnabledClasses) - require.Equal(t, windows_integration.DefaultConfig.Network.Exclude, args.Network.Exclude) - require.Equal(t, windows_integration.DefaultConfig.Network.Include, args.Network.Include) - require.Equal(t, windows_integration.DefaultConfig.Process.Exclude, args.Process.Exclude) - require.Equal(t, windows_integration.DefaultConfig.Process.Include, args.Process.Include) - require.Equal(t, windows_integration.DefaultConfig.ScheduledTask.Exclude, args.ScheduledTask.Exclude) - require.Equal(t, windows_integration.DefaultConfig.ScheduledTask.Include, args.ScheduledTask.Include) - require.Equal(t, 
windows_integration.DefaultConfig.Service.UseApi, args.Service.UseApi) - require.Equal(t, windows_integration.DefaultConfig.Service.Where, args.Service.Where) - require.Equal(t, windows_integration.DefaultConfig.SMTP.Exclude, args.SMTP.Exclude) - require.Equal(t, windows_integration.DefaultConfig.SMTP.Include, args.SMTP.Include) - require.Equal(t, windows_integration.DefaultConfig.TextFile.TextFileDirectory, args.TextFile.TextFileDirectory) + require.Equal(t, DefaultArguments.EnabledCollectors, args.EnabledCollectors) + require.Equal(t, DefaultArguments.Dfsr.SourcesEnabled, args.Dfsr.SourcesEnabled) + require.Equal(t, DefaultArguments.Exchange.EnabledList, args.Exchange.EnabledList) + require.Equal(t, DefaultArguments.IIS.AppExclude, args.IIS.AppExclude) + require.Equal(t, DefaultArguments.IIS.AppInclude, args.IIS.AppInclude) + require.Equal(t, DefaultArguments.IIS.SiteExclude, args.IIS.SiteExclude) + require.Equal(t, DefaultArguments.IIS.SiteInclude, args.IIS.SiteInclude) + require.Equal(t, DefaultArguments.LogicalDisk.Exclude, args.LogicalDisk.Exclude) + require.Equal(t, DefaultArguments.LogicalDisk.Include, args.LogicalDisk.Include) + require.Equal(t, DefaultArguments.MSMQ.Where, args.MSMQ.Where) + require.Equal(t, DefaultArguments.MSSQL.EnabledClasses, args.MSSQL.EnabledClasses) + require.Equal(t, DefaultArguments.Network.Exclude, args.Network.Exclude) + require.Equal(t, DefaultArguments.Network.Include, args.Network.Include) + require.Equal(t, DefaultArguments.Process.Exclude, args.Process.Exclude) + require.Equal(t, DefaultArguments.Process.Include, args.Process.Include) + require.Equal(t, DefaultArguments.ScheduledTask.Exclude, args.ScheduledTask.Exclude) + require.Equal(t, DefaultArguments.ScheduledTask.Include, args.ScheduledTask.Include) + require.Equal(t, DefaultArguments.Service.UseApi, args.Service.UseApi) + require.Equal(t, DefaultArguments.Service.Where, args.Service.Where) + require.Equal(t, DefaultArguments.SMTP.Exclude, args.SMTP.Exclude) + require.Equal(t, DefaultArguments.SMTP.Include, args.SMTP.Include) + require.Equal(t, DefaultArguments.TextFile.TextFileDirectory, args.TextFile.TextFileDirectory) } diff --git a/component/prometheus/exporter/windows/config_windows.go b/component/prometheus/exporter/windows/config_windows.go new file mode 100644 index 000000000000..b634788eda8c --- /dev/null +++ b/component/prometheus/exporter/windows/config_windows.go @@ -0,0 +1,75 @@ +package windows + +import ( + windows_integration "github.com/grafana/agent/pkg/integrations/windows_exporter" + col "github.com/prometheus-community/windows_exporter/pkg/collector" + "strings" +) + +// DefaultArguments holds non-zero default options for Arguments when it is +// unmarshaled from YAML. 
+var DefaultArguments = Arguments{ + EnabledCollectors: strings.Split(windows_integration.DefaultConfig.EnabledCollectors, ","), + Dfsr: DfsrConfig{ + SourcesEnabled: strings.Split(col.ConfigDefaults.Dfsr.DfsrEnabledCollectors, ","), + }, + Exchange: ExchangeConfig{ + EnabledList: strings.Split(col.ConfigDefaults.Exchange.CollectorsEnabled, ","), + }, + IIS: IISConfig{ + AppBlackList: col.ConfigDefaults.Iis.AppExclude, + AppWhiteList: col.ConfigDefaults.Iis.AppInclude, + SiteBlackList: col.ConfigDefaults.Iis.SiteExclude, + SiteWhiteList: col.ConfigDefaults.Iis.SiteInclude, + AppInclude: col.ConfigDefaults.Iis.AppInclude, + AppExclude: col.ConfigDefaults.Iis.AppExclude, + SiteInclude: col.ConfigDefaults.Iis.SiteInclude, + SiteExclude: col.ConfigDefaults.Iis.SiteExclude, + }, + LogicalDisk: LogicalDiskConfig{ + BlackList: col.ConfigDefaults.LogicalDisk.VolumeExclude, + WhiteList: col.ConfigDefaults.LogicalDisk.VolumeInclude, + Include: col.ConfigDefaults.LogicalDisk.VolumeInclude, + Exclude: col.ConfigDefaults.LogicalDisk.VolumeExclude, + }, + MSMQ: MSMQConfig{ + Where: col.ConfigDefaults.Msmq.QueryWhereClause, + }, + MSSQL: MSSQLConfig{ + EnabledClasses: strings.Split(col.ConfigDefaults.Mssql.EnabledCollectors, ","), + }, + Network: NetworkConfig{ + BlackList: col.ConfigDefaults.Net.NicExclude, + WhiteList: col.ConfigDefaults.Net.NicInclude, + Include: col.ConfigDefaults.Net.NicInclude, + Exclude: col.ConfigDefaults.Net.NicExclude, + }, + Process: ProcessConfig{ + BlackList: col.ConfigDefaults.Process.ProcessExclude, + WhiteList: col.ConfigDefaults.Process.ProcessInclude, + Include: col.ConfigDefaults.Process.ProcessInclude, + Exclude: col.ConfigDefaults.Process.ProcessExclude, + }, + ScheduledTask: ScheduledTaskConfig{ + Include: col.ConfigDefaults.ScheduledTask.TaskInclude, + Exclude: col.ConfigDefaults.ScheduledTask.TaskExclude, + }, + Service: ServiceConfig{ + UseApi: "false", + Where: col.ConfigDefaults.Service.ServiceWhereClause, + }, + SMTP: SMTPConfig{ + BlackList: col.ConfigDefaults.Smtp.ServerExclude, + WhiteList: col.ConfigDefaults.Smtp.ServerInclude, + Include: col.ConfigDefaults.Smtp.ServerInclude, + Exclude: col.ConfigDefaults.Smtp.ServerExclude, + }, + TextFile: TextFileConfig{ + TextFileDirectory: col.ConfigDefaults.Textfile.TextFileDirectories, + }, +} + +// SetToDefault implements river.Defaulter. 
+func (a *Arguments) SetToDefault() { + *a = DefaultArguments +} diff --git a/component/prometheus/exporter/windows/windows.go b/component/prometheus/exporter/windows/windows.go index 214302748aee..5f05d3cc63e7 100644 --- a/component/prometheus/exporter/windows/windows.go +++ b/component/prometheus/exporter/windows/windows.go @@ -8,11 +8,11 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.exporter.windows", - Args: Arguments{}, - Exports: exporter.Exports{}, - NeedsServices: exporter.RequiredServices(), - Build: exporter.New(createExporter, "windows"), + Name: "prometheus.exporter.windows", + Args: Arguments{}, + Exports: exporter.Exports{}, + + Build: exporter.New(createExporter, "windows"), }) } diff --git a/component/prometheus/fanout.go b/component/prometheus/fanout.go index c224ad3faf9a..32055c01cee7 100644 --- a/component/prometheus/fanout.go +++ b/component/prometheus/fanout.go @@ -6,16 +6,14 @@ import ( "time" "github.com/grafana/agent/service/labelstore" - "github.com/prometheus/client_golang/prometheus" - "github.com/hashicorp/go-multierror" - + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/scrape" - "github.com/prometheus/prometheus/storage" ) @@ -112,6 +110,12 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo if ref == 0 { ref = storage.SeriesRef(a.ls.GetOrAddGlobalRefID(l)) } + if value.IsStaleNaN(v) { + a.ls.AddStaleMarker(uint64(ref), l) + } else { + // Tested this to ensure it had no cpu impact, since it is called so often. 
+ a.ls.RemoveStaleMarker(uint64(ref)) + } var multiErr error updated := false for _, x := range a.children { diff --git a/component/prometheus/fanout_test.go b/component/prometheus/fanout_test.go index 3090b45b7a3a..14a4636cc359 100644 --- a/component/prometheus/fanout_test.go +++ b/component/prometheus/fanout_test.go @@ -14,7 +14,7 @@ import ( ) func TestRollback(t *testing.T) { - ls := labelstore.New(nil) + ls := labelstore.New(nil, prometheus.DefaultRegisterer) fanout := NewFanout([]storage.Appendable{NewFanout(nil, "1", prometheus.DefaultRegisterer, ls)}, "", prometheus.DefaultRegisterer, ls) app := fanout.Appender(context.Background()) err := app.Rollback() @@ -22,7 +22,7 @@ func TestRollback(t *testing.T) { } func TestCommit(t *testing.T) { - ls := labelstore.New(nil) + ls := labelstore.New(nil, prometheus.DefaultRegisterer) fanout := NewFanout([]storage.Appendable{NewFanout(nil, "1", prometheus.DefaultRegisterer, ls)}, "", prometheus.DefaultRegisterer, ls) app := fanout.Appender(context.Background()) err := app.Commit() diff --git a/component/prometheus/interceptor.go b/component/prometheus/interceptor.go index 7fc10f06b8f3..c9b88fc6785a 100644 --- a/component/prometheus/interceptor.go +++ b/component/prometheus/interceptor.go @@ -8,6 +8,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" ) @@ -102,6 +103,13 @@ func (a *interceptappender) Append(ref storage.SeriesRef, l labels.Labels, t int ref = storage.SeriesRef(a.ls.GetOrAddGlobalRefID(l)) } + if value.IsStaleNaN(v) { + a.ls.AddStaleMarker(uint64(ref), l) + } else { + // Tested this to ensure it had no cpu impact, since it is called so often. + a.ls.RemoveStaleMarker(uint64(ref)) + } + if a.interceptor.onAppend != nil { return a.interceptor.onAppend(ref, l, t, v, a.child) } diff --git a/component/prometheus/operator/common/crdmanager.go b/component/prometheus/operator/common/crdmanager.go index 9f8bd55f79f6..85f13719e970 100644 --- a/component/prometheus/operator/common/crdmanager.go +++ b/component/prometheus/operator/common/crdmanager.go @@ -42,12 +42,19 @@ const informerSyncTimeout = 10 * time.Second // crdManager is all of the fields required to run a crd based component. 
// on update, this entire thing should be recreated and restarted type crdManager struct { - mut sync.Mutex - discoveryConfigs map[string]discovery.Configs - scrapeConfigs map[string]*config.ScrapeConfig - debugInfo map[string]*operator.DiscoveredResource - discoveryManager *discovery.Manager - scrapeManager *scrape.Manager + mut sync.Mutex + + // these maps are keyed by job name + discoveryConfigs map[string]discovery.Configs + scrapeConfigs map[string]*config.ScrapeConfig + + // list of keys to the above maps for a given resource by `ns/name` + crdsToMapKeys map[string][]string + // debug info by `kind/ns/name` + debugInfo map[string]*operator.DiscoveredResource + + discoveryManager discoveryManager + scrapeManager scrapeManager clusteringUpdated chan struct{} ls labelstore.LabelStore @@ -80,6 +87,7 @@ func newCrdManager(opts component.Options, cluster cluster.Cluster, logger log.L cluster: cluster, discoveryConfigs: map[string]discovery.Configs{}, scrapeConfigs: map[string]*config.ScrapeConfig{}, + crdsToMapKeys: map[string][]string{}, debugInfo: map[string]*operator.DiscoveredResource{}, kind: kind, clusteringUpdated: make(chan struct{}, 1), @@ -392,6 +400,7 @@ func (c *crdManager) addPodMonitor(pm *promopv1.PodMonitor) { AdditionalRelabelConfigs: c.args.RelabelConfigs, ScrapeOptions: c.args.Scrape, } + mapKeys := []string{} for i, ep := range pm.Spec.PodMetricsEndpoints { var scrapeConfig *config.ScrapeConfig scrapeConfig, err = gen.GeneratePodMonitorConfig(pm, ep, i) @@ -400,6 +409,7 @@ func (c *crdManager) addPodMonitor(pm *promopv1.PodMonitor) { level.Error(c.logger).Log("name", pm.Name, "err", err, "msg", "error generating scrapeconfig from podmonitor") break } + mapKeys = append(mapKeys, scrapeConfig.JobName) c.mut.Lock() c.discoveryConfigs[scrapeConfig.JobName] = scrapeConfig.ServiceDiscoveryConfigs c.scrapeConfigs[scrapeConfig.JobName] = scrapeConfig @@ -409,6 +419,9 @@ func (c *crdManager) addPodMonitor(pm *promopv1.PodMonitor) { c.addDebugInfo(pm.Namespace, pm.Name, err) return } + c.mut.Lock() + c.crdsToMapKeys[fmt.Sprintf("%s/%s", pm.Namespace, pm.Name)] = mapKeys + c.mut.Unlock() if err = c.apply(); err != nil { level.Error(c.logger).Log("name", pm.Name, "err", err, "msg", "error applying scrape configs from "+c.kind) } @@ -442,6 +455,8 @@ func (c *crdManager) addServiceMonitor(sm *promopv1.ServiceMonitor) { AdditionalRelabelConfigs: c.args.RelabelConfigs, ScrapeOptions: c.args.Scrape, } + + mapKeys := []string{} for i, ep := range sm.Spec.Endpoints { var scrapeConfig *config.ScrapeConfig scrapeConfig, err = gen.GenerateServiceMonitorConfig(sm, ep, i) @@ -450,6 +465,7 @@ func (c *crdManager) addServiceMonitor(sm *promopv1.ServiceMonitor) { level.Error(c.logger).Log("name", sm.Name, "err", err, "msg", "error generating scrapeconfig from serviceMonitor") break } + mapKeys = append(mapKeys, scrapeConfig.JobName) c.mut.Lock() c.discoveryConfigs[scrapeConfig.JobName] = scrapeConfig.ServiceDiscoveryConfigs c.scrapeConfigs[scrapeConfig.JobName] = scrapeConfig @@ -459,6 +475,9 @@ func (c *crdManager) addServiceMonitor(sm *promopv1.ServiceMonitor) { c.addDebugInfo(sm.Namespace, sm.Name, err) return } + c.mut.Lock() + c.crdsToMapKeys[fmt.Sprintf("%s/%s", sm.Namespace, sm.Name)] = mapKeys + c.mut.Unlock() if err = c.apply(); err != nil { level.Error(c.logger).Log("name", sm.Name, "err", err, "msg", "error applying scrape configs from "+c.kind) } @@ -503,6 +522,7 @@ func (c *crdManager) addProbe(p *promopv1.Probe) { c.mut.Lock() c.discoveryConfigs[pmc.JobName] = 
pmc.ServiceDiscoveryConfigs c.scrapeConfigs[pmc.JobName] = pmc + c.crdsToMapKeys[fmt.Sprintf("%s/%s", p.Namespace, p.Name)] = []string{pmc.JobName} c.mut.Unlock() if err = c.apply(); err != nil { @@ -533,12 +553,10 @@ func (c *crdManager) onDeleteProbe(obj interface{}) { func (c *crdManager) clearConfigs(ns, name string) { c.mut.Lock() defer c.mut.Unlock() - prefix := fmt.Sprintf("%s/%s/%s", c.kind, ns, name) - for k := range c.discoveryConfigs { - if strings.HasPrefix(k, prefix) { - delete(c.discoveryConfigs, k) - delete(c.scrapeConfigs, k) - } + + for _, k := range c.crdsToMapKeys[fmt.Sprintf("%s/%s", ns, name)] { + delete(c.discoveryConfigs, k) + delete(c.scrapeConfigs, k) } - delete(c.debugInfo, prefix) + delete(c.debugInfo, fmt.Sprintf("%s/%s/%s", c.kind, ns, name)) } diff --git a/component/prometheus/operator/common/crdmanager_test.go b/component/prometheus/operator/common/crdmanager_test.go new file mode 100644 index 000000000000..c229a2440aac --- /dev/null +++ b/component/prometheus/operator/common/crdmanager_test.go @@ -0,0 +1,169 @@ +package common + +import ( + "testing" + + "golang.org/x/exp/maps" + + "github.com/go-kit/log" + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/prometheus/operator" + "github.com/grafana/agent/service/cluster" + "github.com/grafana/agent/service/labelstore" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/scrape" + "k8s.io/apimachinery/pkg/util/intstr" + + promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stretchr/testify/require" +) + +func TestClearConfigsSameNsSamePrefix(t *testing.T) { + logger := log.NewNopLogger() + m := newCrdManager( + component.Options{ + Logger: logger, + GetServiceData: func(name string) (interface{}, error) { return nil, nil }, + }, + cluster.Mock(), + logger, + &operator.DefaultArguments, + KindServiceMonitor, + labelstore.New(logger, prometheus.DefaultRegisterer), + ) + + m.discoveryManager = newMockDiscoveryManager() + m.scrapeManager = newMockScrapeManager() + + targetPort := intstr.FromInt(9090) + m.onAddServiceMonitor(&promopv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "monitoring", + Name: "svcmonitor", + }, + Spec: promopv1.ServiceMonitorSpec{ + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "group": "my-group", + }, + }, + Endpoints: []promopv1.Endpoint{ + { + TargetPort: &targetPort, + ScrapeTimeout: "5s", + Interval: "10s", + }, + }, + }, + }) + m.onAddServiceMonitor(&promopv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "monitoring", + Name: "svcmonitor-another", + }, + Spec: promopv1.ServiceMonitorSpec{ + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "group": "my-group", + }, + }, + Endpoints: []promopv1.Endpoint{ + { + TargetPort: &targetPort, + ScrapeTimeout: "5s", + Interval: "10s", + }, + }, + }}) + + require.ElementsMatch(t, []string{"serviceMonitor/monitoring/svcmonitor-another/0", "serviceMonitor/monitoring/svcmonitor/0"}, maps.Keys(m.discoveryConfigs)) + m.clearConfigs("monitoring", "svcmonitor") + require.ElementsMatch(t, []string{"monitoring/svcmonitor", "monitoring/svcmonitor-another"}, maps.Keys(m.crdsToMapKeys)) + require.ElementsMatch(t, []string{"serviceMonitor/monitoring/svcmonitor-another/0"}, 
maps.Keys(m.discoveryConfigs)) + require.ElementsMatch(t, []string{"serviceMonitor/monitoring/svcmonitor-another"}, maps.Keys(m.debugInfo)) +} + +func TestClearConfigsProbe(t *testing.T) { + logger := log.NewNopLogger() + m := newCrdManager( + component.Options{ + Logger: logger, + GetServiceData: func(name string) (interface{}, error) { return nil, nil }, + }, + cluster.Mock(), + logger, + &operator.DefaultArguments, + KindProbe, + labelstore.New(logger, prometheus.DefaultRegisterer), + ) + + m.discoveryManager = newMockDiscoveryManager() + m.scrapeManager = newMockScrapeManager() + + m.onAddProbe(&promopv1.Probe{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "monitoring", + Name: "probe", + }, + Spec: promopv1.ProbeSpec{}, + }) + m.onAddProbe(&promopv1.Probe{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "monitoring", + Name: "probe-another", + }, + Spec: promopv1.ProbeSpec{}}) + + require.ElementsMatch(t, []string{"probe/monitoring/probe-another", "probe/monitoring/probe"}, maps.Keys(m.discoveryConfigs)) + m.clearConfigs("monitoring", "probe") + require.ElementsMatch(t, []string{"monitoring/probe", "monitoring/probe-another"}, maps.Keys(m.crdsToMapKeys)) + require.ElementsMatch(t, []string{"probe/monitoring/probe-another"}, maps.Keys(m.discoveryConfigs)) + require.ElementsMatch(t, []string{"probe/monitoring/probe-another"}, maps.Keys(m.debugInfo)) +} + +type mockDiscoveryManager struct { +} + +func newMockDiscoveryManager() *mockDiscoveryManager { + return &mockDiscoveryManager{} +} + +func (m *mockDiscoveryManager) Run() error { + return nil +} + +func (m *mockDiscoveryManager) SyncCh() <-chan map[string][]*targetgroup.Group { + return nil +} + +func (m *mockDiscoveryManager) ApplyConfig(cfg map[string]discovery.Configs) error { + return nil +} + +type mockScrapeManager struct { +} + +func newMockScrapeManager() *mockScrapeManager { + return &mockScrapeManager{} +} + +func (m *mockScrapeManager) Run(tsets <-chan map[string][]*targetgroup.Group) error { + return nil +} + +func (m *mockScrapeManager) Stop() { + +} + +func (m *mockScrapeManager) TargetsActive() map[string][]*scrape.Target { + return nil +} + +func (m *mockScrapeManager) ApplyConfig(cfg *config.Config) error { + return nil +} diff --git a/component/prometheus/operator/common/interfaces.go b/component/prometheus/operator/common/interfaces.go new file mode 100644 index 000000000000..4652154f6dc6 --- /dev/null +++ b/component/prometheus/operator/common/interfaces.go @@ -0,0 +1,23 @@ +package common + +import ( + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/scrape" +) + +// discoveryManager is an interface around discovery.Manager +type discoveryManager interface { + Run() error + SyncCh() <-chan map[string][]*targetgroup.Group + ApplyConfig(cfg map[string]discovery.Configs) error +} + +// scrapeManager is an interface around scrape.Manager +type scrapeManager interface { + Run(tsets <-chan map[string][]*targetgroup.Group) error + Stop() + TargetsActive() map[string][]*scrape.Target + ApplyConfig(cfg *config.Config) error +} diff --git a/component/prometheus/operator/podmonitors/operator.go b/component/prometheus/operator/podmonitors/operator.go index 41fb781f5db4..ea41d6f0fe27 100644 --- a/component/prometheus/operator/podmonitors/operator.go +++ b/component/prometheus/operator/podmonitors/operator.go @@ -4,16 +4,12 @@ import ( "github.com/grafana/agent/component" 
"github.com/grafana/agent/component/prometheus/operator" "github.com/grafana/agent/component/prometheus/operator/common" - "github.com/grafana/agent/service/cluster" - "github.com/grafana/agent/service/http" - "github.com/grafana/agent/service/labelstore" ) func init() { component.Register(component.Registration{ - Name: "prometheus.operator.podmonitors", - Args: operator.Arguments{}, - NeedsServices: []string{cluster.ServiceName, http.ServiceName, labelstore.ServiceName}, + Name: "prometheus.operator.podmonitors", + Args: operator.Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return common.New(opts, args, common.KindPodMonitor) diff --git a/component/prometheus/operator/probes/probes.go b/component/prometheus/operator/probes/probes.go index 00dad3fd9821..a8d96b428489 100644 --- a/component/prometheus/operator/probes/probes.go +++ b/component/prometheus/operator/probes/probes.go @@ -4,16 +4,12 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/prometheus/operator" "github.com/grafana/agent/component/prometheus/operator/common" - "github.com/grafana/agent/service/cluster" - "github.com/grafana/agent/service/http" - "github.com/grafana/agent/service/labelstore" ) func init() { component.Register(component.Registration{ - Name: "prometheus.operator.probes", - Args: operator.Arguments{}, - NeedsServices: []string{cluster.ServiceName, http.ServiceName, labelstore.ServiceName}, + Name: "prometheus.operator.probes", + Args: operator.Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return common.New(opts, args, common.KindProbe) diff --git a/component/prometheus/operator/servicemonitors/servicemonitors.go b/component/prometheus/operator/servicemonitors/servicemonitors.go index 8df947f10883..55c1e34c2bbe 100644 --- a/component/prometheus/operator/servicemonitors/servicemonitors.go +++ b/component/prometheus/operator/servicemonitors/servicemonitors.go @@ -4,16 +4,12 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/prometheus/operator" "github.com/grafana/agent/component/prometheus/operator/common" - "github.com/grafana/agent/service/cluster" - "github.com/grafana/agent/service/http" - "github.com/grafana/agent/service/labelstore" ) func init() { component.Register(component.Registration{ - Name: "prometheus.operator.servicemonitors", - Args: operator.Arguments{}, - NeedsServices: []string{cluster.ServiceName, http.ServiceName, labelstore.ServiceName}, + Name: "prometheus.operator.servicemonitors", + Args: operator.Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return common.New(opts, args, common.KindServiceMonitor) diff --git a/component/prometheus/receive_http/receive_http.go b/component/prometheus/receive_http/receive_http.go index 5fc0abb91b1c..3e78e1e7472c 100644 --- a/component/prometheus/receive_http/receive_http.go +++ b/component/prometheus/receive_http/receive_http.go @@ -21,9 +21,9 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.receive_http", - Args: Arguments{}, - NeedsServices: []string{labelstore.ServiceName}, + Name: "prometheus.receive_http", + Args: Arguments{}, + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return New(opts, args.(Arguments)) }, diff --git a/component/prometheus/receive_http/receive_http_test.go 
b/component/prometheus/receive_http/receive_http_test.go index bf947ca4d2b3..a0c4fe7dd6d9 100644 --- a/component/prometheus/receive_http/receive_http_test.go +++ b/component/prometheus/receive_http/receive_http_test.go @@ -348,7 +348,7 @@ func testAppendable(actualSamples chan testSample) []storage.Appendable { return ref, nil } - ls := labelstore.New(nil) + ls := labelstore.New(nil, prometheus.DefaultRegisterer) return []storage.Appendable{agentprom.NewInterceptor( nil, ls, @@ -385,7 +385,7 @@ func testOptions(t *testing.T) component.Options { Logger: util.TestFlowLogger(t), Registerer: prometheus.NewRegistry(), GetServiceData: func(name string) (interface{}, error) { - return labelstore.New(nil), nil + return labelstore.New(nil, prometheus.DefaultRegisterer), nil }, } } diff --git a/component/prometheus/relabel/relabel.go b/component/prometheus/relabel/relabel.go index 2f8fd5ab077c..0f3b0c6dc9ac 100644 --- a/component/prometheus/relabel/relabel.go +++ b/component/prometheus/relabel/relabel.go @@ -26,10 +26,10 @@ import ( func init() { component.Register(component.Registration{ - Name: "prometheus.relabel", - Args: Arguments{}, - Exports: Exports{}, - NeedsServices: []string{labelstore.ServiceName}, + Name: "prometheus.relabel", + Args: Arguments{}, + Exports: Exports{}, + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return New(opts, args.(Arguments)) }, @@ -46,15 +46,23 @@ type Arguments struct { MetricRelabelConfigs []*flow_relabel.Config `river:"rule,block,optional"` // Cache size to use for LRU cache. - //CacheSize int `river:"cache_size,attr,optional"` + CacheSize int `river:"max_cache_size,attr,optional"` } // SetToDefault implements river.Defaulter. -/*func (arg *Arguments) SetToDefault() { +func (arg *Arguments) SetToDefault() { *arg = Arguments{ - CacheSize: 500_000, + CacheSize: 100_000, + } +} + +// Validate implements river.Validator. +func (arg *Arguments) Validate() error { + if arg.CacheSize <= 0 { + return fmt.Errorf("max_cache_size must be greater than 0 and is %d", arg.CacheSize) } -}*/ + return nil +} // Exports holds values which are exported by the prometheus.relabel component. type Exports struct { @@ -88,7 +96,7 @@ var ( // New creates a new prometheus.relabel component. 
func New(o component.Options, args Arguments) (*Component, error) { - cache, err := lru.New[uint64, *labelAndID](100_000) + cache, err := lru.New[uint64, *labelAndID](args.CacheSize) if err != nil { return nil, err } @@ -210,7 +218,7 @@ func (c *Component) Update(args component.Arguments) error { defer c.mut.Unlock() newArgs := args.(Arguments) - c.clearCache(100_000) + c.clearCache(newArgs.CacheSize) c.mrc = flow_relabel.ComponentToPromRelabelConfigs(newArgs.MetricRelabelConfigs) c.fanout.UpdateChildren(newArgs.ForwardTo) diff --git a/component/prometheus/relabel/relabel_test.go b/component/prometheus/relabel/relabel_test.go index e6846b4e944f..d029498555b8 100644 --- a/component/prometheus/relabel/relabel_test.go +++ b/component/prometheus/relabel/relabel_test.go @@ -24,7 +24,7 @@ import ( ) func TestCache(t *testing.T) { - lc := labelstore.New(nil) + lc := labelstore.New(nil, prom.DefaultRegisterer) relabeller := generateRelabel(t) lbls := labels.FromStrings("__address__", "localhost") relabeller.relabel(0, lbls) @@ -44,13 +44,24 @@ func TestUpdateReset(t *testing.T) { relabeller.relabel(0, lbls) require.True(t, relabeller.cache.Len() == 1) _ = relabeller.Update(Arguments{ + CacheSize: 100000, MetricRelabelConfigs: []*flow_relabel.Config{}, }) require.True(t, relabeller.cache.Len() == 0) } +func TestValidator(t *testing.T) { + args := Arguments{CacheSize: 0} + err := args.Validate() + require.Error(t, err) + + args.CacheSize = 1 + err = args.Validate() + require.NoError(t, err) +} + func TestNil(t *testing.T) { - ls := labelstore.New(nil) + ls := labelstore.New(nil, prom.DefaultRegisterer) fanout := prometheus.NewInterceptor(nil, ls, prometheus.WithAppendHook(func(ref storage.SeriesRef, _ labels.Labels, _ int64, _ float64, _ storage.Appender) (storage.SeriesRef, error) { require.True(t, false) return ref, nil @@ -61,7 +72,7 @@ func TestNil(t *testing.T) { OnStateChange: func(e component.Exports) {}, Registerer: prom.NewRegistry(), GetServiceData: func(name string) (interface{}, error) { - return labelstore.New(nil), nil + return labelstore.New(nil, prom.DefaultRegisterer), nil }, }, Arguments{ ForwardTo: []storage.Appendable{fanout}, @@ -72,6 +83,7 @@ func TestNil(t *testing.T) { Action: "drop", }, }, + CacheSize: 100000, }) require.NotNil(t, relabeller) require.NoError(t, err) @@ -100,7 +112,7 @@ func TestLRUNaN(t *testing.T) { } func BenchmarkCache(b *testing.B) { - ls := labelstore.New(nil) + ls := labelstore.New(nil, prom.DefaultRegisterer) fanout := prometheus.NewInterceptor(nil, ls, prometheus.WithAppendHook(func(ref storage.SeriesRef, l labels.Labels, _ int64, _ float64, _ storage.Appender) (storage.SeriesRef, error) { require.True(b, l.Has("new_label")) return ref, nil @@ -129,7 +141,6 @@ func BenchmarkCache(b *testing.B) { lbls := labels.FromStrings("__address__", "localhost") app := entry.Appender(context.Background()) - for i := 0; i < b.N; i++ { app.Append(0, lbls, time.Now().UnixMilli(), 0) } @@ -137,7 +148,7 @@ func BenchmarkCache(b *testing.B) { } func generateRelabel(t *testing.T) *Component { - ls := labelstore.New(nil) + ls := labelstore.New(nil, prom.DefaultRegisterer) fanout := prometheus.NewInterceptor(nil, ls, prometheus.WithAppendHook(func(ref storage.SeriesRef, l labels.Labels, _ int64, _ float64, _ storage.Appender) (storage.SeriesRef, error) { require.True(t, l.Has("new_label")) return ref, nil @@ -148,7 +159,7 @@ func generateRelabel(t *testing.T) *Component { OnStateChange: func(e component.Exports) {}, Registerer: prom.NewRegistry(), GetServiceData: 
func(name string) (interface{}, error) { - return labelstore.New(nil), nil + return labelstore.New(nil, prom.DefaultRegisterer), nil }, }, Arguments{ ForwardTo: []storage.Appendable{fanout}, @@ -161,6 +172,7 @@ func generateRelabel(t *testing.T) *Component { Action: "replace", }, }, + CacheSize: 100_000, }) require.NotNil(t, relabeller) require.NoError(t, err) diff --git a/component/prometheus/remotewrite/remote_write.go b/component/prometheus/remotewrite/remote_write.go index d36d5cde8cfe..354e3248450b 100644 --- a/component/prometheus/remotewrite/remote_write.go +++ b/component/prometheus/remotewrite/remote_write.go @@ -21,7 +21,8 @@ import ( "github.com/go-kit/log" "github.com/grafana/agent/component" - "github.com/grafana/agent/pkg/build" + "github.com/grafana/agent/internal/agentseed" + "github.com/grafana/agent/internal/useragent" "github.com/grafana/agent/pkg/flow/logging/level" "github.com/grafana/agent/pkg/metrics/wal" "github.com/prometheus/prometheus/model/timestamp" @@ -35,13 +36,13 @@ import ( var remoteFlushDeadline = 1 * time.Minute func init() { - remote.UserAgent = fmt.Sprintf("GrafanaAgent/%s", build.Version) + remote.UserAgent = useragent.Get() component.Register(component.Registration{ - Name: "prometheus.remote_write", - Args: Arguments{}, - Exports: Exports{}, - NeedsServices: []string{labelstore.ServiceName}, + Name: "prometheus.remote_write", + Args: Arguments{}, + Exports: Exports{}, + Build: func(o component.Options, c component.Arguments) (component.Component, error) { return New(o, c.(Arguments)) }, @@ -257,6 +258,13 @@ func (c *Component) Update(newConfig component.Arguments) error { if err != nil { return err } + uid := agentseed.Get().UID + for _, cfg := range convertedConfig.RemoteWriteConfigs { + if cfg.Headers == nil { + cfg.Headers = map[string]string{} + } + cfg.Headers[agentseed.HeaderName] = uid + } err = c.remoteStore.ApplyConfig(convertedConfig) if err != nil { return err diff --git a/component/prometheus/remotewrite/types.go b/component/prometheus/remotewrite/types.go index 39ef6a55a191..637059aba416 100644 --- a/component/prometheus/remotewrite/types.go +++ b/component/prometheus/remotewrite/types.go @@ -35,6 +35,7 @@ var ( MinBackoff: 30 * time.Millisecond, MaxBackoff: 5 * time.Second, RetryOnHTTP429: true, + SampleAgeLimit: 0, } DefaultMetadataOptions = MetadataOptions{ @@ -141,6 +142,7 @@ type QueueOptions struct { MinBackoff time.Duration `river:"min_backoff,attr,optional"` MaxBackoff time.Duration `river:"max_backoff,attr,optional"` RetryOnHTTP429 bool `river:"retry_on_http_429,attr,optional"` + SampleAgeLimit time.Duration `river:"sample_age_limit,attr,optional"` } // SetToDefault implements river.Defaulter. 
@@ -164,6 +166,7 @@ func (r *QueueOptions) toPrometheusType() config.QueueConfig { MinBackoff: model.Duration(r.MinBackoff), MaxBackoff: model.Duration(r.MaxBackoff), RetryOnRateLimit: r.RetryOnHTTP429, + SampleAgeLimit: model.Duration(r.SampleAgeLimit), } } @@ -231,7 +234,6 @@ func convertConfigs(cfg Arguments) (*config.Config, error) { if err != nil { return nil, fmt.Errorf("cannot parse remote_write url %q: %w", rw.URL, err) } - rwConfigs = append(rwConfigs, &config.RemoteWriteConfig{ URL: &common.URL{URL: parsedURL}, RemoteTimeout: model.Duration(rw.RemoteTimeout), diff --git a/component/prometheus/scrape/scrape.go b/component/prometheus/scrape/scrape.go index 750f71bd9aa0..8db6fc27d47e 100644 --- a/component/prometheus/scrape/scrape.go +++ b/component/prometheus/scrape/scrape.go @@ -12,7 +12,7 @@ import ( component_config "github.com/grafana/agent/component/common/config" "github.com/grafana/agent/component/discovery" "github.com/grafana/agent/component/prometheus" - "github.com/grafana/agent/pkg/build" + "github.com/grafana/agent/internal/useragent" "github.com/grafana/agent/pkg/flow/logging/level" "github.com/grafana/agent/service/cluster" "github.com/grafana/agent/service/http" @@ -27,12 +27,12 @@ import ( ) func init() { - scrape.UserAgent = fmt.Sprintf("GrafanaAgent/%s", build.Version) + scrape.UserAgent = useragent.Get() component.Register(component.Registration{ - Name: "prometheus.scrape", - Args: Arguments{}, - NeedsServices: []string{http.ServiceName, cluster.ServiceName, labelstore.ServiceName}, + Name: "prometheus.scrape", + Args: Arguments{}, + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return New(opts, args.(Arguments)) }, diff --git a/component/prometheus/scrape/scrape_test.go b/component/prometheus/scrape/scrape_test.go index 6b51ecc154fa..3a5ea459bcce 100644 --- a/component/prometheus/scrape/scrape_test.go +++ b/component/prometheus/scrape/scrape_test.go @@ -87,7 +87,7 @@ func TestForwardingToAppendable(t *testing.T) { case cluster.ServiceName: return cluster.Mock(), nil case labelstore.ServiceName: - return labelstore.New(nil), nil + return labelstore.New(nil, prometheus_client.DefaultRegisterer), nil default: return nil, fmt.Errorf("service %q does not exist", name) } @@ -114,7 +114,7 @@ func TestForwardingToAppendable(t *testing.T) { // Update the component with a mock receiver; it should be passed along to the Appendable. 
var receivedTs int64 var receivedSamples labels.Labels - ls := labelstore.New(nil) + ls := labelstore.New(nil, prometheus_client.DefaultRegisterer) fanout := prometheus.NewInterceptor(nil, ls, prometheus.WithAppendHook(func(ref storage.SeriesRef, l labels.Labels, t int64, _ float64, _ storage.Appender) (storage.SeriesRef, error) { receivedTs = t receivedSamples = l @@ -193,7 +193,7 @@ func TestCustomDialer(t *testing.T) { case cluster.ServiceName: return cluster.Mock(), nil case labelstore.ServiceName: - return labelstore.New(nil), nil + return labelstore.New(nil, prometheus_client.DefaultRegisterer), nil default: return nil, fmt.Errorf("service %q does not exist", name) diff --git a/component/pyroscope/ebpf/ebpf_linux.go b/component/pyroscope/ebpf/ebpf_linux.go index 28910b92bce2..b8b1afbecf59 100644 --- a/component/pyroscope/ebpf/ebpf_linux.go +++ b/component/pyroscope/ebpf/ebpf_linux.go @@ -162,10 +162,14 @@ func (c *Component) collectProfiles() error { level.Debug(c.options.Logger).Log("msg", "ebpf collectProfiles") args := c.args builders := pprof.NewProfileBuilders(int64(args.SampleRate)) - err := c.session.CollectProfiles(func(target *sd.Target, stack []string, value uint64, pid uint32) { + err := c.session.CollectProfiles(func(target *sd.Target, stack []string, value uint64, pid uint32, aggregation ebpfspy.SampleAggregation) { labelsHash, labels := target.Labels() builder := builders.BuilderForTarget(labelsHash, labels) - builder.AddSample(stack, value) + if aggregation == ebpfspy.SampleAggregated { + builder.CreateSample(stack, value) + } else { + builder.CreateSampleOrAddValue(stack, value) + } }) if err != nil { diff --git a/component/pyroscope/ebpf/ebpf_linux_test.go b/component/pyroscope/ebpf/ebpf_linux_test.go index 7d91e894ed4c..d9216dda622d 100644 --- a/component/pyroscope/ebpf/ebpf_linux_test.go +++ b/component/pyroscope/ebpf/ebpf_linux_test.go @@ -45,13 +45,13 @@ func (m *mockSession) UpdateTargets(_ sd.TargetsOptions) { } -func (m *mockSession) CollectProfiles(f func(target *sd.Target, stack []string, value uint64, pid uint32)) error { +func (m *mockSession) CollectProfiles(f ebpfspy.CollectProfilesCallback) error { m.collected++ if m.collectError != nil { return m.collectError } for _, stack := range m.data { - f(m.dataTarget, stack, 1, 1) + f(m.dataTarget, stack, 1, 1, ebpfspy.SampleNotAggregated) } return nil } diff --git a/component/pyroscope/ebpf/ebpf_placeholder.go b/component/pyroscope/ebpf/ebpf_placeholder.go index 6fd928ff496f..9c0be2748f3f 100644 --- a/component/pyroscope/ebpf/ebpf_placeholder.go +++ b/component/pyroscope/ebpf/ebpf_placeholder.go @@ -1,4 +1,4 @@ -//go:build linux && !arm64 && !amd64 +//go:build !(linux && (arm64 || amd64)) package ebpf @@ -26,7 +26,7 @@ type Component struct { } func New(opts component.Options, args Arguments) (component.Component, error) { - level.Warn(opts.Logger).Log("msg", "the pyroscope.ebpf component only works on linux; enabling it otherwise will do nothing") + level.Warn(opts.Logger).Log("msg", "the pyroscope.ebpf component only works on ARM64 and AMD64 Linux platforms; enabling it otherwise will do nothing") return &Component{}, nil } diff --git a/component/pyroscope/scrape/scrape.go b/component/pyroscope/scrape/scrape.go index 846fee049e5e..bf84c1567e4f 100644 --- a/component/pyroscope/scrape/scrape.go +++ b/component/pyroscope/scrape/scrape.go @@ -33,9 +33,8 @@ const ( func init() { component.Register(component.Registration{ - Name: "pyroscope.scrape", - Args: Arguments{}, - NeedsServices: 
[]string{cluster.ServiceName}, + Name: "pyroscope.scrape", + Args: Arguments{}, Build: func(opts component.Options, args component.Arguments) (component.Component, error) { return New(opts, args.(Arguments)) diff --git a/component/pyroscope/scrape/scrape_loop.go b/component/pyroscope/scrape/scrape_loop.go index 5abb3fd801f3..a1f7d2a6c1b7 100644 --- a/component/pyroscope/scrape/scrape_loop.go +++ b/component/pyroscope/scrape/scrape_loop.go @@ -12,18 +12,17 @@ import ( "github.com/go-kit/log" "github.com/grafana/agent/component/pyroscope" + "github.com/grafana/agent/internal/useragent" "github.com/grafana/agent/pkg/flow/logging/level" commonconfig "github.com/prometheus/common/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/pool" "golang.org/x/net/context/ctxhttp" - - "github.com/grafana/agent/pkg/build" ) var ( payloadBuffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) - userAgentHeader = fmt.Sprintf("GrafanaAgent/%s", build.Version) + userAgentHeader = useragent.Get() ) type scrapePool struct { @@ -229,7 +228,7 @@ func (t *scrapeLoop) scrape() { } } if err := t.fetchProfile(scrapeCtx, profileType, buf); err != nil { - level.Debug(t.logger).Log("msg", "fetch profile failed", "target", t.Labels().String(), "err", err) + level.Error(t.logger).Log("msg", "fetch profile failed", "target", t.Labels().String(), "err", err) t.updateTargetStatus(start, err) return } diff --git a/component/pyroscope/write/write.go b/component/pyroscope/write/write.go index e92c7dfb1706..4c20797a611c 100644 --- a/component/pyroscope/write/write.go +++ b/component/pyroscope/write/write.go @@ -3,12 +3,13 @@ package write import ( "context" "errors" - "fmt" "strings" "time" "github.com/bufbuild/connect-go" "github.com/grafana/agent/component/pyroscope" + "github.com/grafana/agent/internal/agentseed" + "github.com/grafana/agent/internal/useragent" "github.com/grafana/agent/pkg/flow/logging/level" "github.com/oklog/run" commonconfig "github.com/prometheus/common/config" @@ -18,7 +19,6 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/component/common/config" - "github.com/grafana/agent/pkg/build" "github.com/grafana/dskit/backoff" pushv1 "github.com/grafana/pyroscope/api/gen/proto/go/push/v1" "github.com/grafana/pyroscope/api/gen/proto/go/push/v1/pushv1connect" @@ -26,7 +26,7 @@ import ( ) var ( - userAgent = fmt.Sprintf("GrafanaAgent/%s", build.Version) + userAgent = useragent.Get() DefaultArguments = func() Arguments { return Arguments{} } @@ -157,7 +157,12 @@ type fanOutClient struct { // NewFanOut creates a new fan out client that will fan out to all endpoints. 
func NewFanOut(opts component.Options, config Arguments, metrics *metrics) (*fanOutClient, error) { clients := make([]pushv1connect.PusherServiceClient, 0, len(config.Endpoints)) + uid := agentseed.Get().UID for _, endpoint := range config.Endpoints { + if endpoint.Headers == nil { + endpoint.Headers = map[string]string{} + } + endpoint.Headers[agentseed.HeaderName] = uid httpClient, err := commonconfig.NewClientFromConfig(*endpoint.HTTPClientConfig.Convert(), endpoint.Name) if err != nil { return nil, err diff --git a/component/registry.go b/component/registry.go index 20a364be81c4..11cc593b0ddd 100644 --- a/component/registry.go +++ b/component/registry.go @@ -10,6 +10,8 @@ import ( "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/trace" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) // The parsedName of a component is the parts of its name ("remote.http") split @@ -97,8 +99,7 @@ type Options struct { // GetServiceData retrieves data for a service by calling // [service.Service.Data] for the specified service. // - // GetServiceData will return an error if the service does not exist or was - // not listed as a dependency with the registration of the component. + // GetServiceData will return an error if the service does not exist. // // The result of GetServiceData may be cached as the value will not change at // runtime. @@ -127,15 +128,6 @@ type Registration struct { // A component which does not expose exports must leave this set to nil. Exports Exports - // NeedsServices holds the set of service names which this component depends - // on to run. If NeedsServices includes an invalid service name (either - // because of a cyclic dependency or the named service doesn't exist), - // components will fail to evaluate. - // - // Modules which are loaded by the registered component will only be able to - // access services in this list. - NeedsServices []string - // Build should construct a new component from an initial Arguments and set // of options. 
Build func(opts Options, args Arguments) (Component, error) @@ -212,3 +204,9 @@ func Get(name string) (Registration, bool) { r, ok := registered[name] return r, ok } + +func AllNames() []string { + keys := maps.Keys(registered) + slices.Sort(keys) + return keys +} diff --git a/component/remote/http/http.go b/component/remote/http/http.go index 5212e0ffc19a..20fe5b8ec11d 100644 --- a/component/remote/http/http.go +++ b/component/remote/http/http.go @@ -17,14 +17,14 @@ import ( "github.com/go-kit/log" "github.com/grafana/agent/component" common_config "github.com/grafana/agent/component/common/config" - "github.com/grafana/agent/pkg/build" + "github.com/grafana/agent/internal/useragent" "github.com/grafana/agent/pkg/flow/logging/level" "github.com/grafana/river/rivertypes" "github.com/prometheus/client_golang/prometheus" prom_config "github.com/prometheus/common/config" ) -var userAgent = fmt.Sprintf("GrafanaAgent/%s", build.Version) +var userAgent = useragent.Get() func init() { component.Register(component.Registration{ @@ -48,6 +48,7 @@ type Arguments struct { Method string `river:"method,attr,optional"` Headers map[string]string `river:"headers,attr,optional"` + Body string `river:"body,attr,optional"` Client common_config.HTTPClientConfig `river:"client,block,optional"` } @@ -232,7 +233,12 @@ func (c *Component) pollError() error { ctx, cancel := context.WithTimeout(context.Background(), c.args.PollTimeout) defer cancel() - req, err := http.NewRequest(c.args.Method, c.args.URL, nil) + var body io.Reader + if c.args.Body != "" { + body = strings.NewReader(c.args.Body) + } + + req, err := http.NewRequest(c.args.Method, c.args.URL, body) if err != nil { level.Error(c.log).Log("msg", "failed to build request", "err", err) return fmt.Errorf("building request: %w", err) diff --git a/component/remote/http/http_test.go b/component/remote/http/http_test.go index f8ef2214394f..e59d7c6e74c9 100644 --- a/component/remote/http/http_test.go +++ b/component/remote/http/http_test.go @@ -3,6 +3,7 @@ package http_test import ( "context" "fmt" + "io" "net/http" "net/http/httptest" "sync" @@ -27,6 +28,9 @@ func Test(t *testing.T) { defer srv.Close() handler.SetHandler(func(w http.ResponseWriter, r *http.Request) { + b, err := io.ReadAll(r.Body) + require.NoError(t, err) + require.Equal(t, string(b), "hello there!") fmt.Fprintln(w, "Hello, world!") }) @@ -40,10 +44,11 @@ func Test(t *testing.T) { "x-custom" = "value", "User-Agent" = "custom_useragent", } + body = "%s" poll_frequency = "50ms" poll_timeout = "25ms" - `, srv.URL, http.MethodPut) + `, srv.URL, http.MethodPut, "hello there!") var args http_component.Arguments require.NoError(t, river.Unmarshal([]byte(cfg), &args)) diff --git a/converter/internal/prometheusconvert/component/ec2.go b/converter/internal/prometheusconvert/component/ec2.go index acd89755d165..5edf6ec0bac3 100644 --- a/converter/internal/prometheusconvert/component/ec2.go +++ b/converter/internal/prometheusconvert/component/ec2.go @@ -9,7 +9,6 @@ import ( "github.com/grafana/agent/converter/internal/common" "github.com/grafana/agent/converter/internal/prometheusconvert/build" "github.com/grafana/river/rivertypes" - prom_config "github.com/prometheus/common/config" prom_aws "github.com/prometheus/prometheus/discovery/aws" ) @@ -22,27 +21,7 @@ func appendDiscoveryEC2(pb *build.PrometheusBlocks, label string, sdConfig *prom } func ValidateDiscoveryEC2(sdConfig *prom_aws.EC2SDConfig) diag.Diagnostics { - var diags diag.Diagnostics - - var nilBasicAuth *prom_config.BasicAuth - var 
nilAuthorization *prom_config.Authorization - var nilOAuth2 *prom_config.OAuth2 - - diags.AddAll(common.ValidateSupported(common.NotEquals, sdConfig.HTTPClientConfig.BasicAuth, nilBasicAuth, "ec2_sd_configs basic_auth", "")) - diags.AddAll(common.ValidateSupported(common.NotEquals, sdConfig.HTTPClientConfig.Authorization, nilAuthorization, "ec2_sd_configs authorization", "")) - diags.AddAll(common.ValidateSupported(common.NotEquals, sdConfig.HTTPClientConfig.OAuth2, nilOAuth2, "ec2_sd_configs oauth2", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.BearerToken, prom_config.DefaultHTTPClientConfig.BearerToken, "ec2_sd_configs bearer_token", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.BearerTokenFile, prom_config.DefaultHTTPClientConfig.BearerTokenFile, "ec2_sd_configs bearer_token_file", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.FollowRedirects, prom_config.DefaultHTTPClientConfig.FollowRedirects, "ec2_sd_configs follow_redirects", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.EnableHTTP2, prom_config.DefaultHTTPClientConfig.EnableHTTP2, "ec2_sd_configs enable_http2", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.ProxyConfig, prom_config.DefaultHTTPClientConfig.ProxyConfig, "ec2_sd_configs proxy", "")) - - // Do a last check in case any of the specific checks missed anything. - if len(diags) == 0 { - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig, prom_config.DefaultHTTPClientConfig, "ec2_sd_configs http_client_config", "")) - } - - return diags + return common.ValidateHttpClientConfig(&sdConfig.HTTPClientConfig) } func toDiscoveryEC2(sdConfig *prom_aws.EC2SDConfig) *aws.EC2Arguments { @@ -51,15 +30,16 @@ func toDiscoveryEC2(sdConfig *prom_aws.EC2SDConfig) *aws.EC2Arguments { } return &aws.EC2Arguments{ - Endpoint: sdConfig.Endpoint, - Region: sdConfig.Region, - AccessKey: sdConfig.AccessKey, - SecretKey: rivertypes.Secret(sdConfig.SecretKey), - Profile: sdConfig.Profile, - RoleARN: sdConfig.RoleARN, - RefreshInterval: time.Duration(sdConfig.RefreshInterval), - Port: sdConfig.Port, - Filters: toEC2Filters(sdConfig.Filters), + Endpoint: sdConfig.Endpoint, + Region: sdConfig.Region, + AccessKey: sdConfig.AccessKey, + SecretKey: rivertypes.Secret(sdConfig.SecretKey), + Profile: sdConfig.Profile, + RoleARN: sdConfig.RoleARN, + RefreshInterval: time.Duration(sdConfig.RefreshInterval), + Port: sdConfig.Port, + Filters: toEC2Filters(sdConfig.Filters), + HTTPClientConfig: *common.ToHttpClientConfig(&sdConfig.HTTPClientConfig), } } diff --git a/converter/internal/prometheusconvert/component/http.go b/converter/internal/prometheusconvert/component/http.go new file mode 100644 index 000000000000..5a6fde97fc1e --- /dev/null +++ b/converter/internal/prometheusconvert/component/http.go @@ -0,0 +1,43 @@ +package component + +import ( + "net/url" + "time" + + "github.com/grafana/agent/component/common/config" + "github.com/grafana/agent/component/discovery" + "github.com/grafana/agent/component/discovery/http" + "github.com/grafana/agent/converter/diag" + "github.com/grafana/agent/converter/internal/common" + "github.com/grafana/agent/converter/internal/prometheusconvert/build" + prom_http "github.com/prometheus/prometheus/discovery/http" +) + +func appendDiscoveryHttp(pb *build.PrometheusBlocks, label string, sdConfig 
*prom_http.SDConfig) discovery.Exports { + discoveryFileArgs := toDiscoveryHttp(sdConfig) + name := []string{"discovery", "http"} + block := common.NewBlockWithOverride(name, label, discoveryFileArgs) + pb.DiscoveryBlocks = append(pb.DiscoveryBlocks, build.NewPrometheusBlock(block, name, label, "", "")) + return common.NewDiscoveryExports("discovery.http." + label + ".targets") +} + +func ValidateDiscoveryHttp(sdConfig *prom_http.SDConfig) diag.Diagnostics { + return common.ValidateHttpClientConfig(&sdConfig.HTTPClientConfig) +} + +func toDiscoveryHttp(sdConfig *prom_http.SDConfig) *http.Arguments { + if sdConfig == nil { + return nil + } + + url, err := url.Parse(sdConfig.URL) + if err != nil { + panic("invalid http_sd_configs url provided") + } + + return &http.Arguments{ + HTTPClientConfig: *common.ToHttpClientConfig(&sdConfig.HTTPClientConfig), + RefreshInterval: time.Duration(sdConfig.RefreshInterval), + URL: config.URL{URL: url}, + } +} diff --git a/converter/internal/prometheusconvert/component/lightsail.go b/converter/internal/prometheusconvert/component/lightsail.go index 10480bd082ff..9a97c2f506b4 100644 --- a/converter/internal/prometheusconvert/component/lightsail.go +++ b/converter/internal/prometheusconvert/component/lightsail.go @@ -9,7 +9,6 @@ import ( "github.com/grafana/agent/converter/internal/common" "github.com/grafana/agent/converter/internal/prometheusconvert/build" "github.com/grafana/river/rivertypes" - prom_config "github.com/prometheus/common/config" prom_aws "github.com/prometheus/prometheus/discovery/aws" ) @@ -22,27 +21,7 @@ func appendDiscoveryLightsail(pb *build.PrometheusBlocks, label string, sdConfig } func ValidateDiscoveryLightsail(sdConfig *prom_aws.LightsailSDConfig) diag.Diagnostics { - var diags diag.Diagnostics - - var nilBasicAuth *prom_config.BasicAuth - var nilAuthorization *prom_config.Authorization - var nilOAuth2 *prom_config.OAuth2 - - diags.AddAll(common.ValidateSupported(common.NotEquals, sdConfig.HTTPClientConfig.BasicAuth, nilBasicAuth, "lightsail_sd_configs basic_auth", "")) - diags.AddAll(common.ValidateSupported(common.NotEquals, sdConfig.HTTPClientConfig.Authorization, nilAuthorization, "lightsail_sd_configs authorization", "")) - diags.AddAll(common.ValidateSupported(common.NotEquals, sdConfig.HTTPClientConfig.OAuth2, nilOAuth2, "lightsail_sd_configs oauth2", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.BearerToken, prom_config.DefaultHTTPClientConfig.BearerToken, "lightsail_sd_configs bearer_token", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.BearerTokenFile, prom_config.DefaultHTTPClientConfig.BearerTokenFile, "lightsail_sd_configs bearer_token_file", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.FollowRedirects, prom_config.DefaultHTTPClientConfig.FollowRedirects, "lightsail_sd_configs follow_redirects", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.EnableHTTP2, prom_config.DefaultHTTPClientConfig.EnableHTTP2, "lightsail_sd_configs enable_http2", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.ProxyConfig, prom_config.DefaultHTTPClientConfig.ProxyConfig, "lightsail_sd_configs proxy", "")) - - // Do a last check in case any of the specific checks missed anything. 
- if len(diags) == 0 { - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig, prom_config.DefaultHTTPClientConfig, "lightsail_sd_configs http_client_config", "")) - } - - return diags + return common.ValidateHttpClientConfig(&sdConfig.HTTPClientConfig) } func toDiscoveryLightsail(sdConfig *prom_aws.LightsailSDConfig) *aws.LightsailArguments { @@ -51,13 +30,14 @@ func toDiscoveryLightsail(sdConfig *prom_aws.LightsailSDConfig) *aws.LightsailAr } return &aws.LightsailArguments{ - Endpoint: sdConfig.Endpoint, - Region: sdConfig.Region, - AccessKey: sdConfig.AccessKey, - SecretKey: rivertypes.Secret(sdConfig.SecretKey), - Profile: sdConfig.Profile, - RoleARN: sdConfig.RoleARN, - RefreshInterval: time.Duration(sdConfig.RefreshInterval), - Port: sdConfig.Port, + Endpoint: sdConfig.Endpoint, + Region: sdConfig.Region, + AccessKey: sdConfig.AccessKey, + SecretKey: rivertypes.Secret(sdConfig.SecretKey), + Profile: sdConfig.Profile, + RoleARN: sdConfig.RoleARN, + RefreshInterval: time.Duration(sdConfig.RefreshInterval), + Port: sdConfig.Port, + HTTPClientConfig: *common.ToHttpClientConfig(&sdConfig.HTTPClientConfig), } } diff --git a/converter/internal/prometheusconvert/component/ovhcloud.go b/converter/internal/prometheusconvert/component/ovhcloud.go new file mode 100644 index 000000000000..f4a59fd525cf --- /dev/null +++ b/converter/internal/prometheusconvert/component/ovhcloud.go @@ -0,0 +1,40 @@ +package component + +import ( + "time" + + "github.com/grafana/agent/component/discovery" + "github.com/grafana/agent/component/discovery/ovhcloud" + "github.com/grafana/agent/converter/diag" + "github.com/grafana/agent/converter/internal/common" + "github.com/grafana/agent/converter/internal/prometheusconvert/build" + "github.com/grafana/river/rivertypes" + prom_discovery "github.com/prometheus/prometheus/discovery/ovhcloud" +) + +func appendDiscoveryOvhcloud(pb *build.PrometheusBlocks, label string, sdConfig *prom_discovery.SDConfig) discovery.Exports { + discoveryOvhcloudArgs := toDiscoveryOvhcloud(sdConfig) + name := []string{"discovery", "ovhcloud"} + block := common.NewBlockWithOverride(name, label, discoveryOvhcloudArgs) + pb.DiscoveryBlocks = append(pb.DiscoveryBlocks, build.NewPrometheusBlock(block, name, label, "", "")) + return common.NewDiscoveryExports("discovery.ovhcloud." 
+ label + ".targets") +} + +func ValidateDiscoveryOvhcloud(sdConfig *prom_discovery.SDConfig) diag.Diagnostics { + return nil +} + +func toDiscoveryOvhcloud(sdConfig *prom_discovery.SDConfig) *ovhcloud.Arguments { + if sdConfig == nil { + return nil + } + + return &ovhcloud.Arguments{ + Endpoint: sdConfig.Endpoint, + ApplicationKey: sdConfig.ApplicationKey, + ApplicationSecret: rivertypes.Secret(sdConfig.ApplicationSecret), + ConsumerKey: rivertypes.Secret(sdConfig.ConsumerKey), + RefreshInterval: time.Duration(sdConfig.RefreshInterval), + Service: sdConfig.Service, + } +} diff --git a/converter/internal/prometheusconvert/component/relabel.go b/converter/internal/prometheusconvert/component/relabel.go index de6ec420ec21..a3bee3c6b7dd 100644 --- a/converter/internal/prometheusconvert/component/relabel.go +++ b/converter/internal/prometheusconvert/component/relabel.go @@ -36,6 +36,7 @@ func toRelabelArguments(relabelConfigs []*prom_relabel.Config, forwardTo []stora return &relabel.Arguments{ ForwardTo: forwardTo, MetricRelabelConfigs: ToFlowRelabelConfigs(relabelConfigs), + CacheSize: 100_000, } } diff --git a/converter/internal/prometheusconvert/component/remote_write.go b/converter/internal/prometheusconvert/component/remote_write.go index 4756f84d6674..37c4c6814a04 100644 --- a/converter/internal/prometheusconvert/component/remote_write.go +++ b/converter/internal/prometheusconvert/component/remote_write.go @@ -96,6 +96,7 @@ func toQueueOptions(queueConfig *prom_config.QueueConfig) *remotewrite.QueueOpti MinBackoff: time.Duration(queueConfig.MinBackoff), MaxBackoff: time.Duration(queueConfig.MaxBackoff), RetryOnHTTP429: queueConfig.RetryOnRateLimit, + SampleAgeLimit: time.Duration(queueConfig.SampleAgeLimit), } } diff --git a/converter/internal/prometheusconvert/component/scrape.go b/converter/internal/prometheusconvert/component/scrape.go index 651e4e2c5bc1..a2005cf85fbb 100644 --- a/converter/internal/prometheusconvert/component/scrape.go +++ b/converter/internal/prometheusconvert/component/scrape.go @@ -30,7 +30,12 @@ func AppendPrometheusScrape(pb *build.PrometheusBlocks, scrapeConfig *prom_confi func ValidatePrometheusScrape(scrapeConfig *prom_config.ScrapeConfig) diag.Diagnostics { var diags diag.Diagnostics + // https://github.com/grafana/agent/pull/5972#discussion_r1441980155 + diags.AddAll(common.ValidateSupported(common.NotEquals, scrapeConfig.TrackTimestampsStaleness, false, "scrape_configs track_timestamps_staleness", "")) + // https://github.com/prometheus/prometheus/commit/40240c9c1cb290fe95f1e61886b23fab860aeacd diags.AddAll(common.ValidateSupported(common.NotEquals, scrapeConfig.NativeHistogramBucketLimit, uint(0), "scrape_configs native_histogram_bucket_limit", "")) + // https://github.com/prometheus/prometheus/pull/12647 + diags.AddAll(common.ValidateSupported(common.NotEquals, scrapeConfig.KeepDroppedTargets, uint(0), "scrape_configs keep_dropped_targets", "")) diags.AddAll(common.ValidateHttpClientConfig(&scrapeConfig.HTTPClientConfig)) return diags diff --git a/converter/internal/prometheusconvert/component/service_discovery.go b/converter/internal/prometheusconvert/component/service_discovery.go index 578c9f540fae..69c179f1ef9a 100644 --- a/converter/internal/prometheusconvert/component/service_discovery.go +++ b/converter/internal/prometheusconvert/component/service_discovery.go @@ -9,6 +9,7 @@ import ( "github.com/grafana/agent/converter/internal/prometheusconvert/build" prom_discover "github.com/prometheus/prometheus/discovery" + prom_http 
"github.com/prometheus/prometheus/discovery/http" _ "github.com/prometheus/prometheus/discovery/install" // Register Prometheus SDs prom_aws "github.com/prometheus/prometheus/discovery/aws" @@ -24,6 +25,7 @@ import ( prom_marathon "github.com/prometheus/prometheus/discovery/marathon" prom_docker "github.com/prometheus/prometheus/discovery/moby" prom_openstack "github.com/prometheus/prometheus/discovery/openstack" + prom_ovhcloud "github.com/prometheus/prometheus/discovery/ovhcloud" prom_scaleway "github.com/prometheus/prometheus/discovery/scaleway" prom_triton "github.com/prometheus/prometheus/discovery/triton" prom_xds "github.com/prometheus/prometheus/discovery/xds" @@ -60,6 +62,9 @@ func AppendServiceDiscoveryConfig(pb *build.PrometheusBlocks, serviceDiscoveryCo case *prom_gce.SDConfig: labelCounts["gce"]++ return appendDiscoveryGCE(pb, common.LabelWithIndex(labelCounts["gce"]-1, label), sdc) + case *prom_http.SDConfig: + labelCounts["http"]++ + return appendDiscoveryHttp(pb, common.LabelWithIndex(labelCounts["http"]-1, label), sdc) case *prom_kubernetes.SDConfig: labelCounts["kubernetes"]++ return appendDiscoveryKubernetes(pb, common.LabelWithIndex(labelCounts["kubernetes"]-1, label), sdc) @@ -96,6 +101,9 @@ func AppendServiceDiscoveryConfig(pb *build.PrometheusBlocks, serviceDiscoveryCo case *prom_docker.DockerSwarmSDConfig: labelCounts["dockerswarm"]++ return appendDiscoveryDockerswarm(pb, common.LabelWithIndex(labelCounts["dockerswarm"]-1, label), sdc) + case *prom_ovhcloud.SDConfig: + labelCounts["ovhcloud"]++ + return appendDiscoveryOvhcloud(pb, common.LabelWithIndex(labelCounts["ovhcloud"]-1, label), sdc) default: return discovery.Exports{} } @@ -121,6 +129,8 @@ func ValidateServiceDiscoveryConfig(serviceDiscoveryConfig prom_discover.Config) return ValidateDiscoveryFile(sdc) case *prom_gce.SDConfig: return ValidateDiscoveryGCE(sdc) + case *prom_http.SDConfig: + return ValidateDiscoveryHttp(sdc) case *prom_kubernetes.SDConfig: return ValidateDiscoveryKubernetes(sdc) case *prom_aws.LightsailSDConfig: @@ -145,6 +155,8 @@ func ValidateServiceDiscoveryConfig(serviceDiscoveryConfig prom_discover.Config) return ValidateDiscoveryOpenstack(sdc) case *prom_docker.DockerSwarmSDConfig: return ValidateDiscoveryDockerswarm(sdc) + case *prom_ovhcloud.SDConfig: + return ValidateDiscoveryOvhcloud(sdc) default: var diags diag.Diagnostics diags.Add(diag.SeverityLevelError, fmt.Sprintf("The converter does not support converting the provided %s service discovery.", serviceDiscoveryConfig.Name())) diff --git a/converter/internal/prometheusconvert/testdata/azure.river b/converter/internal/prometheusconvert/testdata/azure.river index 368673474b22..e1bc751bf05d 100644 --- a/converter/internal/prometheusconvert/testdata/azure.river +++ b/converter/internal/prometheusconvert/testdata/azure.river @@ -10,8 +10,6 @@ discovery.azure "prometheus1" { managed_identity { client_id = "client" } - follow_redirects = true - enable_http2 = true } discovery.azure "prometheus2" { @@ -26,8 +24,8 @@ discovery.azure "prometheus2" { managed_identity { client_id = "client" } - proxy_url = "proxy" - enable_http2 = true + proxy_url = "proxy" + follow_redirects = false } prometheus.scrape "prometheus1" { diff --git a/converter/internal/prometheusconvert/testdata/consul.river b/converter/internal/prometheusconvert/testdata/consul.river index b3d7879ed47e..ccf9e8c189c3 100644 --- a/converter/internal/prometheusconvert/testdata/consul.river +++ b/converter/internal/prometheusconvert/testdata/consul.river @@ -1,13 +1,9 @@ 
discovery.consul "prometheus1" { - services = ["myapp"] - follow_redirects = true - enable_http2 = true + services = ["myapp"] } discovery.consul "prometheus2" { - services = ["otherapp"] - follow_redirects = true - enable_http2 = true + services = ["otherapp"] } prometheus.scrape "prometheus1" { diff --git a/converter/internal/prometheusconvert/testdata/digitalocean.river b/converter/internal/prometheusconvert/testdata/digitalocean.river index 4e39bf9be2c6..27b0629afc15 100644 --- a/converter/internal/prometheusconvert/testdata/digitalocean.river +++ b/converter/internal/prometheusconvert/testdata/digitalocean.river @@ -1,12 +1,6 @@ -discovery.digitalocean "prometheus1" { - follow_redirects = true - enable_http2 = true -} +discovery.digitalocean "prometheus1" { } -discovery.digitalocean "prometheus2" { - follow_redirects = true - enable_http2 = true -} +discovery.digitalocean "prometheus2" { } prometheus.scrape "prometheus1" { targets = concat( diff --git a/converter/internal/prometheusconvert/testdata/discovery.river b/converter/internal/prometheusconvert/testdata/discovery.river index 4ff3cc509ce3..f2c59fa7a4f6 100644 --- a/converter/internal/prometheusconvert/testdata/discovery.river +++ b/converter/internal/prometheusconvert/testdata/discovery.river @@ -10,8 +10,6 @@ discovery.azure "prometheus1" { managed_identity { client_id = "client1" } - follow_redirects = true - enable_http2 = true } discovery.azure "prometheus1_2" { @@ -26,8 +24,6 @@ discovery.azure "prometheus1_2" { managed_identity { client_id = "client2" } - follow_redirects = true - enable_http2 = true } discovery.relabel "prometheus1" { diff --git a/converter/internal/prometheusconvert/testdata/discovery_relabel.river b/converter/internal/prometheusconvert/testdata/discovery_relabel.river index 8d37e5c3a9ad..4b1009886810 100644 --- a/converter/internal/prometheusconvert/testdata/discovery_relabel.river +++ b/converter/internal/prometheusconvert/testdata/discovery_relabel.river @@ -10,8 +10,6 @@ discovery.azure "prometheus2" { managed_identity { client_id = "client" } - follow_redirects = true - enable_http2 = true } discovery.relabel "prometheus1" { diff --git a/converter/internal/prometheusconvert/testdata/ec2.diags b/converter/internal/prometheusconvert/testdata/ec2.diags deleted file mode 100644 index 3301a9ad2213..000000000000 --- a/converter/internal/prometheusconvert/testdata/ec2.diags +++ /dev/null @@ -1 +0,0 @@ -(Error) The converter does not support converting the provided ec2_sd_configs bearer_token_file config. 
\ No newline at end of file diff --git a/converter/internal/prometheusconvert/testdata/ec2.river b/converter/internal/prometheusconvert/testdata/ec2.river index 22775efe8ed2..d07d133a659b 100644 --- a/converter/internal/prometheusconvert/testdata/ec2.river +++ b/converter/internal/prometheusconvert/testdata/ec2.river @@ -3,6 +3,11 @@ discovery.ec2 "prometheus1" { access_key = "YOUR_ACCESS_KEY" secret_key = "YOUR_SECRET_KEY" port = 8080 + + authorization { + type = "Bearer" + credentials_file = "/tmp/token.file" + } } discovery.ec2 "prometheus2" { diff --git a/converter/internal/prometheusconvert/testdata/http.river b/converter/internal/prometheusconvert/testdata/http.river new file mode 100644 index 000000000000..3184bf527e0d --- /dev/null +++ b/converter/internal/prometheusconvert/testdata/http.river @@ -0,0 +1,72 @@ +discovery.http "netbox_snmp" { + authorization { + type = "Token" + credentials_file = "/run/secrets/netbox_token" + } + follow_redirects = true + enable_http2 = true + refresh_interval = "15s" + url = "http://netbox:8080/api/plugins/prometheus-sd/devices?status=active&cf_prometheus_job=netbox_snmp" +} + +discovery.relabel "netbox_snmp" { + targets = discovery.http.netbox_snmp.targets + + rule { + source_labels = ["__meta_netbox_primary_ip"] + target_label = "instance" + } + + rule { + source_labels = ["__meta_netbox_site"] + target_label = "site" + } + + rule { + source_labels = ["__meta_netbox_location"] + target_label = "room" + } + + rule { + source_labels = ["__meta_netbox_name"] + target_label = "name" + } + + rule { + source_labels = ["instance"] + target_label = "__param_target" + } + + rule { + source_labels = ["__meta_netbox_custom_field_prometheus_snmp_module"] + target_label = "__param_module" + } + + rule { + source_labels = ["__meta_netbox_custom_field_prometheus_snmp_auth"] + target_label = "__param_auth" + } + + rule { + target_label = "__address__" + replacement = "snmp-exporter:9116" + } +} + +prometheus.scrape "netbox_snmp" { + targets = discovery.relabel.netbox_snmp.output + forward_to = [prometheus.remote_write.default.receiver] + job_name = "netbox_snmp" + metrics_path = "/snmp" +} + +prometheus.remote_write "default" { + endpoint { + name = "remote1" + url = "http://remote-write-url1" + + queue_config { } + + metadata_config { } + } +} diff --git a/converter/internal/prometheusconvert/testdata/http.yaml b/converter/internal/prometheusconvert/testdata/http.yaml new file mode 100644 index 000000000000..ca9e954db1b7 --- /dev/null +++ b/converter/internal/prometheusconvert/testdata/http.yaml @@ -0,0 +1,34 @@ +global: + scrape_interval: 60s + +scrape_configs: + - job_name: netbox_snmp + metrics_path: /snmp + http_sd_configs: + - url: http://netbox:8080/api/plugins/prometheus-sd/devices?status=active&cf_prometheus_job=netbox_snmp + refresh_interval: 15s + authorization: + type: Token + credentials_file: /run/secrets/netbox_token + relabel_configs: + - source_labels: [__meta_netbox_primary_ip] + target_label: instance + - source_labels: [__meta_netbox_site] + target_label: site + - source_labels: [__meta_netbox_location] + target_label: room + - source_labels: [__meta_netbox_name] + target_label: name + - source_labels: [instance] + target_label: __param_target + - source_labels: [__meta_netbox_custom_field_prometheus_snmp_module] + target_label: __param_module + - source_labels: [__meta_netbox_custom_field_prometheus_snmp_auth] + target_label: __param_auth + # replaces "address" with SNMP exporter's real hostname:port + - target_label: __address__ + 
replacement: snmp-exporter:9116 + +remote_write: + - name: "remote1" + url: "http://remote-write-url1" \ No newline at end of file diff --git a/converter/internal/prometheusconvert/testdata/lightsail.diags b/converter/internal/prometheusconvert/testdata/lightsail.diags deleted file mode 100644 index 0a96d20e3985..000000000000 --- a/converter/internal/prometheusconvert/testdata/lightsail.diags +++ /dev/null @@ -1 +0,0 @@ -(Error) The converter does not support converting the provided lightsail_sd_configs bearer_token_file config. \ No newline at end of file diff --git a/converter/internal/prometheusconvert/testdata/lightsail.river b/converter/internal/prometheusconvert/testdata/lightsail.river index 754d9c5d39ea..4e1966490532 100644 --- a/converter/internal/prometheusconvert/testdata/lightsail.river +++ b/converter/internal/prometheusconvert/testdata/lightsail.river @@ -3,6 +3,11 @@ discovery.lightsail "prometheus1" { access_key = "YOUR_ACCESS_KEY" secret_key = "YOUR_SECRET_KEY" port = 8080 + + authorization { + type = "Bearer" + credentials_file = "/tmp/token.file" + } } discovery.lightsail "prometheus2" { diff --git a/converter/internal/prometheusconvert/testdata/ovhcloud.river b/converter/internal/prometheusconvert/testdata/ovhcloud.river new file mode 100644 index 000000000000..dff1e85bcee3 --- /dev/null +++ b/converter/internal/prometheusconvert/testdata/ovhcloud.river @@ -0,0 +1,43 @@ +discovery.ovhcloud "prometheus1" { + application_key = "app_key" + application_secret = "app_secret" + consumer_key = "cons_key" + service = "vps" +} + +discovery.ovhcloud "prometheus2" { + endpoint = "ovh-us" + application_key = "app_key_2" + application_secret = "app_secret_2" + consumer_key = "cons_key_2" + refresh_interval = "14m0s" + service = "dedicated_server" +} + +prometheus.scrape "prometheus1" { + targets = concat( + discovery.ovhcloud.prometheus1.targets, + [{ + __address__ = "localhost:9090", + }], + ) + forward_to = [prometheus.remote_write.default.receiver] + job_name = "prometheus1" +} + +prometheus.scrape "prometheus2" { + targets = discovery.ovhcloud.prometheus2.targets + forward_to = [prometheus.remote_write.default.receiver] + job_name = "prometheus2" +} + +prometheus.remote_write "default" { + endpoint { + name = "remote1" + url = "http://remote-write-url1" + + queue_config { } + + metadata_config { } + } +} diff --git a/converter/internal/prometheusconvert/testdata/ovhcloud.yaml b/converter/internal/prometheusconvert/testdata/ovhcloud.yaml new file mode 100644 index 000000000000..2201686989fc --- /dev/null +++ b/converter/internal/prometheusconvert/testdata/ovhcloud.yaml @@ -0,0 +1,21 @@ +scrape_configs: + - job_name: "prometheus1" + static_configs: + - targets: ["localhost:9090"] + ovhcloud_sd_configs: + - application_key: "app_key" + application_secret: "app_secret" + consumer_key: "cons_key" + service: "vps" + - job_name: "prometheus2" + ovhcloud_sd_configs: + - application_key: "app_key_2" + application_secret: "app_secret_2" + consumer_key: "cons_key_2" + service: "dedicated_server" + endpoint: "ovh-us" + refresh_interval: "14m" + +remote_write: + - name: "remote1" + url: "http://remote-write-url1" \ No newline at end of file diff --git a/converter/internal/prometheusconvert/testdata/scrape.diags b/converter/internal/prometheusconvert/testdata/scrape.diags new file mode 100644 index 000000000000..de85de6536cf --- /dev/null +++ b/converter/internal/prometheusconvert/testdata/scrape.diags @@ -0,0 +1 @@ +(Error) The converter does not support converting the provided 
scrape_configs track_timestamps_staleness config. \ No newline at end of file diff --git a/converter/internal/prometheusconvert/testdata/scrape.yaml b/converter/internal/prometheusconvert/testdata/scrape.yaml index d4b1e7e203c7..54496f296005 100644 --- a/converter/internal/prometheusconvert/testdata/scrape.yaml +++ b/converter/internal/prometheusconvert/testdata/scrape.yaml @@ -6,6 +6,7 @@ global: scrape_configs: - job_name: "prometheus-1" honor_timestamps: false + track_timestamps_staleness: true scrape_interval: 10s scrape_timeout: 5s static_configs: @@ -16,6 +17,7 @@ scrape_configs: username: 'user' password: 'pass' - job_name: "prometheus2" + track_timestamps_staleness: false static_configs: - targets: ["localhost:9091"] - targets: ["localhost:9092"] diff --git a/converter/internal/prometheusconvert/testdata/unsupported.diags b/converter/internal/prometheusconvert/testdata/unsupported.diags index ccdf9bd3da88..966bd0d1e5bf 100644 --- a/converter/internal/prometheusconvert/testdata/unsupported.diags +++ b/converter/internal/prometheusconvert/testdata/unsupported.diags @@ -5,6 +5,7 @@ (Error) The converter does not support converting the provided HTTP Client no_proxy config. (Error) The converter does not support converting the provided nomad service discovery. (Error) The converter does not support converting the provided scrape_configs native_histogram_bucket_limit config. +(Error) The converter does not support converting the provided scrape_configs keep_dropped_targets config. (Error) The converter does not support converting the provided storage config. (Error) The converter does not support converting the provided tracing config. (Error) The converter does not support converting the provided HTTP Client proxy_from_environment config. diff --git a/converter/internal/prometheusconvert/testdata/unsupported.yaml b/converter/internal/prometheusconvert/testdata/unsupported.yaml index bf677c030a39..5d174c36cb8e 100644 --- a/converter/internal/prometheusconvert/testdata/unsupported.yaml +++ b/converter/internal/prometheusconvert/testdata/unsupported.yaml @@ -44,6 +44,7 @@ scrape_configs: - targets: ["localhost:9091"] scrape_classic_histograms: true native_histogram_bucket_limit: 2 + keep_dropped_targets: 1000 remote_write: - name: "remote1" diff --git a/converter/internal/promtailconvert/internal/build/docker_sd.go b/converter/internal/promtailconvert/internal/build/docker_sd.go new file mode 100644 index 000000000000..5fcc953881f8 --- /dev/null +++ b/converter/internal/promtailconvert/internal/build/docker_sd.go @@ -0,0 +1,95 @@ +package build + +import ( + "time" + + "github.com/grafana/agent/component/common/loki" + flow_relabel "github.com/grafana/agent/component/common/relabel" + "github.com/grafana/agent/component/discovery" + "github.com/grafana/agent/component/discovery/docker" + loki_docker "github.com/grafana/agent/component/loki/source/docker" + "github.com/grafana/agent/converter/internal/common" + "github.com/prometheus/prometheus/discovery/moby" +) + +func (s *ScrapeConfigBuilder) AppendDockerPipeline() { + if len(s.cfg.DockerSDConfigs) == 0 { + return + } + + for i, sd := range s.cfg.DockerSDConfigs { + compLabel := common.LabelWithIndex(i, s.globalCtx.LabelPrefix, s.cfg.JobName) + + // Add discovery.docker + s.f.Body().AppendBlock(common.NewBlockWithOverride( + []string{"discovery", "docker"}, + compLabel, + toDiscoveryDocker(sd), + )) + + // The targets output from above component + targets := "discovery.docker." 
+ compLabel + ".targets" + + // Add loki.source.docker + overrideHook := func(val interface{}) interface{} { + switch val.(type) { + case []discovery.Target: // override targets expression to our string + return common.CustomTokenizer{Expr: targets} + case flow_relabel.Rules: // use the relabel rules defined for this pipeline + return common.CustomTokenizer{Expr: s.getOrNewDiscoveryRelabelRules()} + } + return val + } + + forwardTo := s.getOrNewProcessStageReceivers() // forward to process stage, which forwards to writers + s.f.Body().AppendBlock(common.NewBlockWithOverrideFn( + []string{"loki", "source", "docker"}, + compLabel, + toLokiSourceDocker(sd, forwardTo), + overrideHook, + )) + } +} + +func toLokiSourceDocker(sd *moby.DockerSDConfig, forwardTo []loki.LogsReceiver) *loki_docker.Arguments { + return &loki_docker.Arguments{ + Host: sd.Host, + Targets: nil, + ForwardTo: forwardTo, + Labels: nil, + RelabelRules: flow_relabel.Rules{}, + HTTPClientConfig: common.ToHttpClientConfig(&sd.HTTPClientConfig), + RefreshInterval: time.Duration(sd.RefreshInterval), + } +} + +func toDiscoveryDocker(sdConfig *moby.DockerSDConfig) *docker.Arguments { + if sdConfig == nil { + return nil + } + + return &docker.Arguments{ + Host: sdConfig.Host, + Port: sdConfig.Port, + HostNetworkingHost: sdConfig.HostNetworkingHost, + RefreshInterval: time.Duration(sdConfig.RefreshInterval), + Filters: toFlowDockerSDFilters(sdConfig.Filters), + HTTPClientConfig: *common.ToHttpClientConfig(&sdConfig.HTTPClientConfig), + } +} + +func toFlowDockerSDFilters(filters []moby.Filter) []docker.Filter { + if len(filters) == 0 { + return nil + } + + flowFilters := make([]docker.Filter, len(filters)) + for i, filter := range filters { + flowFilters[i] = docker.Filter{ + Name: filter.Name, + Values: filter.Values, + } + } + + return flowFilters +} diff --git a/converter/internal/promtailconvert/internal/build/scrape_builder.go b/converter/internal/promtailconvert/internal/build/scrape_builder.go index fc26d29cc832..c7288be0fc01 100644 --- a/converter/internal/promtailconvert/internal/build/scrape_builder.go +++ b/converter/internal/promtailconvert/internal/build/scrape_builder.go @@ -103,6 +103,11 @@ func (s *ScrapeConfigBuilder) getOrNewLokiRelabel() string { args := lokirelabel.Arguments{ ForwardTo: s.getOrNewProcessStageReceivers(), RelabelConfigs: component.ToFlowRelabelConfigs(s.cfg.RelabelConfigs), + // max_cache_size doesnt exist in static, and we need to manually set it to default. + // Since the default is 10_000 if we didnt set the value, it would compare the default 10k to 0 and emit 0. + // We actually dont want to emit anything since this setting doesnt exist in static, setting to 10k matches the default + // and ensures it doesnt get emitted. 
+ MaxCacheSize: lokirelabel.DefaultArguments.MaxCacheSize, } compLabel := common.LabelForParts(s.globalCtx.LabelPrefix, s.cfg.JobName) s.f.Body().AppendBlock(common.NewBlockWithOverride([]string{"loki", "relabel"}, compLabel, args)) diff --git a/converter/internal/promtailconvert/internal/build/service_discovery.go b/converter/internal/promtailconvert/internal/build/service_discovery.go index 6219bc2b121d..3405e0966a34 100644 --- a/converter/internal/promtailconvert/internal/build/service_discovery.go +++ b/converter/internal/promtailconvert/internal/build/service_discovery.go @@ -69,10 +69,6 @@ func toDiscoveryConfig(cfg *scrapeconfig.Config) prom_discover.Configs { sdConfigs = append(sdConfigs, sd) } - for _, sd := range cfg.DockerSDConfigs { - sdConfigs = append(sdConfigs, sd) - } - for _, sd := range cfg.ServiceDiscoveryConfig.DNSSDConfigs { sdConfigs = append(sdConfigs, sd) } diff --git a/converter/internal/promtailconvert/promtailconvert.go b/converter/internal/promtailconvert/promtailconvert.go index 21e1be683217..4983631c04be 100644 --- a/converter/internal/promtailconvert/promtailconvert.go +++ b/converter/internal/promtailconvert/promtailconvert.go @@ -166,4 +166,7 @@ func appendScrapeConfig( b.AppendAzureEventHubs() b.AppendGelfConfig() b.AppendHerokuDrainConfig() + + // Docker gets special treatment in Promtail, so we replicate it here. + b.AppendDockerPipeline() } diff --git a/converter/internal/promtailconvert/testdata/azure.river b/converter/internal/promtailconvert/testdata/azure.river index bfbe087b6de4..90a652e05dab 100644 --- a/converter/internal/promtailconvert/testdata/azure.river +++ b/converter/internal/promtailconvert/testdata/azure.river @@ -10,8 +10,6 @@ discovery.azure "fun" { managed_identity { client_id = "client" } - follow_redirects = true - enable_http2 = true } local.file_match "fun" { diff --git a/converter/internal/promtailconvert/testdata/cloudflare_relabel.river b/converter/internal/promtailconvert/testdata/cloudflare_relabel.river index 201ce8f30356..014d812eab61 100644 --- a/converter/internal/promtailconvert/testdata/cloudflare_relabel.river +++ b/converter/internal/promtailconvert/testdata/cloudflare_relabel.river @@ -5,7 +5,6 @@ loki.relabel "fun" { source_labels = ["__trail__"] target_label = "__path__" } - max_cache_size = 0 } loki.source.cloudflare "fun" { diff --git a/converter/internal/promtailconvert/testdata/consul.river b/converter/internal/promtailconvert/testdata/consul.river index 20b07e0900b3..72563a502d95 100644 --- a/converter/internal/promtailconvert/testdata/consul.river +++ b/converter/internal/promtailconvert/testdata/consul.river @@ -17,8 +17,6 @@ discovery.consul "fun" { username = "toby" password = "this_password_is_safe_innit?"
} - follow_redirects = true - enable_http2 = true } discovery.relabel "fun" { diff --git a/converter/internal/promtailconvert/testdata/digitalocean.river b/converter/internal/promtailconvert/testdata/digitalocean.river index 7308cfa33489..fb71e471c56f 100644 --- a/converter/internal/promtailconvert/testdata/digitalocean.river +++ b/converter/internal/promtailconvert/testdata/digitalocean.river @@ -1,8 +1,6 @@ discovery.digitalocean "fun" { refresh_interval = "10m0s" port = 1234 - follow_redirects = true - enable_http2 = true } local.file_match "fun" { diff --git a/converter/internal/promtailconvert/testdata/docker.river b/converter/internal/promtailconvert/testdata/docker.river index c55f4f5bba4d..944a06360a19 100644 --- a/converter/internal/promtailconvert/testdata/docker.river +++ b/converter/internal/promtailconvert/testdata/docker.river @@ -28,6 +28,30 @@ discovery.docker "fun" { } } +loki.source.docker "fun" { + host = "unix:///var/run/docker.sock" + targets = discovery.docker.fun.targets + forward_to = [] + relabel_rules = null + + http_client_config { + basic_auth { + username = "robin" + password_file = "/home/robin/.password" + } + proxy_url = "http://proxy.example.com" + + tls_config { + ca_file = "/home/robin/.ca" + cert_file = "/home/robin/.cert" + key_file = "/home/robin/.key" + server_name = "example.local" + insecure_skip_verify = true + } + } + refresh_interval = "10s" +} + discovery.docker "fun_2" { host = "unix:///var/run/docker.sock" port = 54321 @@ -52,14 +76,25 @@ discovery.docker "fun_2" { } } -local.file_match "fun" { - path_targets = concat( - discovery.docker.fun.targets, - discovery.docker.fun_2.targets, - ) -} +loki.source.docker "fun_2" { + host = "unix:///var/run/docker.sock" + targets = discovery.docker.fun_2.targets + forward_to = [] + relabel_rules = null + + http_client_config { + oauth2 { + client_id = "client_id" + client_secret_file = "foo/bar" + scopes = ["scope1", "scope2"] + token_url = "https://example/oauth2/token" + endpoint_params = { + host = "example", + path = "/oauth2/token", + } -loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + tls_config { } + } + } + refresh_interval = "10s" } diff --git a/converter/internal/promtailconvert/testdata/docker_relabel.river b/converter/internal/promtailconvert/testdata/docker_relabel.river new file mode 100644 index 000000000000..0c06ffa48124 --- /dev/null +++ b/converter/internal/promtailconvert/testdata/docker_relabel.river @@ -0,0 +1,63 @@ +discovery.docker "flog_scrape" { + host = "unix:///var/run/docker.sock" + refresh_interval = "5s" +} + +discovery.relabel "flog_scrape" { + targets = [] + + rule { + source_labels = ["__meta_docker_container_name"] + regex = "/(.*)" + target_label = "container" + } +} + +loki.source.docker "flog_scrape" { + host = "unix:///var/run/docker.sock" + targets = discovery.docker.flog_scrape.targets + forward_to = [loki.write.default.receiver] + relabel_rules = discovery.relabel.flog_scrape.rules + refresh_interval = "5s" +} + +discovery.docker "scrape_two" { + host = "unix:///var/run/second_docker_why_not.sock" +} + +loki.process "scrape_two" { + forward_to = [loki.write.default.receiver] + + stage.json { + expressions = { + face = "smiley", + hand = "thumbs-up", + } + source = "video" + drop_malformed = true + } +} + +discovery.relabel "scrape_two" { + targets = [] + + rule { + source_labels = ["__trail__"] + target_label = "__path__" + } +} + +loki.source.docker "scrape_two" { + host = "unix:///var/run/second_docker_why_not.sock" + targets 
= discovery.docker.scrape_two.targets + forward_to = [loki.process.scrape_two.receiver] + relabel_rules = discovery.relabel.scrape_two.rules +} + +loki.write "default" { + endpoint { + url = "http://gateway:3100/loki/api/v1/push" + tenant_id = "tenant1" + } + external_labels = {} +} diff --git a/converter/internal/promtailconvert/testdata/docker_relabel.yaml b/converter/internal/promtailconvert/testdata/docker_relabel.yaml new file mode 100644 index 000000000000..a9b090183979 --- /dev/null +++ b/converter/internal/promtailconvert/testdata/docker_relabel.yaml @@ -0,0 +1,37 @@ +tracing: { enabled: false } +server: + http_listen_port: 9080 + grpc_listen_port: 0 + register_instrumentation: false + +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://gateway:3100/loki/api/v1/push + tenant_id: tenant1 + +scrape_configs: + - job_name: flog_scrape + docker_sd_configs: + - host: unix:///var/run/docker.sock + refresh_interval: 5s + relabel_configs: + - source_labels: [ '__meta_docker_container_name' ] + regex: '/(.*)' + target_label: 'container' + - job_name: scrape_two + docker_sd_configs: + - host: unix:///var/run/second_docker_why_not.sock + refresh_interval: 1m + pipeline_stages: + - json: + expressions: + face: smiley + hand: thumbs-up + source: video + drop_malformed: true + relabel_configs: + - source_labels: + - __trail__ + target_label: __path__ diff --git a/converter/internal/promtailconvert/testdata/mixed_pipeline.river b/converter/internal/promtailconvert/testdata/mixed_pipeline.river new file mode 100644 index 000000000000..24fe5221cfc5 --- /dev/null +++ b/converter/internal/promtailconvert/testdata/mixed_pipeline.river @@ -0,0 +1,75 @@ +discovery.kubernetes "uber_pipeline" { + role = "pod" + kubeconfig_file = "/home/toby/.kube/config" +} + +discovery.consulagent "uber_pipeline" { + datacenter = "bigdata" +} + +discovery.relabel "uber_pipeline" { + targets = concat( + discovery.kubernetes.uber_pipeline.targets, + discovery.consulagent.uber_pipeline.targets, + ) + + rule { + source_labels = ["__trail__"] + target_label = "__path__" + } +} + +local.file_match "uber_pipeline" { + path_targets = discovery.relabel.uber_pipeline.output +} + +loki.process "uber_pipeline" { + forward_to = [loki.write.default.receiver] + + stage.json { + expressions = { + face = "smiley", + hand = "thumbs-up", + } + source = "video" + drop_malformed = true + } +} + +loki.source.file "uber_pipeline" { + targets = local.file_match.uber_pipeline.targets + forward_to = [loki.process.uber_pipeline.receiver] +} + +loki.source.api "uber_pipeline" { + http { } + + grpc { } + graceful_shutdown_timeout = "0s" + forward_to = [loki.process.uber_pipeline.receiver] + labels = { + identity = "unidentified", + object_type = "flying", + } + relabel_rules = discovery.relabel.uber_pipeline.rules + use_incoming_timestamp = true +} + +discovery.docker "uber_pipeline" { + host = "unix:///var/run/second_docker_why_not.sock" +} + +loki.source.docker "uber_pipeline" { + host = "unix:///var/run/second_docker_why_not.sock" + targets = discovery.docker.uber_pipeline.targets + forward_to = [loki.process.uber_pipeline.receiver] + relabel_rules = discovery.relabel.uber_pipeline.rules +} + +loki.write "default" { + endpoint { + url = "http://gateway:3100/loki/api/v1/push" + tenant_id = "tenant1" + } + external_labels = {} +} diff --git a/converter/internal/promtailconvert/testdata/mixed_pipeline.yaml b/converter/internal/promtailconvert/testdata/mixed_pipeline.yaml new file mode 100644 index 000000000000..74f356e55972 --- 
/dev/null +++ b/converter/internal/promtailconvert/testdata/mixed_pipeline.yaml @@ -0,0 +1,48 @@ +tracing: { enabled: false } +server: + http_listen_port: 9080 + grpc_listen_port: 0 + register_instrumentation: false + +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://gateway:3100/loki/api/v1/push + tenant_id: tenant1 + +scrape_configs: + # Trying to combine all the special cases in one scrape config + - job_name: uber_pipeline + # one typical SD config + kubernetes_sd_configs: + - role: pod + kubeconfig_file: /home/toby/.kube/config + + # one typical logs producing config + loki_push_api: + use_incoming_timestamp: true + labels: + identity: unidentified + object_type: flying + + # this one is handled in a special way + consulagent_sd_configs: + - server: 'localhost:8500' + datacenter: bigdata + + # this one is also handled in a special way + docker_sd_configs: + - host: unix:///var/run/second_docker_why_not.sock + refresh_interval: 1m + pipeline_stages: + - json: + expressions: + face: smiley + hand: thumbs-up + source: video + drop_malformed: true + relabel_configs: + - source_labels: + - __trail__ + target_label: __path__ diff --git a/converter/internal/promtailconvert/testdata/windowsevents_relabel.river b/converter/internal/promtailconvert/testdata/windowsevents_relabel.river index 6fa1b693dc1a..39d28dea7a67 100644 --- a/converter/internal/promtailconvert/testdata/windowsevents_relabel.river +++ b/converter/internal/promtailconvert/testdata/windowsevents_relabel.river @@ -5,7 +5,6 @@ loki.relabel "fun" { source_labels = ["__trail__"] target_label = "__path__" } - max_cache_size = 0 } loki.source.windowsevent "fun" { diff --git a/converter/internal/staticconvert/internal/build/app_agent_receiver.go b/converter/internal/staticconvert/internal/build/app_agent_receiver.go new file mode 100644 index 000000000000..d9bc0267c030 --- /dev/null +++ b/converter/internal/staticconvert/internal/build/app_agent_receiver.go @@ -0,0 +1,84 @@ +package build + +import ( + "fmt" + + "github.com/alecthomas/units" + "github.com/grafana/agent/component/common/loki" + "github.com/grafana/agent/component/faro/receiver" + "github.com/grafana/agent/component/otelcol" + "github.com/grafana/agent/converter/diag" + "github.com/grafana/agent/converter/internal/common" + app_agent_receiver_v2 "github.com/grafana/agent/pkg/integrations/v2/app_agent_receiver" + "github.com/grafana/river/rivertypes" + "github.com/grafana/river/scanner" +) + +func (b *IntegrationsConfigBuilder) appendAppAgentReceiverV2(config *app_agent_receiver_v2.Config) { + args := toAppAgentReceiverV2(config) + + compLabel, err := scanner.SanitizeIdentifier(b.formatJobName(config.Name(), nil)) + if err != nil { + b.diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to sanitize job name: %s", err)) + } + + b.f.Body().AppendBlock(common.NewBlockWithOverride( + []string{"faro", "receiver"}, + compLabel, + args, + )) +} + +func toAppAgentReceiverV2(config *app_agent_receiver_v2.Config) *receiver.Arguments { + var logLabels map[string]string + if config.LogsLabels != nil { + logLabels = config.LogsLabels + } + + logsReceiver := common.ConvertLogsReceiver{} + if config.LogsInstance != "" { + compLabel, err := scanner.SanitizeIdentifier("logs_" + config.LogsInstance) + if err != nil { + panic(fmt.Errorf("failed to sanitize job name: %s", err)) + } + + logsReceiver.Expr = fmt.Sprintf("loki.write.%s.receiver", compLabel) + } + + return &receiver.Arguments{ + LogLabels: logLabels, + Server: receiver.ServerArguments{ + Host: 
config.Server.Host, + Port: config.Server.Port, + CORSAllowedOrigins: config.Server.CORSAllowedOrigins, + APIKey: rivertypes.Secret(config.Server.APIKey), + MaxAllowedPayloadSize: units.Base2Bytes(config.Server.MaxAllowedPayloadSize), + RateLimiting: receiver.RateLimitingArguments{ + Enabled: config.Server.RateLimiting.Enabled, + Rate: config.Server.RateLimiting.RPS, + BurstSize: float64(config.Server.RateLimiting.Burstiness), + }, + }, + SourceMaps: receiver.SourceMapsArguments{ + Download: config.SourceMaps.Download, + DownloadFromOrigins: config.SourceMaps.DownloadFromOrigins, + DownloadTimeout: config.SourceMaps.DownloadTimeout, + Locations: toLocationArguments(config.SourceMaps.FileSystem), + }, + Output: receiver.OutputArguments{ + Logs: []loki.LogsReceiver{logsReceiver}, + Traces: []otelcol.Consumer{}, + }, + } +} + +func toLocationArguments(locations []app_agent_receiver_v2.SourceMapFileLocation) []receiver.LocationArguments { + args := make([]receiver.LocationArguments, len(locations)) + for i, location := range locations { + args[i] = receiver.LocationArguments{ + Path: location.Path, + MinifiedPathPrefix: location.MinifiedPathPrefix, + } + } + return args +} diff --git a/converter/internal/staticconvert/internal/build/azure_exporter.go b/converter/internal/staticconvert/internal/build/azure_exporter.go index 1a6a39f7c842..b51b36103d44 100644 --- a/converter/internal/staticconvert/internal/build/azure_exporter.go +++ b/converter/internal/staticconvert/internal/build/azure_exporter.go @@ -25,5 +25,7 @@ func toAzureExporter(config *azure_exporter.Config) *azure.Arguments { MetricNameTemplate: config.MetricNameTemplate, MetricHelpTemplate: config.MetricHelpTemplate, AzureCloudEnvironment: config.AzureCloudEnvironment, + ValidateDimensions: config.ValidateDimensions, + Regions: config.Regions, } } diff --git a/converter/internal/staticconvert/internal/build/blackbox_exporter.go b/converter/internal/staticconvert/internal/build/blackbox_exporter.go index 5282f9440b34..70007319ae1b 100644 --- a/converter/internal/staticconvert/internal/build/blackbox_exporter.go +++ b/converter/internal/staticconvert/internal/build/blackbox_exporter.go @@ -5,7 +5,6 @@ import ( "github.com/grafana/agent/component/discovery" "github.com/grafana/agent/component/prometheus/exporter/blackbox" - "github.com/grafana/agent/converter/internal/common" "github.com/grafana/agent/pkg/integrations/blackbox_exporter" blackbox_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/blackbox_exporter" "github.com/grafana/river/rivertypes" @@ -57,7 +56,7 @@ func toBlackboxTargets(blackboxTargets []blackbox_exporter.BlackboxTarget) black func toBlackboxTarget(target blackbox_exporter.BlackboxTarget) blackbox.BlackboxTarget { return blackbox.BlackboxTarget{ - Name: common.SanitizeIdentifierPanics(target.Name), + Name: target.Name, Target: target.Target, Module: target.Module, } diff --git a/converter/internal/staticconvert/internal/build/builder.go b/converter/internal/staticconvert/internal/build/builder.go index 07a2b7a000ce..58fedf6225c2 100644 --- a/converter/internal/staticconvert/internal/build/builder.go +++ b/converter/internal/staticconvert/internal/build/builder.go @@ -39,10 +39,13 @@ import ( "github.com/grafana/agent/pkg/integrations/statsd_exporter" agent_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/agent" apache_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/apache_http" + app_agent_receiver_v2 "github.com/grafana/agent/pkg/integrations/v2/app_agent_receiver" blackbox_exporter_v2 
"github.com/grafana/agent/pkg/integrations/v2/blackbox_exporter" common_v2 "github.com/grafana/agent/pkg/integrations/v2/common" - "github.com/grafana/agent/pkg/integrations/v2/metricsutils" + eventhandler_v2 "github.com/grafana/agent/pkg/integrations/v2/eventhandler" + metricsutils_v2 "github.com/grafana/agent/pkg/integrations/v2/metricsutils" snmp_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/snmp_exporter" + vmware_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/vmware_exporter" "github.com/grafana/agent/pkg/integrations/windows_exporter" "github.com/grafana/river/scanner" "github.com/grafana/river/token/builder" @@ -163,9 +166,23 @@ func (b *IntegrationsConfigBuilder) appendV1Integrations() { } func (b *IntegrationsConfigBuilder) appendExporter(commonConfig *int_config.Common, name string, extraTargets []discovery.Target) { + var relabelConfigs []*relabel.Config + if commonConfig.InstanceKey != nil { + defaultConfig := relabel.DefaultRelabelConfig + relabelConfig := &defaultConfig + relabelConfig.TargetLabel = "instance" + relabelConfig.Replacement = *commonConfig.InstanceKey + + relabelConfigs = append(relabelConfigs, relabelConfig) + } + + if relabelConfig := b.getJobRelabelConfig(name, commonConfig.RelabelConfigs); relabelConfig != nil { + relabelConfigs = append(relabelConfigs, b.getJobRelabelConfig(name, commonConfig.RelabelConfigs)) + } + scrapeConfig := prom_config.DefaultScrapeConfig scrapeConfig.JobName = b.formatJobName(name, nil) - scrapeConfig.RelabelConfigs = commonConfig.RelabelConfigs + scrapeConfig.RelabelConfigs = append(commonConfig.RelabelConfigs, relabelConfigs...) scrapeConfig.MetricRelabelConfigs = commonConfig.MetricRelabelConfigs scrapeConfig.HTTPClientConfig.TLSConfig = b.cfg.Integrations.ConfigV1.TLSConfig @@ -207,13 +224,21 @@ func (b *IntegrationsConfigBuilder) appendV2Integrations() { case *apache_exporter_v2.Config: exports = b.appendApacheExporterV2(itg) commonConfig = itg.Common + case *app_agent_receiver_v2.Config: + b.appendAppAgentReceiverV2(itg) + commonConfig = itg.Common case *blackbox_exporter_v2.Config: exports = b.appendBlackboxExporterV2(itg) commonConfig = itg.Common + case *eventhandler_v2.Config: + b.appendEventHandlerV2(itg) case *snmp_exporter_v2.Config: exports = b.appendSnmpExporterV2(itg) commonConfig = itg.Common - case *metricsutils.ConfigShim: + case *vmware_exporter_v2.Config: + exports = b.appendVmwareExporterV2(itg) + commonConfig = itg.Common + case *metricsutils_v2.ConfigShim: commonConfig = itg.Common switch v1_itg := itg.Orig.(type) { case *azure_exporter.Config: @@ -282,6 +307,19 @@ func (b *IntegrationsConfigBuilder) appendExporterV2(commonConfig *common_v2.Met relabelConfigs = append(relabelConfigs, relabelConfig) } + if commonConfig.InstanceKey != nil { + defaultConfig := relabel.DefaultRelabelConfig + relabelConfig := &defaultConfig + relabelConfig.TargetLabel = "instance" + relabelConfig.Replacement = *commonConfig.InstanceKey + + relabelConfigs = append(relabelConfigs, relabelConfig) + } + + if relabelConfig := b.getJobRelabelConfig(name, commonConfig.Autoscrape.RelabelConfigs); relabelConfig != nil { + relabelConfigs = append(relabelConfigs, relabelConfig) + } + commonConfig.ApplyDefaults(b.cfg.Integrations.ConfigV2.Metrics.Autoscrape) scrapeConfig := prom_config.DefaultScrapeConfig scrapeConfig.JobName = b.formatJobName(name, commonConfig.InstanceKey) @@ -367,3 +405,18 @@ func (b *IntegrationsConfigBuilder) appendExporterBlock(args component.Arguments return 
common.NewDiscoveryExports(fmt.Sprintf("prometheus.exporter.%s.%s.targets", exporterName, compLabel)) } + +func (b *IntegrationsConfigBuilder) getJobRelabelConfig(name string, relabelConfigs []*relabel.Config) *relabel.Config { + // Don't add a job relabel if that label is already targeted + for _, relabelConfig := range relabelConfigs { + if relabelConfig.TargetLabel == "job" { + return nil + } + } + + defaultConfig := relabel.DefaultRelabelConfig + relabelConfig := &defaultConfig + relabelConfig.TargetLabel = "job" + relabelConfig.Replacement = "integrations/" + name + return relabelConfig +} diff --git a/converter/internal/staticconvert/internal/build/cadvisor_exporter.go b/converter/internal/staticconvert/internal/build/cadvisor_exporter.go index dbedebfb2967..0c6445c5376f 100644 --- a/converter/internal/staticconvert/internal/build/cadvisor_exporter.go +++ b/converter/internal/staticconvert/internal/build/cadvisor_exporter.go @@ -33,5 +33,6 @@ func toCadvisorExporter(config *cadvisor_integration.Config) *cadvisor.Arguments DockerTLSKey: config.DockerTLSKey, DockerTLSCA: config.DockerTLSCA, DockerOnly: config.DockerOnly, + DisableRootCgroupStats: config.DisableRootCgroupStats, } } diff --git a/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go b/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go index 97022a1f182f..67dda9e0285c 100644 --- a/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go +++ b/converter/internal/staticconvert/internal/build/elasticsearch_exporter.go @@ -1,9 +1,11 @@ package build import ( + commonCfg "github.com/grafana/agent/component/common/config" "github.com/grafana/agent/component/discovery" "github.com/grafana/agent/component/prometheus/exporter/elasticsearch" "github.com/grafana/agent/pkg/integrations/elasticsearch_exporter" + "github.com/grafana/river/rivertypes" ) func (b *IntegrationsConfigBuilder) appendElasticsearchExporter(config *elasticsearch_exporter.Config, instanceKey *string) discovery.Exports { @@ -12,7 +14,7 @@ func (b *IntegrationsConfigBuilder) appendElasticsearchExporter(config *elastics } func toElasticsearchExporter(config *elasticsearch_exporter.Config) *elasticsearch.Arguments { - return &elasticsearch.Arguments{ + arg := &elasticsearch.Arguments{ Address: config.Address, Timeout: config.Timeout, AllNodes: config.AllNodes, @@ -31,4 +33,14 @@ func toElasticsearchExporter(config *elasticsearch_exporter.Config) *elasticsear ExportDataStreams: config.ExportDataStreams, ExportSLM: config.ExportSLM, } + + if config.BasicAuth != nil { + arg.BasicAuth = &commonCfg.BasicAuth{ + Username: config.BasicAuth.Username, + Password: rivertypes.Secret(config.BasicAuth.Password), + PasswordFile: config.BasicAuth.PasswordFile, + } + } + + return arg } diff --git a/converter/internal/staticconvert/internal/build/eventhandler.go b/converter/internal/staticconvert/internal/build/eventhandler.go new file mode 100644 index 000000000000..bf816d6d451a --- /dev/null +++ b/converter/internal/staticconvert/internal/build/eventhandler.go @@ -0,0 +1,98 @@ +package build + +import ( + "fmt" + + "github.com/grafana/agent/component/common/loki" + flow_relabel "github.com/grafana/agent/component/common/relabel" + "github.com/grafana/agent/component/loki/relabel" + "github.com/grafana/agent/component/loki/source/kubernetes_events" + "github.com/grafana/agent/converter/diag" + "github.com/grafana/agent/converter/internal/common" + eventhandler_v2 "github.com/grafana/agent/pkg/integrations/v2/eventhandler" + 
"github.com/grafana/river/scanner" +) + +func (b *IntegrationsConfigBuilder) appendEventHandlerV2(config *eventhandler_v2.Config) { + compLabel, err := scanner.SanitizeIdentifier(b.formatJobName(config.Name(), nil)) + if err != nil { + b.diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to sanitize job name: %s", err)) + } + + b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.SendTimeout, eventhandler_v2.DefaultConfig.SendTimeout, "eventhandler send_timeout", "this field is not configurable in flow mode")) + b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.CachePath, eventhandler_v2.DefaultConfig.CachePath, "eventhandler cache_path", "this field is not configurable in flow mode")) + b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.InformerResync, eventhandler_v2.DefaultConfig.InformerResync, "eventhandler informer_resync", "this field is not configurable in flow mode")) + b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.FlushInterval, eventhandler_v2.DefaultConfig.FlushInterval, "eventhandler flush_interval", "this field is not configurable in flow mode")) + + receiver := getLogsReceiver(config) + if len(config.ExtraLabels) > 0 { + receiver = b.injectExtraLabels(config, receiver, compLabel) + } + + args := toEventHandlerV2(config, receiver) + + b.f.Body().AppendBlock(common.NewBlockWithOverride( + []string{"loki", "source", "kubernetes_events"}, + compLabel, + args, + )) +} + +func (b *IntegrationsConfigBuilder) injectExtraLabels(config *eventhandler_v2.Config, receiver common.ConvertLogsReceiver, compLabel string) common.ConvertLogsReceiver { + var relabelConfigs []*flow_relabel.Config + for _, extraLabel := range config.ExtraLabels { + defaultConfig := flow_relabel.DefaultRelabelConfig + relabelConfig := &defaultConfig + relabelConfig.SourceLabels = []string{"__address__"} + relabelConfig.TargetLabel = extraLabel.Name + relabelConfig.Replacement = extraLabel.Value + + relabelConfigs = append(relabelConfigs, relabelConfig) + } + + relabelArgs := relabel.Arguments{ + ForwardTo: []loki.LogsReceiver{receiver}, + RelabelConfigs: relabelConfigs, + MaxCacheSize: relabel.DefaultArguments.MaxCacheSize, + } + + b.f.Body().AppendBlock(common.NewBlockWithOverride( + []string{"loki", "relabel"}, + compLabel, + relabelArgs, + )) + + return common.ConvertLogsReceiver{ + Expr: fmt.Sprintf("loki.relabel.%s.receiver", compLabel), + } +} + +func getLogsReceiver(config *eventhandler_v2.Config) common.ConvertLogsReceiver { + logsReceiver := common.ConvertLogsReceiver{} + if config.LogsInstance != "" { + compLabel, err := scanner.SanitizeIdentifier("logs_" + config.LogsInstance) + if err != nil { + panic(fmt.Errorf("failed to sanitize job name: %s", err)) + } + + logsReceiver.Expr = fmt.Sprintf("loki.write.%s.receiver", compLabel) + } + + return logsReceiver +} + +func toEventHandlerV2(config *eventhandler_v2.Config, receiver common.ConvertLogsReceiver) *kubernetes_events.Arguments { + defaultOverrides := kubernetes_events.DefaultArguments + defaultOverrides.Client.KubeConfig = config.KubeconfigPath + if config.Namespace != "" { + defaultOverrides.Namespaces = []string{config.Namespace} + } + + return &kubernetes_events.Arguments{ + ForwardTo: []loki.LogsReceiver{receiver}, + JobName: kubernetes_events.DefaultArguments.JobName, + Namespaces: defaultOverrides.Namespaces, + LogFormat: config.LogFormat, + Client: defaultOverrides.Client, + } +} diff --git a/converter/internal/staticconvert/internal/build/kafka_exporter.go 
b/converter/internal/staticconvert/internal/build/kafka_exporter.go index 25310e35a5f4..16be4275ddce 100644 --- a/converter/internal/staticconvert/internal/build/kafka_exporter.go +++ b/converter/internal/staticconvert/internal/build/kafka_exporter.go @@ -4,6 +4,7 @@ import ( "github.com/grafana/agent/component/discovery" "github.com/grafana/agent/component/prometheus/exporter/kafka" "github.com/grafana/agent/pkg/integrations/kafka_exporter" + "github.com/grafana/river/rivertypes" ) func (b *IntegrationsConfigBuilder) appendKafkaExporter(config *kafka_exporter.Config, instanceKey *string) discovery.Exports { @@ -17,7 +18,7 @@ func toKafkaExporter(config *kafka_exporter.Config) *kafka.Arguments { UseSASL: config.UseSASL, UseSASLHandshake: config.UseSASLHandshake, SASLUsername: config.SASLUsername, - SASLPassword: config.SASLPassword, + SASLPassword: rivertypes.Secret(config.SASLPassword), SASLMechanism: config.SASLMechanism, UseTLS: config.UseTLS, CAFile: config.CAFile, diff --git a/converter/internal/staticconvert/internal/build/vmware_exporter.go b/converter/internal/staticconvert/internal/build/vmware_exporter.go new file mode 100644 index 000000000000..61b595330b6d --- /dev/null +++ b/converter/internal/staticconvert/internal/build/vmware_exporter.go @@ -0,0 +1,25 @@ +package build + +import ( + "github.com/grafana/agent/component/discovery" + "github.com/grafana/agent/component/prometheus/exporter/vsphere" + vmware_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/vmware_exporter" + "github.com/grafana/river/rivertypes" +) + +func (b *IntegrationsConfigBuilder) appendVmwareExporterV2(config *vmware_exporter_v2.Config) discovery.Exports { + args := toVmwareExporter(config) + return b.appendExporterBlock(args, config.Name(), nil, "vsphere") +} + +func toVmwareExporter(config *vmware_exporter_v2.Config) *vsphere.Arguments { + return &vsphere.Arguments{ + ChunkSize: config.ChunkSize, + CollectConcurrency: config.CollectConcurrency, + VSphereURL: config.VSphereURL, + VSphereUser: config.VSphereUser, + VSpherePass: rivertypes.Secret(config.VSpherePass), + ObjectDiscoveryInterval: config.ObjectDiscoveryInterval, + EnableExporterMetrics: config.EnableExporterMetrics, + } +} diff --git a/converter/internal/staticconvert/internal/build/windows_exporter.go b/converter/internal/staticconvert/internal/build/windows_exporter.go index 100a8761ccfc..73aa706e8235 100644 --- a/converter/internal/staticconvert/internal/build/windows_exporter.go +++ b/converter/internal/staticconvert/internal/build/windows_exporter.go @@ -1,6 +1,8 @@ package build import ( + "strings" + "github.com/grafana/agent/component/discovery" "github.com/grafana/agent/component/prometheus/exporter/windows" "github.com/grafana/agent/pkg/integrations/windows_exporter" @@ -13,12 +15,12 @@ func (b *IntegrationsConfigBuilder) appendWindowsExporter(config *windows_export func toWindowsExporter(config *windows_exporter.Config) *windows.Arguments { return &windows.Arguments{ - EnabledCollectors: splitByCommaNullOnEmpty(config.EnabledCollectors), + EnabledCollectors: strings.Split(config.EnabledCollectors, ","), Dfsr: windows.DfsrConfig{ - SourcesEnabled: splitByCommaNullOnEmpty(config.Dfsr.SourcesEnabled), + SourcesEnabled: strings.Split(config.Dfsr.SourcesEnabled, ","), }, Exchange: windows.ExchangeConfig{ - EnabledList: splitByCommaNullOnEmpty(config.Exchange.EnabledList), + EnabledList: strings.Split(config.Exchange.EnabledList, ","), }, IIS: windows.IISConfig{ AppBlackList: config.IIS.AppBlackList, @@ -40,13 +42,13 @@ func 
toWindowsExporter(config *windows_exporter.Config) *windows.Arguments { Where: config.MSMQ.Where, }, MSSQL: windows.MSSQLConfig{ - EnabledClasses: splitByCommaNullOnEmpty(config.MSSQL.EnabledClasses), + EnabledClasses: strings.Split(config.MSSQL.EnabledClasses, ","), }, Network: windows.NetworkConfig{ BlackList: config.Network.BlackList, WhiteList: config.Network.WhiteList, - Exclude: config.Network.Include, - Include: config.Network.Exclude, + Exclude: config.Network.Exclude, + Include: config.Network.Include, }, Process: windows.ProcessConfig{ BlackList: config.Process.BlackList, diff --git a/converter/internal/staticconvert/staticconvert.go b/converter/internal/staticconvert/staticconvert.go index 3b66e2a817fb..5540446b50c1 100644 --- a/converter/internal/staticconvert/staticconvert.go +++ b/converter/internal/staticconvert/staticconvert.go @@ -31,7 +31,7 @@ func Convert(in []byte, extraArgs []string) ([]byte, diag.Diagnostics) { var diags diag.Diagnostics fs := flag.NewFlagSet("convert", flag.ContinueOnError) - args := []string{"-config.file", "convert", "-config.expand-env"} + args := []string{"-config.file", "convert"} args = append(args, extraArgs...) staticConfig, err := config.LoadFromFunc(fs, args, func(_, _ string, expandEnvVars bool, c *config.Config) error { return config.LoadBytes(in, expandEnvVars, c) diff --git a/converter/internal/staticconvert/staticconvert_test.go b/converter/internal/staticconvert/staticconvert_test.go index f85e880b37d8..42a79b248eeb 100644 --- a/converter/internal/staticconvert/staticconvert_test.go +++ b/converter/internal/staticconvert/staticconvert_test.go @@ -10,8 +10,8 @@ import ( ) func TestConvert(t *testing.T) { - test_common.TestDirectory(t, "testdata", ".yaml", true, []string{}, staticconvert.Convert) - test_common.TestDirectory(t, "testdata-v2", ".yaml", true, []string{"-enable-features", "integrations-next"}, staticconvert.Convert) + test_common.TestDirectory(t, "testdata", ".yaml", true, []string{"-config.expand-env"}, staticconvert.Convert) + test_common.TestDirectory(t, "testdata-v2", ".yaml", true, []string{"-enable-features", "integrations-next", "-config.expand-env"}, staticconvert.Convert) if runtime.GOOS == "windows" { test_common.TestDirectory(t, "testdata_windows", ".yaml", true, []string{}, staticconvert.Convert) diff --git a/converter/internal/staticconvert/testdata-v2/integrations_v2.river b/converter/internal/staticconvert/testdata-v2/integrations_v2.river index f3d134cdc79a..919af1b47286 100644 --- a/converter/internal/staticconvert/testdata-v2/integrations_v2.river +++ b/converter/internal/staticconvert/testdata-v2/integrations_v2.river @@ -1,6 +1,6 @@ prometheus.remote_write "metrics_default" { endpoint { - name = "default-8be96f" + name = "default-149bbd" url = "http://localhost:9009/api/prom/push" queue_config { } @@ -9,14 +9,60 @@ prometheus.remote_write "metrics_default" { } } +loki.write "logs_log_config" { + endpoint { + url = "http://localhost/loki/api/v1/push" + } + external_labels = {} +} + +logging { + level = "debug" + format = "json" +} + +loki.relabel "integrations_eventhandler" { + forward_to = [loki.write.logs_log_config.receiver] + + rule { + source_labels = ["__address__"] + target_label = "test_label" + replacement = "test_label_value" + } + + rule { + source_labels = ["__address__"] + target_label = "test_label_2" + replacement = "test_label_value_2" + } +} + +loki.source.kubernetes_events "integrations_eventhandler" { + forward_to = [loki.relabel.integrations_eventhandler.receiver] +} + 
prometheus.exporter.azure "integrations_azure1" { subscriptions = ["subId"] resource_type = "Microsoft.Dashboard/grafana" metrics = ["HttpRequestCount"] } +discovery.relabel "integrations_azure1" { + targets = prometheus.exporter.azure.integrations_azure1.targets + + rule { + target_label = "instance" + replacement = "azure1" + } + + rule { + target_label = "job" + replacement = "integrations/azure" + } +} + prometheus.scrape "integrations_azure1" { - targets = prometheus.exporter.azure.integrations_azure1.targets + targets = discovery.relabel.integrations_azure1.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/azure1" } @@ -27,8 +73,22 @@ prometheus.exporter.azure "integrations_azure2" { metrics = ["HttpRequestCount"] } +discovery.relabel "integrations_azure2" { + targets = prometheus.exporter.azure.integrations_azure2.targets + + rule { + target_label = "instance" + replacement = "azure2" + } + + rule { + target_label = "job" + replacement = "integrations/azure" + } +} + prometheus.scrape "integrations_azure2" { - targets = prometheus.exporter.azure.integrations_azure2.targets + targets = discovery.relabel.integrations_azure2.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/azure2" } @@ -37,8 +97,17 @@ prometheus.exporter.cadvisor "integrations_cadvisor" { store_container_labels = false } +discovery.relabel "integrations_cadvisor" { + targets = prometheus.exporter.cadvisor.integrations_cadvisor.targets + + rule { + target_label = "job" + replacement = "integrations/cadvisor" + } +} + prometheus.scrape "integrations_cadvisor" { - targets = prometheus.exporter.cadvisor.integrations_cadvisor.targets + targets = discovery.relabel.integrations_cadvisor.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/cadvisor" } @@ -94,16 +163,34 @@ prometheus.exporter.cloudwatch "integrations_cloudwatch_exporter" { decoupled_scraping { } } +discovery.relabel "integrations_cloudwatch" { + targets = prometheus.exporter.cloudwatch.integrations_cloudwatch_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/cloudwatch" + } +} + prometheus.scrape "integrations_cloudwatch" { - targets = prometheus.exporter.cloudwatch.integrations_cloudwatch_exporter.targets + targets = discovery.relabel.integrations_cloudwatch.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/cloudwatch" } prometheus.exporter.consul "integrations_consul_exporter" { } +discovery.relabel "integrations_consul" { + targets = prometheus.exporter.consul.integrations_consul_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/consul" + } +} + prometheus.scrape "integrations_consul" { - targets = prometheus.exporter.consul.integrations_consul_exporter.targets + targets = discovery.relabel.integrations_consul.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/consul" } @@ -120,6 +207,11 @@ discovery.relabel "integrations_dnsmasq" { target_label = "instance" replacement = "dnsmasq-a" } + + rule { + target_label = "job" + replacement = "integrations/dnsmasq" + } } prometheus.scrape "integrations_dnsmasq" { @@ -130,8 +222,17 @@ prometheus.scrape "integrations_dnsmasq" { prometheus.exporter.elasticsearch "integrations_elasticsearch_exporter" { } +discovery.relabel "integrations_elasticsearch" { + targets = 
prometheus.exporter.elasticsearch.integrations_elasticsearch_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/elasticsearch" + } +} + prometheus.scrape "integrations_elasticsearch" { - targets = prometheus.exporter.elasticsearch.integrations_elasticsearch_exporter.targets + targets = discovery.relabel.integrations_elasticsearch.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/elasticsearch" } @@ -142,8 +243,17 @@ prometheus.exporter.gcp "integrations_gcp_exporter" { extra_filters = ["loadbalancing.googleapis.com:resource.labels.backend_target_name=\"sample-value\""] } +discovery.relabel "integrations_gcp" { + targets = prometheus.exporter.gcp.integrations_gcp_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/gcp" + } +} + prometheus.scrape "integrations_gcp" { - targets = prometheus.exporter.gcp.integrations_gcp_exporter.targets + targets = discovery.relabel.integrations_gcp.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/gcp" } @@ -153,16 +263,34 @@ prometheus.exporter.github "integrations_github_exporter" { api_token = "ABCDEFGH-1234-ABCD-1234-ABCDEFGHIJKL" } +discovery.relabel "integrations_github" { + targets = prometheus.exporter.github.integrations_github_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/github" + } +} + prometheus.scrape "integrations_github" { - targets = prometheus.exporter.github.integrations_github_exporter.targets + targets = discovery.relabel.integrations_github.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/github" } prometheus.exporter.kafka "integrations_kafka_exporter" { } +discovery.relabel "integrations_kafka" { + targets = prometheus.exporter.kafka.integrations_kafka_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/kafka" + } +} + prometheus.scrape "integrations_kafka" { - targets = prometheus.exporter.kafka.integrations_kafka_exporter.targets + targets = discovery.relabel.integrations_kafka.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/kafka" } @@ -179,6 +307,11 @@ discovery.relabel "integrations_memcached" { target_label = "instance" replacement = "memcached-a" } + + rule { + target_label = "job" + replacement = "integrations/memcached" + } } prometheus.scrape "integrations_memcached" { @@ -206,6 +339,11 @@ discovery.relabel "integrations_mongodb" { target_label = "mongodb_cluster" replacement = "prod-cluster" } + + rule { + target_label = "job" + replacement = "integrations/mongodb" + } } prometheus.scrape "integrations_mongodb" { @@ -218,8 +356,17 @@ prometheus.exporter.mssql "integrations_mssql" { connection_string = "sqlserver://:@:" } +discovery.relabel "integrations_mssql" { + targets = prometheus.exporter.mssql.integrations_mssql.targets + + rule { + target_label = "job" + replacement = "integrations/mssql" + } +} + prometheus.scrape "integrations_mssql" { - targets = prometheus.exporter.mssql.integrations_mssql.targets + targets = discovery.relabel.integrations_mssql.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/mssql" } @@ -236,6 +383,11 @@ discovery.relabel "integrations_mysql" { target_label = "instance" replacement = "server-a" } + + rule { + target_label = "job" + replacement = "integrations/mysql" + } } prometheus.scrape "integrations_mysql" { @@ -263,6 +415,11 @@ 
discovery.relabel "integrations_node_exporter" { target_label = "__address__" replacement = "localhost:8099" } + + rule { + target_label = "job" + replacement = "integrations/node_exporter" + } } prometheus.scrape "integrations_node_exporter" { @@ -289,8 +446,17 @@ prometheus.exporter.oracledb "integrations_oracledb" { connection_string = "oracle://user:password@localhost:1521/orcl.localnet" } +discovery.relabel "integrations_oracledb" { + targets = prometheus.exporter.oracledb.integrations_oracledb.targets + + rule { + target_label = "job" + replacement = "integrations/oracledb" + } +} + prometheus.scrape "integrations_oracledb" { - targets = prometheus.exporter.oracledb.integrations_oracledb.targets + targets = discovery.relabel.integrations_oracledb.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/oracledb" } @@ -307,6 +473,11 @@ discovery.relabel "integrations_postgres" { target_label = "instance" replacement = "postgres-a" } + + rule { + target_label = "job" + replacement = "integrations/postgres" + } } prometheus.scrape "integrations_postgres" { @@ -322,8 +493,17 @@ prometheus.exporter.process "integrations_process_exporter" { } } +discovery.relabel "integrations_process" { + targets = prometheus.exporter.process.integrations_process_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/process" + } +} + prometheus.scrape "integrations_process" { - targets = prometheus.exporter.process.integrations_process_exporter.targets + targets = discovery.relabel.integrations_process.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/process" } @@ -341,6 +521,11 @@ discovery.relabel "integrations_redis" { target_label = "instance" replacement = "redis-2" } + + rule { + target_label = "job" + replacement = "integrations/redis" + } } prometheus.scrape "integrations_redis" { @@ -356,8 +541,17 @@ prometheus.exporter.snowflake "integrations_snowflake" { warehouse = "SNOWFLAKE_WAREHOUSE" } +discovery.relabel "integrations_snowflake" { + targets = prometheus.exporter.snowflake.integrations_snowflake.targets + + rule { + target_label = "job" + replacement = "integrations/snowflake" + } +} + prometheus.scrape "integrations_snowflake" { - targets = prometheus.exporter.snowflake.integrations_snowflake.targets + targets = discovery.relabel.integrations_snowflake.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/snowflake" } @@ -366,8 +560,17 @@ prometheus.exporter.squid "integrations_squid" { address = "localhost:3128" } +discovery.relabel "integrations_squid" { + targets = prometheus.exporter.squid.integrations_squid.targets + + rule { + target_label = "job" + replacement = "integrations/squid" + } +} + prometheus.scrape "integrations_squid" { - targets = prometheus.exporter.squid.integrations_squid.targets + targets = discovery.relabel.integrations_squid.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/squid" scrape_timeout = "1m0s" @@ -375,8 +578,17 @@ prometheus.scrape "integrations_squid" { prometheus.exporter.statsd "integrations_statsd_exporter" { } +discovery.relabel "integrations_statsd" { + targets = prometheus.exporter.statsd.integrations_statsd_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/statsd" + } +} + prometheus.scrape "integrations_statsd" { - targets = prometheus.exporter.statsd.integrations_statsd_exporter.targets + targets = 
discovery.relabel.integrations_statsd.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/statsd" } @@ -397,6 +609,11 @@ discovery.relabel "integrations_agent" { target_label = "test_label_2" replacement = "test_label_value_2" } + + rule { + target_label = "job" + replacement = "integrations/agent" + } } prometheus.scrape "integrations_agent" { @@ -409,8 +626,22 @@ prometheus.exporter.apache "integrations_apache1" { insecure = true } +discovery.relabel "integrations_apache1" { + targets = prometheus.exporter.apache.integrations_apache1.targets + + rule { + target_label = "instance" + replacement = "apache1" + } + + rule { + target_label = "job" + replacement = "integrations/apache_http" + } +} + prometheus.scrape "integrations_apache1" { - targets = prometheus.exporter.apache.integrations_apache1.targets + targets = discovery.relabel.integrations_apache1.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/apache1" } @@ -431,6 +662,16 @@ discovery.relabel "integrations_apache2" { target_label = "test_label_2" replacement = "test_label_value_2" } + + rule { + target_label = "instance" + replacement = "apache2" + } + + rule { + target_label = "job" + replacement = "integrations/apache_http" + } } prometheus.scrape "integrations_apache2" { @@ -439,18 +680,54 @@ prometheus.scrape "integrations_apache2" { job_name = "integrations/apache2" } +faro.receiver "integrations_app_agent_receiver" { + extra_log_labels = {} + + server { + listen_address = "localhost" + listen_port = 55678 + max_allowed_payload_size = "4MiB786KiB832B" + + rate_limiting { + enabled = true + rate = 100 + burst_size = 50 + } + } + + sourcemaps { + download_from_origins = ["*"] + download_timeout = "1s" + } + + output { + logs = [loki.write.logs_log_config.receiver] + traces = [] + } +} + prometheus.exporter.blackbox "integrations_blackbox" { config = "modules:\n http_2xx:\n prober: http\n timeout: 5s\n http:\n method: POST\n headers:\n Content-Type: application/json\n body: '{}'\n preferred_ip_protocol: ip4\n" - target "example" { + target { + name = "example" address = "http://example.com" module = "http_2xx" } probe_timeout_offset = "0s" } +discovery.relabel "integrations_blackbox" { + targets = prometheus.exporter.blackbox.integrations_blackbox.targets + + rule { + target_label = "job" + replacement = "integrations/blackbox" + } +} + prometheus.scrape "integrations_blackbox" { - targets = prometheus.exporter.blackbox.integrations_blackbox.targets + targets = discovery.relabel.integrations_blackbox.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/blackbox" } @@ -471,8 +748,43 @@ prometheus.exporter.snmp "integrations_snmp" { } } +discovery.relabel "integrations_snmp" { + targets = prometheus.exporter.snmp.integrations_snmp.targets + + rule { + target_label = "job" + replacement = "integrations/snmp" + } +} + prometheus.scrape "integrations_snmp" { - targets = prometheus.exporter.snmp.integrations_snmp.targets + targets = discovery.relabel.integrations_snmp.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/snmp" } + +prometheus.exporter.vsphere "integrations_vsphere" { + vsphere_url = "https://127.0.0.1:8989/sdk" + vsphere_user = "user" + vsphere_password = "pass" +} + +discovery.relabel "integrations_vsphere" { + targets = prometheus.exporter.vsphere.integrations_vsphere.targets + + rule { + target_label = "instance" + replacement = "vsphere" + } 
+ + rule { + target_label = "job" + replacement = "integrations/vsphere" + } +} + +prometheus.scrape "integrations_vsphere" { + targets = discovery.relabel.integrations_vsphere.output + forward_to = [prometheus.remote_write.metrics_default.receiver] + job_name = "integrations/vsphere" +} diff --git a/converter/internal/staticconvert/testdata-v2/integrations_v2.yaml b/converter/internal/staticconvert/testdata-v2/integrations_v2.yaml index d533977c6a9f..cd0c497d15cc 100644 --- a/converter/internal/staticconvert/testdata-v2/integrations_v2.yaml +++ b/converter/internal/staticconvert/testdata-v2/integrations_v2.yaml @@ -1,3 +1,7 @@ +server: + log_level: ${SOME_ENVIRONMENT_VARIABLE:='debug'} + log_format: json + metrics: global: remote_write: @@ -5,6 +9,13 @@ metrics: configs: - name: default +logs: + positions_directory: /path + configs: + - name: log_config + clients: + - url: http://localhost/loki/api/v1/push + integrations: agent: autoscrape: @@ -19,6 +30,12 @@ integrations: extra_labels: test_label: test_label_value test_label_2: test_label_value_2 + app_agent_receiver_configs: + - instance: "default" + logs_instance: "log_config" + server: + host: "localhost" + port: 55678 azure_configs: - instance: "azure1" subscriptions: @@ -100,6 +117,12 @@ integrations: elasticsearch_configs: - autoscrape: metrics_instance: "default" + eventhandler: + cache_path: "./.eventcache/eventhandler.cache" + logs_instance: "log_config" + extra_labels: + test_label: test_label_value + test_label_2: test_label_value_2 gcp_configs: - project_ids: - @@ -204,4 +227,14 @@ integrations: scrape_timeout: 1m statsd: autoscrape: - metrics_instance: "default" \ No newline at end of file + metrics_instance: "default" + vsphere_configs: + - vsphere_url: https://127.0.0.1:8989/sdk + vsphere_user: user + vsphere_password: pass + request_chunk_size: 256 + collect_concurrency: 8 + instance: vsphere + autoscrape: + enable: true + metrics_instance: default \ No newline at end of file diff --git a/converter/internal/staticconvert/testdata-v2/unsupported.diags b/converter/internal/staticconvert/testdata-v2/unsupported.diags new file mode 100644 index 000000000000..cf356c13c1da --- /dev/null +++ b/converter/internal/staticconvert/testdata-v2/unsupported.diags @@ -0,0 +1,6 @@ +(Error) The converter does not support converting the provided eventhandler send_timeout config: this field is not configurable in flow mode +(Error) The converter does not support converting the provided eventhandler cache_path config: this field is not configurable in flow mode +(Error) The converter does not support converting the provided eventhandler informer_resync config: this field is not configurable in flow mode +(Error) The converter does not support converting the provided eventhandler flush_interval config: this field is not configurable in flow mode +(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. +(Error) The converter does not support converting the provided app_agent_receiver traces_instance config. 
\ No newline at end of file diff --git a/converter/internal/staticconvert/testdata-v2/unsupported.river b/converter/internal/staticconvert/testdata-v2/unsupported.river new file mode 100644 index 000000000000..c9585a88c5dc --- /dev/null +++ b/converter/internal/staticconvert/testdata-v2/unsupported.river @@ -0,0 +1,47 @@ +prometheus.remote_write "metrics_default" { + endpoint { + name = "default-149bbd" + url = "http://localhost:9009/api/prom/push" + + queue_config { } + + metadata_config { } + } +} + +loki.write "logs_log_config" { + endpoint { + url = "http://localhost/loki/api/v1/push" + } + external_labels = {} +} + +loki.source.kubernetes_events "integrations_eventhandler" { + forward_to = [loki.write.logs_log_config.receiver] +} + +faro.receiver "integrations_app_agent_receiver" { + extra_log_labels = {} + + server { + listen_address = "localhost" + listen_port = 55678 + max_allowed_payload_size = "4MiB786KiB832B" + + rate_limiting { + enabled = true + rate = 100 + burst_size = 50 + } + } + + sourcemaps { + download_from_origins = ["*"] + download_timeout = "1s" + } + + output { + logs = [] + traces = [] + } +} diff --git a/converter/internal/staticconvert/testdata-v2/unsupported.yaml b/converter/internal/staticconvert/testdata-v2/unsupported.yaml new file mode 100644 index 000000000000..dfce6ed22e45 --- /dev/null +++ b/converter/internal/staticconvert/testdata-v2/unsupported.yaml @@ -0,0 +1,28 @@ + +metrics: + global: + remote_write: + - url: http://localhost:9009/api/prom/push + configs: + - name: default + +logs: + positions_directory: /path + configs: + - name: log_config + clients: + - url: http://localhost/loki/api/v1/push + +integrations: + app_agent_receiver_configs: + - instance: "default" + traces_instance: "not_supported" + server: + host: "localhost" + port: 55678 + eventhandler: + cache_path: "/etc/eventhandler/not_default.cache" + logs_instance: "log_config" + send_timeout: 30 + informer_resync: 30 + flush_interval: 30 \ No newline at end of file diff --git a/converter/internal/staticconvert/testdata-v2_windows/integrations_v2.river b/converter/internal/staticconvert/testdata-v2_windows/integrations_v2.river index e01818b3faad..8d95a97c8b7b 100644 --- a/converter/internal/staticconvert/testdata-v2_windows/integrations_v2.river +++ b/converter/internal/staticconvert/testdata-v2_windows/integrations_v2.river @@ -1,6 +1,6 @@ prometheus.remote_write "metrics_default" { endpoint { - name = "default-8be96f" + name = "default-149bbd" url = "http://localhost:9009/api/prom/push" queue_config { } @@ -9,16 +9,19 @@ prometheus.remote_write "metrics_default" { } } -prometheus.exporter.windows "integrations_windows_exporter" { - exchange { } +prometheus.exporter.windows "integrations_windows_exporter" { } - network { - exclude = ".+" +discovery.relabel "integrations_windows" { + targets = prometheus.exporter.windows.integrations_windows_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/windows" } } prometheus.scrape "integrations_windows" { - targets = prometheus.exporter.windows.integrations_windows_exporter.targets + targets = discovery.relabel.integrations_windows.output forward_to = [prometheus.remote_write.metrics_default.receiver] job_name = "integrations/windows" } diff --git a/converter/internal/staticconvert/testdata/integrations.river b/converter/internal/staticconvert/testdata/integrations.river index 1fc2e1867c1e..07875ba5fcea 100644 --- a/converter/internal/staticconvert/testdata/integrations.river +++ 
b/converter/internal/staticconvert/testdata/integrations.river @@ -1,7 +1,16 @@ prometheus.exporter.agent "integrations_agent" { } +discovery.relabel "integrations_agent" { + targets = prometheus.exporter.agent.integrations_agent.targets + + rule { + target_label = "job" + replacement = "integrations/agent" + } +} + prometheus.scrape "integrations_agent" { - targets = prometheus.exporter.agent.integrations_agent.targets + targets = discovery.relabel.integrations_agent.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/agent" @@ -26,8 +35,17 @@ prometheus.exporter.apache "integrations_apache_http" { scrape_uri = "http://0.0.0.0/server-status?auto" } +discovery.relabel "integrations_apache_http" { + targets = prometheus.exporter.apache.integrations_apache_http.targets + + rule { + target_label = "job" + replacement = "integrations/apache_http" + } +} + prometheus.scrape "integrations_apache_http" { - targets = prometheus.exporter.apache.integrations_apache_http.targets + targets = discovery.relabel.integrations_apache_http.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/apache_http" @@ -41,15 +59,25 @@ prometheus.scrape "integrations_apache_http" { prometheus.exporter.blackbox "integrations_blackbox" { config = "modules:\n http_2xx:\n prober: http\n timeout: 5s\n http:\n method: POST\n headers:\n Content-Type: application/json\n body: '{}'\n preferred_ip_protocol: ip4\n" - target "example" { + target { + name = "example" address = "http://example.com" module = "http_2xx" } probe_timeout_offset = "0s" } +discovery.relabel "integrations_blackbox" { + targets = prometheus.exporter.blackbox.integrations_blackbox.targets + + rule { + target_label = "job" + replacement = "integrations/blackbox" + } +} + prometheus.scrape "integrations_blackbox" { - targets = prometheus.exporter.blackbox.integrations_blackbox.targets + targets = discovery.relabel.integrations_blackbox.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/blackbox" @@ -76,8 +104,17 @@ prometheus.exporter.snmp "integrations_snmp" { } } +discovery.relabel "integrations_snmp" { + targets = prometheus.exporter.snmp.integrations_snmp.targets + + rule { + target_label = "job" + replacement = "integrations/snmp" + } +} + prometheus.scrape "integrations_snmp" { - targets = prometheus.exporter.snmp.integrations_snmp.targets + targets = discovery.relabel.integrations_snmp.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/snmp" @@ -94,8 +131,17 @@ prometheus.exporter.azure "integrations_azure_exporter" { metrics = ["HttpRequestCount"] } +discovery.relabel "integrations_azure_exporter" { + targets = prometheus.exporter.azure.integrations_azure_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/azure_exporter" + } +} + prometheus.scrape "integrations_azure_exporter" { - targets = prometheus.exporter.azure.integrations_azure_exporter.targets + targets = discovery.relabel.integrations_azure_exporter.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/azure_exporter" @@ -108,8 +154,17 @@ prometheus.scrape "integrations_azure_exporter" { prometheus.exporter.cadvisor "integrations_cadvisor" { } +discovery.relabel "integrations_cadvisor" { + targets = prometheus.exporter.cadvisor.integrations_cadvisor.targets + + rule { + target_label = "job" + replacement = "integrations/cadvisor" + } +} + prometheus.scrape 
"integrations_cadvisor" { - targets = prometheus.exporter.cadvisor.integrations_cadvisor.targets + targets = discovery.relabel.integrations_cadvisor.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/cadvisor" @@ -171,8 +226,17 @@ prometheus.exporter.cloudwatch "integrations_cloudwatch_exporter" { decoupled_scraping { } } +discovery.relabel "integrations_cloudwatch_exporter" { + targets = prometheus.exporter.cloudwatch.integrations_cloudwatch_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/cloudwatch_exporter" + } +} + prometheus.scrape "integrations_cloudwatch_exporter" { - targets = prometheus.exporter.cloudwatch.integrations_cloudwatch_exporter.targets + targets = discovery.relabel.integrations_cloudwatch_exporter.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/cloudwatch_exporter" @@ -185,8 +249,17 @@ prometheus.scrape "integrations_cloudwatch_exporter" { prometheus.exporter.consul "integrations_consul_exporter" { } +discovery.relabel "integrations_consul_exporter" { + targets = prometheus.exporter.consul.integrations_consul_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/consul_exporter" + } +} + prometheus.scrape "integrations_consul_exporter" { - targets = prometheus.exporter.consul.integrations_consul_exporter.targets + targets = discovery.relabel.integrations_consul_exporter.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/consul_exporter" @@ -209,6 +282,11 @@ discovery.relabel "integrations_dnsmasq_exporter" { target_label = "instance" replacement = "dnsmasq-a" } + + rule { + target_label = "job" + replacement = "integrations/dnsmasq_exporter" + } } prometheus.scrape "integrations_dnsmasq_exporter" { @@ -225,8 +303,17 @@ prometheus.scrape "integrations_dnsmasq_exporter" { prometheus.exporter.elasticsearch "integrations_elasticsearch_exporter" { } +discovery.relabel "integrations_elasticsearch_exporter" { + targets = prometheus.exporter.elasticsearch.integrations_elasticsearch_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/elasticsearch_exporter" + } +} + prometheus.scrape "integrations_elasticsearch_exporter" { - targets = prometheus.exporter.elasticsearch.integrations_elasticsearch_exporter.targets + targets = discovery.relabel.integrations_elasticsearch_exporter.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/elasticsearch_exporter" @@ -243,8 +330,17 @@ prometheus.exporter.gcp "integrations_gcp_exporter" { extra_filters = ["loadbalancing.googleapis.com:resource.labels.backend_target_name=\"sample-value\""] } +discovery.relabel "integrations_gcp_exporter" { + targets = prometheus.exporter.gcp.integrations_gcp_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/gcp_exporter" + } +} + prometheus.scrape "integrations_gcp_exporter" { - targets = prometheus.exporter.gcp.integrations_gcp_exporter.targets + targets = discovery.relabel.integrations_gcp_exporter.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/gcp_exporter" @@ -260,8 +356,17 @@ prometheus.exporter.github "integrations_github_exporter" { api_token = "ABCDEFGH-1234-ABCD-1234-ABCDEFGHIJKL" } +discovery.relabel "integrations_github_exporter" { + targets = prometheus.exporter.github.integrations_github_exporter.targets + + rule { + target_label = "job" + replacement = 
"integrations/github_exporter" + } +} + prometheus.scrape "integrations_github_exporter" { - targets = prometheus.exporter.github.integrations_github_exporter.targets + targets = discovery.relabel.integrations_github_exporter.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/github_exporter" @@ -274,8 +379,17 @@ prometheus.scrape "integrations_github_exporter" { prometheus.exporter.kafka "integrations_kafka_exporter" { } +discovery.relabel "integrations_kafka_exporter" { + targets = prometheus.exporter.kafka.integrations_kafka_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/kafka_exporter" + } +} + prometheus.scrape "integrations_kafka_exporter" { - targets = prometheus.exporter.kafka.integrations_kafka_exporter.targets + targets = discovery.relabel.integrations_kafka_exporter.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/kafka_exporter" @@ -298,6 +412,11 @@ discovery.relabel "integrations_memcached_exporter" { target_label = "instance" replacement = "memcached-a" } + + rule { + target_label = "job" + replacement = "integrations/memcached_exporter" + } } prometheus.scrape "integrations_memcached_exporter" { @@ -331,6 +450,16 @@ discovery.relabel "integrations_mongodb_exporter" { target_label = "mongodb_cluster" replacement = "prod-cluster" } + + rule { + target_label = "instance" + replacement = "instance-key-value" + } + + rule { + target_label = "job" + replacement = "integrations/mongodb_exporter" + } } prometheus.scrape "integrations_mongodb_exporter" { @@ -349,8 +478,22 @@ prometheus.exporter.mssql "integrations_mssql" { connection_string = "sqlserver://:@:" } +discovery.relabel "integrations_mssql" { + targets = prometheus.exporter.mssql.integrations_mssql.targets + + rule { + target_label = "instance" + replacement = "instance-key-value" + } + + rule { + target_label = "job" + replacement = "integrations/mssql" + } +} + prometheus.scrape "integrations_mssql" { - targets = prometheus.exporter.mssql.integrations_mssql.targets + targets = discovery.relabel.integrations_mssql.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/mssql" @@ -373,6 +516,11 @@ discovery.relabel "integrations_mysqld_exporter" { target_label = "instance" replacement = "server-a" } + + rule { + target_label = "job" + replacement = "integrations/mysqld_exporter" + } } prometheus.scrape "integrations_mysqld_exporter" { @@ -406,6 +554,11 @@ discovery.relabel "integrations_node_exporter" { target_label = "__address__" replacement = "localhost:8099" } + + rule { + target_label = "job" + replacement = "integrations/node_exporter" + } } prometheus.scrape "integrations_node_exporter" { @@ -438,8 +591,17 @@ prometheus.exporter.oracledb "integrations_oracledb" { connection_string = "oracle://user:password@localhost:1521/orcl.localnet" } +discovery.relabel "integrations_oracledb" { + targets = prometheus.exporter.oracledb.integrations_oracledb.targets + + rule { + target_label = "job" + replacement = "integrations/oracledb" + } +} + prometheus.scrape "integrations_oracledb" { - targets = prometheus.exporter.oracledb.integrations_oracledb.targets + targets = discovery.relabel.integrations_oracledb.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/oracledb" scrape_timeout = "1m0s" @@ -463,6 +625,11 @@ discovery.relabel "integrations_postgres_exporter" { target_label = "instance" replacement = "postgres-a" } + + rule { + 
target_label = "job" + replacement = "integrations/postgres_exporter" + } } prometheus.scrape "integrations_postgres_exporter" { @@ -484,8 +651,17 @@ prometheus.exporter.process "integrations_process_exporter" { } } +discovery.relabel "integrations_process_exporter" { + targets = prometheus.exporter.process.integrations_process_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/process_exporter" + } +} + prometheus.scrape "integrations_process_exporter" { - targets = prometheus.exporter.process.integrations_process_exporter.targets + targets = discovery.relabel.integrations_process_exporter.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/process_exporter" @@ -509,6 +685,11 @@ discovery.relabel "integrations_redis_exporter" { target_label = "instance" replacement = "redis-2" } + + rule { + target_label = "job" + replacement = "integrations/redis_exporter" + } } prometheus.scrape "integrations_redis_exporter" { @@ -530,8 +711,17 @@ prometheus.exporter.snowflake "integrations_snowflake" { warehouse = "SNOWFLAKE_WAREHOUSE" } +discovery.relabel "integrations_snowflake" { + targets = prometheus.exporter.snowflake.integrations_snowflake.targets + + rule { + target_label = "job" + replacement = "integrations/snowflake" + } +} + prometheus.scrape "integrations_snowflake" { - targets = prometheus.exporter.snowflake.integrations_snowflake.targets + targets = discovery.relabel.integrations_snowflake.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/snowflake" @@ -546,8 +736,17 @@ prometheus.exporter.squid "integrations_squid" { address = "localhost:3128" } +discovery.relabel "integrations_squid" { + targets = prometheus.exporter.squid.integrations_squid.targets + + rule { + target_label = "job" + replacement = "integrations/squid" + } +} + prometheus.scrape "integrations_squid" { - targets = prometheus.exporter.squid.integrations_squid.targets + targets = discovery.relabel.integrations_squid.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/squid" scrape_timeout = "1m0s" @@ -561,8 +760,17 @@ prometheus.scrape "integrations_squid" { prometheus.exporter.statsd "integrations_statsd_exporter" { } +discovery.relabel "integrations_statsd_exporter" { + targets = prometheus.exporter.statsd.integrations_statsd_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/statsd_exporter" + } +} + prometheus.scrape "integrations_statsd_exporter" { - targets = prometheus.exporter.statsd.integrations_statsd_exporter.targets + targets = discovery.relabel.integrations_statsd_exporter.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/statsd_exporter" diff --git a/converter/internal/staticconvert/testdata/integrations.yaml b/converter/internal/staticconvert/testdata/integrations.yaml index a57b0f808024..ced11cf6d911 100644 --- a/converter/internal/staticconvert/testdata/integrations.yaml +++ b/converter/internal/staticconvert/testdata/integrations.yaml @@ -107,6 +107,7 @@ integrations: mongodb_exporter: enabled: true mongodb_uri: mongodb://mongodb-a:27017 + instance: 'instance-key-value' relabel_configs: - source_labels: [__address__] target_label: service_name @@ -116,6 +117,7 @@ integrations: replacement: 'prod-cluster' mssql: enabled: true + instance: 'instance-key-value' connection_string: sqlserver://:@: mysqld_exporter: enabled: true diff --git 
a/converter/internal/staticconvert/testdata/prom_remote_write.river b/converter/internal/staticconvert/testdata/prom_remote_write.river index df5a9848a234..2d341fed6a5b 100644 --- a/converter/internal/staticconvert/testdata/prom_remote_write.river +++ b/converter/internal/staticconvert/testdata/prom_remote_write.river @@ -1,6 +1,6 @@ prometheus.remote_write "metrics_test1" { endpoint { - name = "test1-8be96f" + name = "test1-149bbd" url = "http://localhost:9009/api/prom/push" queue_config { } @@ -11,7 +11,7 @@ prometheus.remote_write "metrics_test1" { prometheus.remote_write "metrics_test2" { endpoint { - name = "test2-533083" + name = "test2-c6d55a" url = "http://localhost:9010/api/prom/push" send_exemplars = false @@ -23,7 +23,7 @@ prometheus.remote_write "metrics_test2" { prometheus.remote_write "metrics_test3" { endpoint { - name = "test3-a3c419" + name = "test3-aa96fd" url = "http://localhost:9011/api/prom/push" queue_config { } @@ -32,7 +32,7 @@ prometheus.remote_write "metrics_test3" { } endpoint { - name = "test3-41df1c" + name = "test3-a93240" url = "http://localhost:9012/api/prom/push" queue_config { @@ -45,7 +45,7 @@ prometheus.remote_write "metrics_test3" { prometheus.remote_write "metrics_test4_sigv4_defaults" { endpoint { - name = "test4_sigv4_defaults-c42e88" + name = "test4_sigv4_defaults-f815bf" url = "http://localhost:9012/api/prom/push" queue_config { } @@ -58,7 +58,7 @@ prometheus.remote_write "metrics_test4_sigv4_defaults" { prometheus.remote_write "metrics_test5_sigv4_explicit" { endpoint { - name = "test5_sigv4_explicit-050ad5" + name = "test5_sigv4_explicit-bc8fca" url = "http://localhost:9012/api/prom/push" queue_config { } @@ -77,7 +77,7 @@ prometheus.remote_write "metrics_test5_sigv4_explicit" { prometheus.remote_write "metrics_test6_azuread_defaults" { endpoint { - name = "test6_azuread_defaults-50e17f" + name = "test6_azuread_defaults-cc4e7e" url = "http://localhost:9012/api/prom/push" queue_config { } @@ -94,7 +94,7 @@ prometheus.remote_write "metrics_test6_azuread_defaults" { prometheus.remote_write "metrics_test7_azuread_explicit" { endpoint { - name = "test7_azuread_explicit-0f55f1" + name = "test7_azuread_explicit-9e1a3e" url = "http://localhost:9012/api/prom/push" queue_config { } diff --git a/converter/internal/staticconvert/testdata/prom_scrape.river b/converter/internal/staticconvert/testdata/prom_scrape.river index c7db1090e90f..f0afe395531e 100644 --- a/converter/internal/staticconvert/testdata/prom_scrape.river +++ b/converter/internal/staticconvert/testdata/prom_scrape.river @@ -10,9 +10,7 @@ discovery.azure "metrics_agent_promobee" { managed_identity { client_id = "client" } - proxy_url = "proxy" - follow_redirects = true - enable_http2 = true + proxy_url = "proxy" } discovery.azure "metrics_agent_promobee_2" { @@ -27,9 +25,7 @@ discovery.azure "metrics_agent_promobee_2" { managed_identity { client_id = "client" } - proxy_url = "proxy" - follow_redirects = true - enable_http2 = true + proxy_url = "proxy" } discovery.relabel "metrics_agent_promobee" { @@ -95,7 +91,7 @@ prometheus.relabel "metrics_agent_promobee" { prometheus.remote_write "metrics_agent" { endpoint { - name = "agent-6ea089" + name = "agent-36127e" url = "https://prometheus-us-central1.grafana.net/api/prom/push" basic_auth { @@ -107,6 +103,7 @@ prometheus.remote_write "metrics_agent" { max_shards = 10 batch_send_deadline = "3m0s" max_backoff = "10s" + sample_age_limit = "50s" } metadata_config { } diff --git a/converter/internal/staticconvert/testdata/prom_scrape.yaml 
b/converter/internal/staticconvert/testdata/prom_scrape.yaml index b81e865ef5d0..afffa13a2054 100644 --- a/converter/internal/staticconvert/testdata/prom_scrape.yaml +++ b/converter/internal/staticconvert/testdata/prom_scrape.yaml @@ -19,6 +19,7 @@ metrics: batch_send_deadline: 3m max_shards: 10 max_backoff: 10s + sample_age_limit: 50s basic_auth: username: 11111 password: my-secret-password-here diff --git a/converter/internal/staticconvert/testdata/promtail_prom.river b/converter/internal/staticconvert/testdata/promtail_prom.river index f3b810dbe704..1744d37aee5c 100644 --- a/converter/internal/staticconvert/testdata/promtail_prom.river +++ b/converter/internal/staticconvert/testdata/promtail_prom.river @@ -1,7 +1,5 @@ discovery.consul "metrics_name_jobName" { - services = ["myapp"] - follow_redirects = true - enable_http2 = true + services = ["myapp"] } prometheus.scrape "metrics_name_jobName" { @@ -20,7 +18,7 @@ prometheus.scrape "metrics_name_jobName" { prometheus.remote_write "metrics_name" { endpoint { - name = "name-8be96f" + name = "name-149bbd" url = "http://localhost:9009/api/prom/push" queue_config { } @@ -48,8 +46,6 @@ discovery.consul "logs_name_jobName" { username = "toby" password = "this_password_is_safe_innit?" } - follow_redirects = true - enable_http2 = true } discovery.relabel "logs_name_jobName" { @@ -101,8 +97,6 @@ discovery.consul "logs_name2_jobName" { username = "toby" password = "this_password_is_safe_innit?" } - follow_redirects = true - enable_http2 = true } discovery.relabel "logs_name2_jobName" { diff --git a/converter/internal/staticconvert/testdata/promtail_scrape.river b/converter/internal/staticconvert/testdata/promtail_scrape.river index 46efa90bcc9f..22ee8576ed96 100644 --- a/converter/internal/staticconvert/testdata/promtail_scrape.river +++ b/converter/internal/staticconvert/testdata/promtail_scrape.river @@ -5,7 +5,6 @@ loki.relabel "logs_log_config_fun" { source_labels = ["__trail__"] target_label = "__path__" } - max_cache_size = 0 } loki.source.cloudflare "logs_log_config_fun" { diff --git a/converter/internal/staticconvert/testdata/sanitize.river b/converter/internal/staticconvert/testdata/sanitize.river index 38dabad4a85c..eaacf45291b6 100644 --- a/converter/internal/staticconvert/testdata/sanitize.river +++ b/converter/internal/staticconvert/testdata/sanitize.river @@ -1,6 +1,6 @@ prometheus.remote_write "metrics_integrations" { endpoint { - name = "integrations-717d0f" + name = "integrations-ce3432" url = "https://region.grafana.net/api/prom/push" basic_auth { @@ -37,7 +37,6 @@ loki.relabel "logs_integrations_integrations_windows_exporter_application" { source_labels = ["computer"] target_label = "agent_hostname" } - max_cache_size = 0 } loki.source.windowsevent "logs_integrations_integrations_windows_exporter_application" { @@ -75,7 +74,6 @@ loki.relabel "logs_integrations_integrations_windows_exporter_system" { source_labels = ["computer"] target_label = "agent_hostname" } - max_cache_size = 0 } loki.source.windowsevent "logs_integrations_integrations_windows_exporter_system" { diff --git a/converter/internal/staticconvert/testdata/unsupported.river b/converter/internal/staticconvert/testdata/unsupported.river index 95c5e192b55a..76923a6c7f06 100644 --- a/converter/internal/staticconvert/testdata/unsupported.river +++ b/converter/internal/staticconvert/testdata/unsupported.river @@ -8,7 +8,7 @@ prometheus.scrape "metrics_agent_prometheus" { prometheus.remote_write "metrics_agent" { endpoint { - name = "agent-d885f6" + name = "agent-eea444" 
url = "https://prometheus-us-central1.grafana.net/api/prom/push" queue_config { } @@ -24,15 +24,24 @@ logging { prometheus.exporter.statsd "integrations_statsd_exporter" { } +discovery.relabel "integrations_statsd_exporter" { + targets = prometheus.exporter.statsd.integrations_statsd_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/statsd_exporter" + } +} + prometheus.scrape "integrations_statsd_exporter" { - targets = prometheus.exporter.statsd.integrations_statsd_exporter.targets + targets = discovery.relabel.integrations_statsd_exporter.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/statsd_exporter" } prometheus.remote_write "integrations" { endpoint { - name = "agent-d885f6" + name = "agent-eea444" url = "https://prometheus-us-central1.grafana.net/api/prom/push" queue_config { } diff --git a/converter/internal/staticconvert/testdata_windows/integrations.river b/converter/internal/staticconvert/testdata_windows/integrations.river index d550abac368e..2124e95df4b0 100644 --- a/converter/internal/staticconvert/testdata_windows/integrations.river +++ b/converter/internal/staticconvert/testdata_windows/integrations.river @@ -22,16 +22,19 @@ http { } } -prometheus.exporter.windows "integrations_windows_exporter" { - exchange { } +prometheus.exporter.windows "integrations_windows_exporter" { } - network { - exclude = ".+" +discovery.relabel "integrations_windows_exporter" { + targets = prometheus.exporter.windows.integrations_windows_exporter.targets + + rule { + target_label = "job" + replacement = "integrations/windows_exporter" } } prometheus.scrape "integrations_windows_exporter" { - targets = prometheus.exporter.windows.integrations_windows_exporter.targets + targets = discovery.relabel.integrations_windows_exporter.output forward_to = [prometheus.remote_write.integrations.receiver] job_name = "integrations/windows_exporter" } diff --git a/converter/internal/staticconvert/validate.go b/converter/internal/staticconvert/validate.go index eed0b8c57717..2c5aeb87c1d0 100644 --- a/converter/internal/staticconvert/validate.go +++ b/converter/internal/staticconvert/validate.go @@ -35,9 +35,12 @@ import ( v2 "github.com/grafana/agent/pkg/integrations/v2" agent_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/agent" apache_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/apache_http" + app_agent_receiver_v2 "github.com/grafana/agent/pkg/integrations/v2/app_agent_receiver" blackbox_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/blackbox_exporter" - "github.com/grafana/agent/pkg/integrations/v2/metricsutils" + eventhandler_v2 "github.com/grafana/agent/pkg/integrations/v2/eventhandler" + metricsutils_v2 "github.com/grafana/agent/pkg/integrations/v2/metricsutils" snmp_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/snmp_exporter" + vmware_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/vmware_exporter" "github.com/grafana/agent/pkg/integrations/windows_exporter" "github.com/grafana/agent/pkg/logs" "github.com/grafana/agent/pkg/metrics" @@ -166,9 +169,13 @@ func validateIntegrationsV2(integrationsConfig *v2.SubsystemOptions) diag.Diagno switch itg := integration.(type) { case *agent_exporter_v2.Config: case *apache_exporter_v2.Config: + case *app_agent_receiver_v2.Config: + diags.AddAll(common.ValidateSupported(common.NotEquals, itg.TracesInstance, "", "app_agent_receiver traces_instance", "")) case *blackbox_exporter_v2.Config: + case *eventhandler_v2.Config: case 
*snmp_exporter_v2.Config: - case *metricsutils.ConfigShim: + case *vmware_exporter_v2.Config: + case *metricsutils_v2.ConfigShim: switch v1_itg := itg.Orig.(type) { case *azure_exporter.Config: case *cadvisor.Config: diff --git a/converter/internal/test_common/testing.go b/converter/internal/test_common/testing.go index 4ee3f2f23be3..03855fc2ca31 100644 --- a/converter/internal/test_common/testing.go +++ b/converter/internal/test_common/testing.go @@ -18,6 +18,7 @@ import ( cluster_service "github.com/grafana/agent/service/cluster" http_service "github.com/grafana/agent/service/http" "github.com/grafana/agent/service/labelstore" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) @@ -194,7 +195,7 @@ func attemptLoadingFlowConfig(t *testing.T, river []byte) { // properly. http_service.New(http_service.Options{}), clusterService, - labelstore.New(nil), + labelstore.New(nil, prometheus.DefaultRegisterer), }, }) err = f.LoadSource(cfg, nil) diff --git a/docs/README.md b/docs/README.md index 4419bb493c0d..96b045920465 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,14 +1,15 @@ # Grafana Agent Documentation -This directory contains documentation for Grafana Agent. It is split into two -parts: +This directory contains documentation for Grafana Agent. It is split into the +following parts: -* `user/`: Documentation for users. This directory is hosted on - [grafana.com/docs/agent](https://grafana.com/docs/agent/latest/) and we - recommend interacting with it there instead of viewing the markdown on +* `sources/`: Source of user-facing documentation. This directory is hosted on + [grafana.com/docs/agent](https://grafana.com/docs/agent/latest/), and we + recommend viewing it there instead of the markdown on GitHub. * `developer/`: Documentation for contributors and maintainers. * `rfcs/`: RFCs for proposals relating to Grafana Agent. +* `generator/`: Code for generating some parts of the documentation. ## Preview the website @@ -21,9 +22,13 @@ First, inside the `docs/` folder run `make check-cloudwatch-integration` to veri If the check fails, then the doc supported services list should be updated. For that, run `make generate-cloudwatch-integration` to get the updated list, which should replace the old one in [the docs](./sources/static/configuration/integrations/cloudwatch-exporter-config.md). +## Update generated reference docs + +Some sections of Grafana Agent Flow reference documentation are automatically generated. To update them, run `make generate-docs`. + ### Community Projects -Below is a list of community-led projects for working with Grafana Agent. These projects are not maintained or supported by Grafana Labs. +The following is a list of community-led projects for working with Grafana Agent. These projects are not maintained or supported by Grafana Labs. #### Helm (Kubernetes Deployment) diff --git a/docs/developer/writing-docs.md b/docs/developer/writing-docs.md new file mode 100644 index 000000000000..7cd9be07eed3 --- /dev/null +++ b/docs/developer/writing-docs.md @@ -0,0 +1,78 @@ +# Writing documentation + +This page is a collection of guidelines and best practices for writing +documentation for Grafana Agent. + +## Flow Mode documentation organisation + +The Flow mode documentation is organized into the following sections: + +### Get started + +The best place to start for new users who are onboarding. + +We showcase the features of the Agent and help users decide when to use Flow and +whether it's a good fit for them. 
+ +This section explains how to quickly install the agent and get hands-on +experience with a simple "hello world" configuration. + +### Concepts + +As defined in the [writer's toolkit][]: + +> Provides an overview and background information. Answers the question “What is +> it?”. + +It helps users learn the concepts of the Agent that are used throughout the +documentation. + +### Tutorials + +As defined in the [writer's toolkit][]: + +> Provides procedures that users can safely reproduce and learn from. Answers +> the question: “Can you teach me to …?” + +These are pages dedicated to learning. They are broader in scope, +while [Tasks](#tasks) focus on one objective. Tutorials may use +non-production-ready examples to facilitate learning, while tasks are expected +to provide production-ready solutions. + +### Tasks + +As defined in the [writer's toolkit][]: + +> Provides numbered steps that describe how to achieve an outcome. Answers the +> question “How do I?”. + +However, in the Agent documentation we don't mandate the use of numbered steps. +We do expect that tasks allow users to achieve a specific outcome by following +the page step by step, but we don't require numbered steps because some tasks +branch out into multiple paths, and numbering the steps would only add +confusion. + +Tasks are production-ready and contain best practices and recommendations. They +are quite detailed, with Reference pages being the only type of documentation +that has more detail. + +Tasks should not paraphrase information that is already covered in the +Reference pages, such as default values and the meaning of arguments. +Instead, they should link to the relevant Reference pages. + +### Reference + +The Reference section is a collection of pages that describe the Agent +components and their configuration options exhaustively. This is a narrower +definition than the one found in the [writer's toolkit][]. + +This is our most detailed documentation, and it should be used as the source of +truth. The contents of the Reference pages should not be repeated in other parts +of the documentation. + +### Release notes + +Release notes contain all the notable changes in the Agent. They are updated as +part of the release process.
+ +[writer's toolkit]: https://grafana.com/docs/writers-toolkit/structure/topic-types/ diff --git a/docs/developer/writing-exporter-flow-components.md b/docs/developer/writing-exporter-flow-components.md index 20afa6264a35..fb0681ae043b 100644 --- a/docs/developer/writing-exporter-flow-components.md +++ b/docs/developer/writing-exporter-flow-components.md @@ -39,7 +39,8 @@ The river config would look like this: prometheus.exporter.blackbox "example" { config_file = "blackbox_modules.yml" - target "example" { + target { + name = "example" address = "http://example.com" module = "http_2xx" } diff --git a/docs/docs_updated_test.go b/docs/docs_updated_test.go new file mode 100644 index 000000000000..a3c17e98b393 --- /dev/null +++ b/docs/docs_updated_test.go @@ -0,0 +1,72 @@ +//go:build !windows + +package docs + +import ( + "flag" + "strings" + "testing" + + "github.com/grafana/agent/component" + _ "github.com/grafana/agent/component/all" + "github.com/grafana/agent/component/metadata" + "github.com/grafana/agent/docs/generator" + "github.com/stretchr/testify/require" +) + +// Run the below generate command to automatically update the Markdown docs with generated content +//go:generate go test -fix-tests -v + +var fixTestsFlag = flag.Bool("fix-tests", false, "update the test files with the current generated content") + +func TestLinksToTypesSectionsUpdated(t *testing.T) { + for _, name := range component.AllNames() { + t.Run(name, func(t *testing.T) { + runForGenerator(t, generator.NewLinksToTypesGenerator(name)) + }) + } +} + +func TestCompatibleComponentsPageUpdated(t *testing.T) { + path := "sources/flow/reference/compatibility/_index.md" + for _, typ := range metadata.AllTypes { + t.Run(typ.Name, func(t *testing.T) { + t.Run("exporters", func(t *testing.T) { + runForGenerator(t, generator.NewExportersListGenerator(typ, path)) + }) + t.Run("consumers", func(t *testing.T) { + runForGenerator(t, generator.NewConsumersListGenerator(typ, path)) + }) + }) + } +} + +func runForGenerator(t *testing.T, g generator.DocsGenerator) { + if *fixTestsFlag { + err := g.Write() + require.NoError(t, err, "failed to write generated content for: %q", g.Name()) + t.Log("updated the docs with generated content", g.Name()) + return + } + + generated, err := g.Generate() + require.NoError(t, err, "failed to generate: %q", g.Name()) + + if strings.TrimSpace(generated) == "" { + actual, err := g.Read() + require.Error(t, err, "expected error when reading existing generated docs for %q", g.Name()) + require.Contains(t, err.Error(), "markers not found", "expected error to be about missing markers") + require.Empty(t, actual, "expected empty actual content for %q", g.Name()) + return + } + + actual, err := g.Read() + require.NoError(t, err, "failed to read existing generated docs for %q, try running 'go generate ./docs'", g.Name()) + require.Contains( + t, + actual, + strings.TrimSpace(generated), + "outdated docs detected when running %q, try updating with 'go generate ./docs'", + g.Name(), + ) +} diff --git a/docs/generator/compatible_components_page.go b/docs/generator/compatible_components_page.go new file mode 100644 index 000000000000..ae79597cb472 --- /dev/null +++ b/docs/generator/compatible_components_page.go @@ -0,0 +1,102 @@ +package generator + +import ( + "fmt" + "sort" + "strings" + + "github.com/grafana/agent/component/metadata" + "golang.org/x/exp/maps" +) + +type CompatibleComponentsListGenerator struct { + filePath string + t metadata.Type + sectionName string + generateFn func() string +} + +func 
NewExportersListGenerator(t metadata.Type, filePath string) *CompatibleComponentsListGenerator { + return &CompatibleComponentsListGenerator{ + filePath: filePath, + t: t, + sectionName: "exporters", + generateFn: func() string { return listOfComponentsExporting(t) }, + } +} + +func NewConsumersListGenerator(t metadata.Type, filePath string) *CompatibleComponentsListGenerator { + return &CompatibleComponentsListGenerator{ + filePath: filePath, + t: t, + sectionName: "consumers", + generateFn: func() string { return listOfComponentsAccepting(t) }, + } +} + +func (c *CompatibleComponentsListGenerator) Name() string { + return fmt.Sprintf("generator of %s section for %q in %q", c.sectionName, c.t.Name, c.filePath) +} + +func (c *CompatibleComponentsListGenerator) Generate() (string, error) { + return c.generateFn(), nil +} + +func (c *CompatibleComponentsListGenerator) Read() (string, error) { + content, err := readBetweenMarkers(c.startMarker(), c.endMarker(), c.filePath) + if err != nil { + return "", fmt.Errorf("failed to read existing content for %q: %w", c.Name(), err) + } + return content, err +} + +func (c *CompatibleComponentsListGenerator) Write() error { + newSection, err := c.Generate() + if err != nil { + return err + } + if strings.TrimSpace(newSection) == "" { + return nil + } + newSection = "\n" + newSection + "\n" + return writeBetweenMarkers(c.startMarker(), c.endMarker(), c.filePath, newSection, false) +} + +func (c *CompatibleComponentsListGenerator) startMarker() string { + return fmt.Sprintf("", strings.ToUpper(c.sectionName), c.t.Name) +} + +func (c *CompatibleComponentsListGenerator) endMarker() string { + return fmt.Sprintf("", strings.ToUpper(c.sectionName), c.t.Name) +} + +func listOfComponentsAccepting(dataType metadata.Type) string { + return listOfLinksToComponents(allComponentsThatAccept(dataType)) +} + +func listOfComponentsExporting(dataType metadata.Type) string { + return listOfLinksToComponents(allComponentsThatExport(dataType)) +} + +func listOfLinksToComponents(components []string) string { + str := "" + groups := make(map[string][]string) + + for _, component := range components { + parts := strings.SplitN(component, ".", 2) + namespace := parts[0] + groups[namespace] = append(groups[namespace], component) + } + + sortedNamespaces := maps.Keys(groups) + sort.Strings(sortedNamespaces) + + for _, namespace := range sortedNamespaces { + str += fmt.Sprintf("\n{{< collapse title=%q >}}\n", namespace) + for _, component := range groups[namespace] { + str += fmt.Sprintf("- [%[1]s]({{< relref \"../components/%[1]s.md\" >}})\n", component) + } + str += "{{< /collapse >}}\n" + } + return str +} diff --git a/docs/generator/docs_generator.go b/docs/generator/docs_generator.go new file mode 100644 index 000000000000..c4012cc14d74 --- /dev/null +++ b/docs/generator/docs_generator.go @@ -0,0 +1,86 @@ +package generator + +import ( + "bytes" + "fmt" + "os" + + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/metadata" +) + +type DocsGenerator interface { + Name() string + Generate() (string, error) + Read() (string, error) + Write() error +} + +func allComponentsThat(f func(meta metadata.Metadata) bool) []string { + var result []string + for _, name := range component.AllNames() { + meta, err := metadata.ForComponent(name) + if err != nil { + panic(err) // should never happen + } + + if f(meta) { + result = append(result, name) + } + } + return result +} + +func allComponentsThatExport(dataType metadata.Type) []string { + return 
allComponentsThat(func(meta metadata.Metadata) bool { + return meta.ExportsType(dataType) + }) +} + +func allComponentsThatAccept(dataType metadata.Type) []string { + return allComponentsThat(func(meta metadata.Metadata) bool { + return meta.AcceptsType(dataType) + }) +} + +func writeBetweenMarkers(startMarker string, endMarker string, filePath string, content string, appendIfMissing bool) error { + fileContents, err := os.ReadFile(filePath) + if err != nil { + return err + } + + replacement := append(append([]byte(startMarker), []byte(content)...), []byte(endMarker)...) + + startIndex := bytes.Index(fileContents, []byte(startMarker)) + endIndex := bytes.LastIndex(fileContents, []byte(endMarker)) + var newFileContents []byte + if startIndex == -1 || endIndex == -1 { + if !appendIfMissing { + return fmt.Errorf("required markers %q and %q do not exist in %q", startMarker, endMarker, filePath) + } + // Append the new section to the end of the file + newFileContents = append(fileContents, replacement...) + } else { + // Replace the section with the new content + newFileContents = append(newFileContents, fileContents[:startIndex]...) + newFileContents = append(newFileContents, replacement...) + newFileContents = append(newFileContents, fileContents[endIndex+len(endMarker):]...) + } + err = os.WriteFile(filePath, newFileContents, 0644) + return err +} + +func readBetweenMarkers(startMarker string, endMarker string, filePath string) (string, error) { + fileContents, err := os.ReadFile(filePath) + if err != nil { + return "", err + } + + startIndex := bytes.Index(fileContents, []byte(startMarker)) + endIndex := bytes.LastIndex(fileContents, []byte(endMarker)) + if startIndex == -1 || endIndex == -1 { + return "", fmt.Errorf("markers not found: %q or %q", startMarker, endMarker) + } + + return string(fileContents[startIndex+len(startMarker) : endIndex]), nil +} diff --git a/docs/generator/links_to_types.go b/docs/generator/links_to_types.go new file mode 100644 index 000000000000..867654e1648d --- /dev/null +++ b/docs/generator/links_to_types.go @@ -0,0 +1,124 @@ +package generator + +import ( + "fmt" + "regexp" + "strings" + + "github.com/grafana/agent/component/metadata" +) + +type LinksToTypesGenerator struct { + component string +} + +func NewLinksToTypesGenerator(component string) *LinksToTypesGenerator { + return &LinksToTypesGenerator{component: component} +} + +func (l *LinksToTypesGenerator) Name() string { + return fmt.Sprintf("generator of links to types for %q reference page", l.component) +} + +func (l *LinksToTypesGenerator) Generate() (string, error) { + meta, err := metadata.ForComponent(l.component) + if err != nil { + return "", err + } + if meta.Empty() { + return "", nil + } + + heading := "\n## Compatible components\n\n" + acceptingSection := acceptingComponentsSection(l.component, meta) + outputSection := outputComponentsSection(l.component, meta) + + if acceptingSection == "" && outputSection == "" { + return "", nil + } + + note := ` +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} +` + + return heading + acceptingSection + outputSection + note, nil +} + +func (l *LinksToTypesGenerator) Read() (string, error) { + content, err := readBetweenMarkers(l.startMarker(), l.endMarker(), l.pathToComponentMarkdown()) + if err != nil { + return "", fmt.Errorf("failed to read existing content for %q: %w", l.Name(), err) + } + return content, err +} + +func (l *LinksToTypesGenerator) Write() error { + newSection, err := l.Generate() + if err != nil { + return err + } + if strings.TrimSpace(newSection) == "" { + return nil + } + newSection = "\n" + newSection + "\n" + return writeBetweenMarkers(l.startMarker(), l.endMarker(), l.pathToComponentMarkdown(), newSection, true) +} + +func (l *LinksToTypesGenerator) startMarker() string { + return "" +} + +func (l *LinksToTypesGenerator) endMarker() string { + return "" +} + +func (l *LinksToTypesGenerator) pathToComponentMarkdown() string { + return fmt.Sprintf("sources/flow/reference/components/%s.md", l.component) +} + +func outputComponentsSection(name string, meta metadata.Metadata) string { + section := "" + for _, outputDataType := range meta.AllTypesExported() { + if list := allComponentsThatAccept(outputDataType); len(list) > 0 { + section += fmt.Sprintf( + "- Components that consume [%s]({{< relref \"../compatibility/%s\" >}})\n", + outputDataType.Name, + anchorFor(outputDataType.Name, "consumers"), + ) + } + } + if section != "" { + section = fmt.Sprintf("`%s` has exports that can be consumed by the following components:\n\n", name) + section + } + return section +} + +func acceptingComponentsSection(componentName string, meta metadata.Metadata) string { + section := "" + for _, acceptedDataType := range meta.AllTypesAccepted() { + if list := allComponentsThatExport(acceptedDataType); len(list) > 0 { + section += fmt.Sprintf( + "- Components that export [%s]({{< relref \"../compatibility/%s\" >}})\n", + acceptedDataType.Name, + anchorFor(acceptedDataType.Name, "exporters"), + ) + } + } + if section != "" { + section = fmt.Sprintf("`%s` can accept arguments from the following components:\n\n", componentName) + section + "\n" + } + return section +} + +func anchorFor(parts ...string) string { + for i, s := range parts { + reg := regexp.MustCompile("[^a-z0-9-_]+") + parts[i] = reg.ReplaceAllString(strings.ReplaceAll(strings.ToLower(s), " ", "-"), "") + } + return "#" + strings.Join(parts, "-") +} diff --git a/docs/make-docs b/docs/make-docs index 751e22f4fd48..25176a37f051 100755 --- a/docs/make-docs +++ b/docs/make-docs @@ -6,6 +6,14 @@ # [Semantic versioning](https://semver.org/) is used to help the reader identify the significance of changes. # Changes are relevant to this script and the support docs.mk GNU Make interface. # + +# ## 5.1.2 (2023-11-08) +# +# ### Added +# +# - Hide manual_mount warning messages from non-debug output. +# Set the DEBUG environment variable to see all hidden messages. 
+# # ## 5.1.1 (2023-10-30) # # ### Added @@ -779,7 +787,8 @@ EOF -e '/website-proxy/ d' \ -e '/rm -rf dist*/ d' \ -e '/Press Ctrl+C to stop/ d' \ - -e '/make/ d' + -e '/make/ d' \ + -e '/WARNING: The manual_mount source directory/ d' fi ;; esac diff --git a/docs/sources/_index.md b/docs/sources/_index.md index c46dfe04feea..890ac82d71a7 100644 --- a/docs/sources/_index.md +++ b/docs/sources/_index.md @@ -3,12 +3,13 @@ aliases: - /docs/grafana-cloud/agent/ - /docs/grafana-cloud/monitor-infrastructure/agent/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/ +- /docs/grafana-cloud/send-data/agent/ canonical: https://grafana.com/docs/agent/latest/ title: Grafana Agent description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector weight: 350 cascade: - AGENT_RELEASE: v0.37.4 + AGENT_RELEASE: v0.39.0 OTEL_VERSION: v0.87.0 --- @@ -102,18 +103,18 @@ one minor release is moved. Patch and security releases may be created at any time. {{% docs/reference %}} -[variants]: "/docs/agent/ -> /docs/agent//about" -[variants]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/about" +[variants]: "/docs/agent/ -> /docs/agent//about" +[variants]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/about" -[Static mode]: "/docs/agent/ -> /docs/agent//static" -[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/static" +[Static mode]: "/docs/agent/ -> /docs/agent//static" +[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static" -[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator" -[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/operator" +[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator" +[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/operator" -[Flow mode]: "/docs/agent/ -> /docs/agent//flow" -[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" +[Flow mode]: "/docs/agent/ -> /docs/agent//flow" +[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" {{% /docs/reference %}} diff --git a/docs/sources/_index.md.t b/docs/sources/_index.md.t index a8c7eb6fddf9..c8dccbfb6748 100644 --- a/docs/sources/_index.md.t +++ b/docs/sources/_index.md.t @@ -3,6 +3,7 @@ aliases: - /docs/grafana-cloud/agent/ - /docs/grafana-cloud/monitor-infrastructure/agent/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/ +- /docs/grafana-cloud/send-data/agent/ canonical: https://grafana.com/docs/agent/latest/ title: Grafana Agent description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector @@ -102,18 +103,18 @@ one minor release is moved. Patch and security releases may be created at any time. 
{{% docs/reference %}} -[variants]: "/docs/agent/ -> /docs/agent//about" -[variants]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/about" +[variants]: "/docs/agent/ -> /docs/agent//about" +[variants]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/about" -[Static mode]: "/docs/agent/ -> /docs/agent//static" -[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/static" +[Static mode]: "/docs/agent/ -> /docs/agent//static" +[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static" -[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator" -[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/operator" +[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator" +[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/operator" -[Flow mode]: "/docs/agent/ -> /docs/agent//flow" -[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" +[Flow mode]: "/docs/agent/ -> /docs/agent//flow" +[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" {{% /docs/reference %}} diff --git a/docs/sources/about.md b/docs/sources/about.md index 4684df62a529..57468c7f3e24 100644 --- a/docs/sources/about.md +++ b/docs/sources/about.md @@ -4,36 +4,50 @@ aliases: - /docs/grafana-cloud/agent/about/ - /docs/grafana-cloud/monitor-infrastructure/agent/about/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/about/ +- /docs/grafana-cloud/send-data/agent/about/ canonical: https://grafana.com/docs/agent/latest/about/ +description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector menuTitle: Introduction title: Introduction to Grafana Agent -description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector weight: 100 --- # Introduction to Grafana Agent -Grafana Agent is a vendor-neutral, batteries-included telemetry collector. It -is designed to be flexible, performant, and compatible with multiple ecosystems -such as Prometheus and OpenTelemetry. +Grafana Agent is a flexible, high performance, vendor-neutral telemetry collector. It's fully compatible with the most popular open source observability standards such as OpenTelemetry (OTel) and Prometheus. Grafana Agent is available in three different variants: -- [Static mode][]: The default, original variant of Grafana Agent. -- [Static mode Kubernetes operator][]: Variant which manages agents running in Static mode. -- [Flow mode][]: The newer, more flexible re-imagining variant of Grafana Agent. +- [Static mode][]: The original Grafana Agent. +- [Static mode Kubernetes operator][]: The Kubernetes operator for Static mode. +- [Flow mode][]: The new, component-based Grafana Agent. 
{{% docs/reference %}} -[Static mode]: "/docs/agent/ -> /docs/agent//static" -[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/static" - -[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator" -[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/operator" - -[Flow mode]: "/docs/agent/ -> /docs/agent//flow" -[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" +[Static mode]: "/docs/agent/ -> /docs/agent//static" +[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static" +[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator" +[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/operator" +[Flow mode]: "/docs/agent/ -> /docs/agent//flow" +[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" +[Prometheus]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-prometheus-metrics.md" +[Prometheus]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/collect-prometheus-metrics.md" +[OTel]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-opentelemetry-data.md" +[OTel]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/collect-opentelemetry-data.md" +[Loki]: "/docs/agent/ -> /docs/agent//flow/tasks/migrate/from-promtail.md" +[Loki]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/migrate/from-promtail.md" +[clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering/_index.md" +[clustering]: "/docs/grafana-cloud/ -> /docs/agent//flow/concepts/clustering/_index.md" +[rules]: "/docs/agent/ -> /docs/agent/latest/flow/reference/components/mimir.rules.kubernetes.md" +[rules]: "/docs/grafana-cloud/ -> /docs/agent/latest/flow/reference/components/mimir.rules.kubernetes.md" +[vault]: "/docs/agent/ -> /docs/agent//flow/reference/components/remote.vault.md" +[vault]: "/docs/grafana-cloud/ -> /docs/agent//flow/reference/components/remote.vault.md" {{% /docs/reference %}} +[Pyroscope]: https://grafana.com/docs/pyroscope/latest/configure-client/grafana-agent/go_pull +[helm chart]: https://grafana.com/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/config-k8s-helmchart +[sla]: https://grafana.com/legal/grafana-cloud-sla +[observability]: https://grafana.com/docs/grafana-cloud/monitor-applications/application-observability/setup#send-telemetry + ## Stability | Project | Stability | @@ -44,13 +58,44 @@ Grafana Agent is available in three different variants: ## Choose which variant of Grafana Agent to run -> **NOTE**: You do not have to pick just one variant; it is possible to +> **NOTE**: You don't have to pick just one variant; it's possible to > mix-and-match installations of Grafana Agent. +### Compare variants + +Each variant of Grafana Agent provides a different level of functionality. The following tables compare Grafana Agent Flow mode with Static mode, Operator, OpenTelemetry, and Prometheus. 
+ +#### Core telemetry + +| | Grafana Agent Flow mode | Grafana Agent Static mode | Grafana Agent Operator | OpenTelemetry Collector | Prometheus Agent mode | +|--------------|--------------------------|---------------------------|------------------------|-------------------------|-----------------------| +| **Metrics** | [Prometheus][], [OTel][] | Prometheus | Prometheus | OTel | Prometheus | +| **Logs** | [Loki][], [OTel][] | Loki | Loki | OTel | No | +| **Traces** | [OTel][] | OTel | OTel | OTel | No | +| **Profiles** | [Pyroscope][] | No | No | Planned | No | + +#### **OSS features** + +| | Grafana Agent Flow mode | Grafana Agent Static mode | Grafana Agent Operator | OpenTelemetry Collector | Prometheus Agent mode | +|--------------------------|-------------------------|---------------------------|------------------------|-------------------------|-----------------------| +| **Kubernetes native** | [Yes][helm chart] | No | Yes | Yes | No | +| **Clustering** | [Yes][clustering] | No | No | No | No | +| **Prometheus rules** | [Yes][rules] | No | No | No | No | +| **Native Vault support** | [Yes][vault] | No | No | No | No | + +#### Grafana Cloud solutions + +| | Grafana Agent Flow mode | Grafana Agent Static mode | Grafana Agent Operator | OpenTelemetry Collector | Prometheus Agent mode | +|-------------------------------|-------------------------|---------------------------|------------------------|-------------------------|-----------------------| +| **Official vendor support** | [Yes][sla] | Yes | Yes | No | No | +| **Cloud integrations** | Some | Yes | Some | No | No | +| **Kubernetes monitoring** | [Yes][helm chart] | Yes, custom | Yes | No | Yes, custom | +| **Application observability** | [Yes][observability] | No | No | Yes | No | + ### Static mode -[Static mode][] is the original variant of Grafana Agent, first introduced on -March 3, 2020. Static mode is the most mature variant of Grafana Agent. +[Static mode][] is the original variant of Grafana Agent, introduced on March 3, 2020. +Static mode is the most mature variant of Grafana Agent. You should run Static mode when: @@ -60,12 +105,15 @@ You should run Static mode when: ### Static mode Kubernetes operator -The [Static mode Kubernetes operator][] is a variant of Grafana Agent first -introduced on June 17, 2021. It is currently in beta. +{{% admonition type="note" %}} +Grafana Agent version 0.37 and newer provides Prometheus Operator compatibility in Flow mode. +You should use Grafana Agent Flow mode for all new Grafana Agent deployments. +{{% /admonition %}} + +The [Static mode Kubernetes operator][] is a variant of Grafana Agent introduced on June 17, 2021. It's currently in beta. -The Static mode Kubernetes operator was introduced for compatibility with -Prometheus Operator, allowing static mode to support resources from Prometheus -Operator, such as ServiceMonitors, PodMonitors, and Probes. +The Static mode Kubernetes operator provides compatibility with Prometheus Operator, +allowing static mode to support resources from Prometheus Operator, such as ServiceMonitors, PodMonitors, and Probes. You should run the Static mode Kubernetes operator when: @@ -75,34 +123,24 @@ You should run the Static mode Kubernetes operator when: ### Flow mode -[Flow mode][] is a stable variant of Grafana Agent first introduced on -September 29, 2022. +[Flow mode][] is a stable variant of Grafana Agent, introduced on September 29, 2022. 
-Flow mode was introduced as a re-imagining of Grafana Agent with a focus on -vendor neutrality, ease-of-use, improved debuggability, and ability to adapt to -the needs of power users by adopting a configuration-as-code model. - -Flow mode is considered to be the future of the Grafana Agent project. +Grafana Agent Flow mode focuses on vendor neutrality, ease-of-use, +improved debugging, and ability to adapt to the needs of power users by adopting a configuration-as-code model. You should run Flow mode when: * You need functionality unique to Flow mode: - * **Debuggability**: You need to more easily debug configuration issues using - a UI. - - * **Full OpenTelemetry support**: Support for collecting OpenTelemetry - metrics, logs, and traces. + * **Improved debugging**: You need to more easily debug configuration issues using a UI. - * **PrometheusRule support**: Support for the PrometheusRule resource from - the Prometheus Operator project for configuring Grafana Mimir. + * **Full OpenTelemetry support**: Support for collecting OpenTelemetry metrics, logs, and traces. - * **Ecosystem transformation**: You need to be able to convert Prometheus and - Loki pipelines to and from OpenTelmetry Collector pipelines. + * **PrometheusRule support**: Support for the PrometheusRule resource from the Prometheus Operator project for configuring Grafana Mimir. - * **Grafana Pyroscope support**: Support for collecting profiles for Grafana - Pyroscope. + * **Ecosystem transformation**: You need to be able to convert Prometheus and Loki pipelines to and from OpenTelmetry Collector pipelines. + * **Grafana Pyroscope support**: Support for collecting profiles for Grafana Pyroscope. ### BoringCrypto @@ -110,9 +148,9 @@ You should run Flow mode when: binaries and images with BoringCrypto enabled. Builds and Docker images for Linux arm64/amd64 are made available. 
{{% docs/reference %}} -[integrations]: "/docs/agent/ -> /docs/agent//static/configuration/integrations" -[integrations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations" +[integrations]: "/docs/agent/ -> /docs/agent//static/configuration/integrations" +[integrations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/integrations" -[components]: "/docs/agent/ -> /docs/agent//flow/reference/components" -[components]: "/docs/grafana-cloud/ -> /docs/agent//flow/reference/components" +[components]: "/docs/agent/ -> /docs/agent//flow/reference/components" +[components]: "/docs/grafana-cloud/ -> /docs/agent//flow/reference/components" {{% /docs/reference %}} diff --git a/docs/sources/assets/concepts_example_pipeline.svg b/docs/sources/assets/concepts_example_pipeline.svg deleted file mode 100644 index ebbfa535c37e..000000000000 --- a/docs/sources/assets/concepts_example_pipeline.svg +++ /dev/null @@ -1,304 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/sources/assets/flow_referencing_exports_diagram.svg b/docs/sources/assets/flow_referencing_exports_diagram.svg deleted file mode 100644 index f6bb77ef06bf..000000000000 --- a/docs/sources/assets/flow_referencing_exports_diagram.svg +++ /dev/null @@ -1,216 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/sources/assets/getting-started/loki-config.png b/docs/sources/assets/tasks/loki-config.png similarity index 100% rename from docs/sources/assets/getting-started/loki-config.png rename to docs/sources/assets/tasks/loki-config.png diff --git a/docs/sources/assets/getting-started/otlp-lgtm-graph.png b/docs/sources/assets/tasks/otlp-lgtm-graph.png similarity index 100% rename from docs/sources/assets/getting-started/otlp-lgtm-graph.png rename to docs/sources/assets/tasks/otlp-lgtm-graph.png diff --git a/docs/sources/assets/getting-started/prometheus-config.png b/docs/sources/assets/tasks/prometheus-config.png similarity index 100% rename from docs/sources/assets/getting-started/prometheus-config.png rename to docs/sources/assets/tasks/prometheus-config.png diff --git a/docs/sources/assets/getting-started/tempo-config.png b/docs/sources/assets/tasks/tempo-config.png similarity index 100% rename from docs/sources/assets/getting-started/tempo-config.png rename to docs/sources/assets/tasks/tempo-config.png diff --git a/docs/sources/data-collection.md b/docs/sources/data-collection.md index 51060733b0ea..80fbd874cdcf 100644 --- a/docs/sources/data-collection.md +++ b/docs/sources/data-collection.md @@ -4,14 +4,15 @@ aliases: - /docs/grafana-cloud/agent/data-collection/ - /docs/grafana-cloud/monitor-infrastructure/agent/data-collection/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/data-collection/ +- /docs/grafana-cloud/send-data/agent/data-collection/ canonical: https://grafana.com/docs/agent/latest/data-collection/ +description: Grafana Agent data collection menuTitle: Data collection title: Grafana Agent data collection -description: Grafana Agent data collection weight: 500 --- -# Data collection +# Grafana Agent Data collection By default, Grafana Agent sends anonymous but uniquely identifiable usage information from your Grafana Agent instance to Grafana Labs. These statistics are sent to `stats.grafana.org`. @@ -29,6 +30,7 @@ The usage information includes the following details: * List of enabled feature flags ([Static] mode only). 
* List of enabled integrations ([Static] mode only). * List of enabled [components][] ([Flow] mode only). +* Method used to deploy Grafana Agent, for example Docker, Helm, RPM, or Operator. This list may change over time. All newly reported data is documented in the CHANGELOG. @@ -42,7 +44,7 @@ You can use the `-disable-reporting` [command line flag][] to disable the report [components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" [components]: "/docs/grafana-cloud/ -> /docs/agent//flow/reference/cli/run.md" [Static]: "/docs/agent/ -> /docs/agent//static" -[Static]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/static +[Static]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static [Flow]: "/docs/agent/ -> /docs/agent//flow" [Flow]: "/docs/grafana-cloud/ -> /docs/agent//flow" {{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/_index.md b/docs/sources/flow/_index.md index 65909d624de0..4262238f9020 100644 --- a/docs/sources/flow/_index.md +++ b/docs/sources/flow/_index.md @@ -3,16 +3,20 @@ aliases: - /docs/grafana-cloud/agent/flow/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/ +- /docs/grafana-cloud/send-data/agent/flow/ canonical: https://grafana.com/docs/agent/latest/flow/ +description: Grafana Agent Flow is a component-based revision of Grafana Agent with + a focus on ease-of-use, debuggability, and adaptability title: Flow mode -description: Grafana Agent Flow is a component-based revision of Grafana Agent with a focus on ease-of-use, debuggability, and adaptability weight: 400 +cascade: + PRODUCT_NAME: Grafana Agent Flow + PRODUCT_ROOT_NAME: Grafana Agent --- -# Flow mode +# {{% param "PRODUCT_NAME" %}} -The Flow mode of Grafana Agent (also called Grafana Agent Flow) is a -_component-based_ revision of Grafana Agent with a focus on ease-of-use, +{{< param "PRODUCT_NAME" >}} is a _component-based_ revision of {{< param "PRODUCT_ROOT_NAME" >}} with a focus on ease-of-use, debuggability, and ability to adapt to the needs of power users. Components allow for reusability, composability, and focus on a single task. @@ -32,7 +36,7 @@ Components allow for reusability, composability, and focus on a single task. ## Example ```river -// Discover Kubernetes pods to collect metrics from. +// Discover Kubernetes pods to collect metrics from discovery.kubernetes "pods" { role = "pod" } @@ -63,27 +67,27 @@ prometheus.remote_write "default" { } ``` -## Grafana Agent configuration generator -The [Grafana Agent configuration generator](https://grafana.github.io/agent-configurator/) will help you get a head start on creating flow code. +## {{< param "PRODUCT_NAME" >}} configuration generator + +The {{< param "PRODUCT_NAME" >}} [configuration generator](https://grafana.github.io/agent-configurator/) will help you get a head start on creating flow code. + {{% admonition type="note" %}} -This feature is experimental, and it does not support all River components. +This feature is experimental, and it doesn't support all River components. {{% /admonition %}} ## Next steps -* [Install][] Grafana Agent in flow mode. -* Learn about the core [Concepts][] of flow mode. -* Follow our [Getting started][] guides for Grafana Agent in flow mode. -* Follow our [Tutorials][] to get started with Grafana Agent in flow mode. -* Learn how to use the [Configuration language][]. +* [Install][] {{< param "PRODUCT_NAME" >}}. 
+* Learn about the core [Concepts][] of {{< param "PRODUCT_NAME" >}}. +* Follow our [Tutorials][] for hands-on learning of {{< param "PRODUCT_NAME" >}}. +* Consult our [Tasks][] instructions to accomplish common objectives with {{< param "PRODUCT_NAME" >}}. * Check out our [Reference][] documentation to find specific information you might be looking for. [Install]: {{< relref "./setup/install/" >}} [Concepts]: {{< relref "./concepts/" >}} -[Getting started]: {{< relref "./getting-started/" >}} +[Tasks]: {{< relref "./tasks/" >}} [Tutorials]: {{< relref "./tutorials/ ">}} -[Configuration language]: {{< relref "./config-language/" >}} [Reference]: {{< relref "./reference" >}} diff --git a/docs/sources/flow/concepts/_index.md b/docs/sources/flow/concepts/_index.md index 0eaca30934ac..786af8e5467b 100644 --- a/docs/sources/flow/concepts/_index.md +++ b/docs/sources/flow/concepts/_index.md @@ -4,14 +4,15 @@ aliases: - /docs/grafana-cloud/agent/flow/concepts/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/ canonical: https://grafana.com/docs/agent/latest/flow/concepts/ +description: Learn about the Grafana Agent Flow concepts title: Concepts -description: Learn about the Grafana Agent flow mode concepts weight: 100 --- # Concepts -This section explains primary concepts of Grafana Agent Flow. +This section explains the primary concepts of {{< param "PRODUCT_NAME" >}}. {{< section >}} diff --git a/docs/sources/flow/concepts/clustering.md b/docs/sources/flow/concepts/clustering.md index 9dac5fedbcc8..e02a6131d4a5 100644 --- a/docs/sources/flow/concepts/clustering.md +++ b/docs/sources/flow/concepts/clustering.md @@ -3,43 +3,37 @@ aliases: - /docs/grafana-cloud/agent/flow/concepts/clustering/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/clustering/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/clustering/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/clustering/ canonical: https://grafana.com/docs/agent/latest/flow/concepts/clustering/ +description: Learn about Grafana Agent clustering concepts labels: stage: beta menuTitle: Clustering title: Clustering (beta) -description: Learn about Grafana Agent clustering concepts weight: 500 --- # Clustering (beta) -Clustering enables a fleet of agents to work together for workload distribution -and high availability. It helps create horizontally scalable deployments with -minimal resource and operational overhead. +Clustering enables a fleet of {{< param "PRODUCT_ROOT_NAME" >}}s to work together for workload distribution and high availability. +It helps create horizontally scalable deployments with minimal resource and operational overhead. -To achieve this, Grafana Agent makes use of an eventually consistent model that -assumes all participating Agents are interchangeable and converge on using the -same configuration file. +To achieve this, {{< param "PRODUCT_NAME" >}} makes use of an eventually consistent model that assumes all participating +{{< param "PRODUCT_ROOT_NAME" >}}s are interchangeable and converge on using the same configuration file. -The behavior of a standalone, non-clustered agent is the same as if it was a -single-node cluster. +The behavior of a standalone, non-clustered {{< param "PRODUCT_ROOT_NAME" >}} is the same as if it were a single-node cluster. 
-You configure clustering by passing `cluster` command-line flags to the [run][] -command. +You configure clustering by passing `cluster` command-line flags to the [run][] command. ## Use cases ### Target auto-distribution -Target auto-distribution is the most basic use case of clustering; it allows -scraping components running on all peers to distribute scrape load between -themselves. For target auto-distribution to work correctly, all agents in the -same cluster must be able to reach the same service discovery APIs and must be -able to scrape the same targets. +Target auto-distribution is the most basic use case of clustering. +It allows scraping components running on all peers to distribute the scrape load between themselves. +Target auto-distribution requires that all {{< param "PRODUCT_ROOT_NAME" >}} in the same cluster can reach the same service discovery APIs and scrape the same targets. -You must explicitly enable target auto-distribution on components by defining a -`clustering` block, such as: +You must explicitly enable target auto-distribution on components by defining a `clustering` block. ```river prometheus.scrape "default" { @@ -51,18 +45,15 @@ prometheus.scrape "default" { } ``` -A cluster state change is detected when a new node joins or an existing node goes away. All participating components locally -recalculate target ownership and rebalance the number of targets they’re -scraping without explicitly communicating ownership over the network. +A cluster state change is detected when a new node joins or an existing node leaves. +All participating components locally recalculate target ownership and re-balance the number of targets they’re scraping without explicitly communicating ownership over the network. -Target auto-distribution allows you to dynamically scale the number of agents to distribute workload during peaks. -It also provides resiliency because targets are automatically picked up by one of the node peers if a node goes away. +Target auto-distribution allows you to dynamically scale the number of {{< param "PRODUCT_ROOT_NAME" >}}s to distribute workload during peaks. +It also provides resiliency because targets are automatically picked up by one of the node peers if a node leaves. -Grafana Agent uses a fully-local consistent hashing algorithm to distribute -targets, meaning that, on average, only ~1/N of the targets are redistributed. +{{< param "PRODUCT_NAME" >}} uses a local consistent hashing algorithm to distribute targets, meaning that, on average, only ~1/N of the targets are redistributed. -Refer to component reference documentation to discover whether it supports -clustering, such as: +Refer to component reference documentation to discover whether it supports clustering, such as: - [prometheus.scrape][] - [pyroscope.scrape][] @@ -71,23 +62,22 @@ clustering, such as: ## Cluster monitoring and troubleshooting -To monitor your cluster status, you can check the Flow UI [clustering page][]. -The [debugging][] topic contains some clues to help pin down probable -clustering issues. +You can use the {{< param "PRODUCT_NAME" >}} UI [clustering page][] to monitor your cluster status. +Refer to [Debugging clustering issues][debugging] for additional troubleshooting information. 
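For example, a minimal clustered scrape pipeline looks like the following sketch; the discovery role and the remote write URL are placeholder values:

```river
// Every peer evaluates the same service discovery results.
discovery.kubernetes "pods" {
  role = "pod"
}

// With clustering enabled, each peer only scrapes the targets it owns.
prometheus.scrape "default" {
  clustering {
    enabled = true
  }

  targets    = discovery.kubernetes.pods.targets
  forward_to = [prometheus.remote_write.default.receiver]
}

prometheus.remote_write "default" {
  endpoint {
    url = "http://localhost:9009/api/prom/push"
  }
}
```

Clustering must also be enabled for the {{< param "PRODUCT_ROOT_NAME" >}} process itself by passing the `cluster` command-line flags to the [run][] command.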
{{% docs/reference %}} [run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md#clustering-beta" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/run.md#clustering-beta" +[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md#clustering-beta" [prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md#clustering-beta" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.scrape.md#clustering-beta" +[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape.md#clustering-beta" [pyroscope.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/pyroscope.scrape.md#clustering-beta" -[pyroscope.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.scrape.md#clustering-beta" +[pyroscope.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.scrape.md#clustering-beta" [prometheus.operator.podmonitors]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.podmonitors.md#clustering-beta" -[prometheus.operator.podmonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.operator.podmonitors.md#clustering-beta" +[prometheus.operator.podmonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.podmonitors.md#clustering-beta" [prometheus.operator.servicemonitors]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.servicemonitors.md#clustering-beta" -[prometheus.operator.servicemonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.operator.servicemonitors.md#clustering-beta" -[clustering page]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#clustering-page" -[clustering page]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging.md#clustering-page" -[debugging]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#debugging-clustering-issues" -[debugging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging.md#debugging-clustering-issues" +[prometheus.operator.servicemonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.servicemonitors.md#clustering-beta" +[clustering page]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#clustering-page" +[clustering page]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#clustering-page" +[debugging]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#debugging-clustering-issues" +[debugging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#debugging-clustering-issues" {{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/concepts/component_controller.md b/docs/sources/flow/concepts/component_controller.md index 887d427b8c90..1a19e13b4979 100644 --- a/docs/sources/flow/concepts/component_controller.md +++ b/docs/sources/flow/concepts/component_controller.md @@ -4,16 +4,16 @@ aliases: - /docs/grafana-cloud/agent/flow/concepts/component_controller/ - 
/docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/component_controller/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/component_controller/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/component_controller/ canonical: https://grafana.com/docs/agent/latest/flow/concepts/component_controller/ -title: Component controller description: Learn about the component controller +title: Component controller weight: 200 --- # Component controller -The _component controller_ is the core part of Grafana Agent Flow which manages -components at runtime. +The _component controller_ is the core part of {{< param "PRODUCT_NAME" >}} which manages components at runtime. The component controller is responsible for: @@ -24,16 +24,12 @@ The component controller is responsible for: ## Component graph -As discussed in [Components][], a relationship between components is created -when an expression is used to set the argument of one component to an exported -field of another component. +A relationship between [components][Components] is created when an expression is used to set the argument of one component to an exported field of another component. -The set of all components and the relationships between them define a [directed -acyclic graph][DAG] (DAG), which informs the component controller which -references are valid and in what order components must be evaluated. +The set of all components and the relationships between them define a [Directed Acyclic Graph][DAG] (DAG), +which informs the component controller which references are valid and in what order components must be evaluated. -For a configuration file to be valid, components must not reference themselves or -contain a cyclic reference: +For a configuration file to be valid, components must not reference themselves or contain a cyclic reference. ```river // INVALID: local.file.some_file can not reference itself: @@ -54,95 +50,76 @@ local.file "b" { ## Component evaluation -A component is _evaluated_ when its expressions are computed into concrete -values. The computed values are then used to configure the component's runtime -behavior. The component controller is finished loading once all components are -evaluated, configured, and running. +A component is _evaluated_ when its expressions are computed into concrete values. +The computed values configure the component's runtime behavior. +The component controller is finished loading once all components are evaluated, configured, and running. -The component controller only evaluates a given component after evaluating all -of that component's dependencies. Components that do not depend on other -components can be evaluated at any time during the evaluation process. +The component controller only evaluates a given component after evaluating all of that component's dependencies. +Components that don't depend on other components can be evaluated anytime during the evaluation process. ## Component reevaluation -As mentioned in [Components][], a component is dynamic: a component can update -its exports any number of times throughout its lifetime. +A [component][Components] is dynamic. A component can update its exports any number of times throughout its lifetime. -When a component updates its exports, a _controller reevaluation_ is triggered: -the component controller reevaluates any component which references the changed -component, any components which reference those components, and so on, until -all affected components are reevaluated. 
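As a rough sketch of such a dependency (the file path is a placeholder), whenever `local.file.api_key` re-reads its file and updates its `content` export, the component controller reevaluates `prometheus.remote_write.default`:

```river
local.file "api_key" {
  filename  = "/var/data/secrets/api-key"
  is_secret = true
}

prometheus.remote_write "default" {
  endpoint {
    url = "http://localhost:9009/api/prom/push"

    basic_auth {
      username = "admin"
      // Reevaluated every time local.file.api_key updates its content export.
      password = local.file.api_key.content
    }
  }
}
```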
+A _controller reevaluation_ is triggered when a component updates its exports. +The component controller reevaluates any component that references the changed component, any components that reference those components, +and so on, until all affected components are reevaluated. ## Component health At any given time, a component can have one of the following health states: -1. Unknown: default state, the component isn't running yet. -2. Healthy: the component is working as expected. -3. Unhealthy: the component is not working as expected. -4. Exited: the component has stopped and is no longer running. +1. Unknown: The default state. The component isn't running yet. +1. Healthy: The component is working as expected. +1. Unhealthy: The component isn't working as expected. +1. Exited: The component has stopped and is no longer running. -By default, the component controller determines the health of a component. The -component controller marks a component as healthy as long as that component is -running and its most recent evaluation succeeded. +By default, the component controller determines the health of a component. +The component controller marks a component as healthy as long as that component is running and its most recent evaluation succeeded. -Some components can report their own component-specific health information. For -example, the `local.file` component reports itself as unhealthy if the file it -was watching gets deleted. +Some components can report their own component-specific health information. +For example, the `local.file` component reports itself as unhealthy if the file it was watching gets deleted. -The overall health of a component is determined by combining the -controller-reported health of the component with the component-specific health -information. +The overall health of a component is determined by combining the controller-reported health of the component with the component-specific health information. -An individual component's health is independent of the health of any other -components it references: a component can be marked as healthy even if it -references an exported field of an unhealthy component. +An individual component's health is independent of the health of any other components it references. +A component can be marked as healthy even if it references an exported field of an unhealthy component. ## Handling evaluation failures -When a component fails to evaluate, it is marked as unhealthy with the reason -for why the evaluation failed. +When a component fails to evaluate, it's marked as unhealthy with the reason for why the evaluation failed. -When an evaluation fails, the component continue operating as normal: it -continues using its previous set of evaluated arguments, and it can continue -exporting new values. +When an evaluation fails, the component continues operating as normal. +The component continues using its previous set of evaluated arguments and can continue exporting new values. -This prevents failure propagation: if your `local.file` component which watches -API keys suddenly stops working, other components continues using the last -valid API key until the component returns to a healthy state. +This behavior prevents failure propagation. +If your `local.file` component, which watches API keys, suddenly stops working, other components continue using the last valid API key until the component returns to a healthy state. 
## In-memory traffic -Components which expose HTTP endpoints, such as [prometheus.exporter.unix][], -can expose an internal address which will completely bypass the network and -communicate in-memory. This allows components within the same process to -communicate with one another without needing to be aware of any network-level -protections such as authentication or mutual TLS. +Components that expose HTTP endpoints, such as [prometheus.exporter.unix][], can expose an internal address that completely bypasses the network and communicate in-memory. +Components within the same process can communicate with one another without needing to be aware of any network-level protections such as authentication or mutual TLS. -The internal address defaults to `agent.internal:12345`. If this address -collides with a real target on your network, change it to something unique -using the `--server.http.memory-addr` flag in the [run][] command. +The internal address defaults to `agent.internal:12345`. +If this address collides with a real target on your network, change it to something unique using the `--server.http.memory-addr` flag in the [run][] command. -Components must opt-in to using in-memory traffic. See the individual -documentation for components to learn if in-memory traffic is supported. +Components must opt-in to using in-memory traffic. +Refer to the individual documentation for components to learn if in-memory traffic is supported. ## Updating the configuration file -Both the `/-/reload` HTTP endpoint and the `SIGHUP` signal can be used to -inform the component controller to reload the configuration file. When this happens, -the component controller will synchronize the set of running components with -the ones in the configuration file, removing components which are no longer defined in -the configuration file and creating new components which were added to the configuration -file. All components managed by the controller will be reevaluated after -reloading. +The `/-/reload` HTTP endpoint and the `SIGHUP` signal can inform the component controller to reload the configuration file. +When this happens, the component controller synchronizes the set of running components with the ones in the configuration file, +removing components no longer defined in the configuration file and creating new components added to the configuration file. +All components managed by the controller are reevaluated after reloading. 
[DAG]: https://en.wikipedia.org/wiki/Directed_acyclic_graph {{% docs/reference %}} [prometheus.exporter.unix]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.exporter.unix.md" -[prometheus.exporter.unix]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.unix.md" +[prometheus.exporter.unix]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.unix.md" [run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/run.md" +[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" [Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/components.md" +[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" {{% /docs/reference %}} diff --git a/docs/sources/flow/concepts/components.md b/docs/sources/flow/concepts/components.md index 4d3f273c73af..1f93d768113e 100644 --- a/docs/sources/flow/concepts/components.md +++ b/docs/sources/flow/concepts/components.md @@ -4,30 +4,28 @@ aliases: - /docs/grafana-cloud/agent/flow/concepts/components/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/components/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/components/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/components/ canonical: https://grafana.com/docs/agent/latest/flow/concepts/components/ -title: Components description: Learn about components +title: Components weight: 100 --- # Components -_Components_ are the building blocks of Grafana Agent Flow. Each component is -responsible for handling a single task, such as retrieving secrets or -collecting Prometheus metrics. +_Components_ are the building blocks of {{< param "PRODUCT_NAME" >}}. +Each component handles a single task, such as retrieving secrets or collecting Prometheus metrics. -Components are composed of two parts: +Components are composed of the following: -* Arguments: settings which configure a component. -* Exports: named values which a component exposes to other components. +* Arguments: Settings that configure a component. +* Exports: Named values that a component exposes to other components. -Each component has a name which describes what that component is responsible -for. For example, the `local.file` component is responsible for retrieving the -contents of files on disk. +Each component has a name that describes what that component is responsible for. +For example, the `local.file` component is responsible for retrieving the contents of files on disk. -Components are specified in the config file by first providing the component's -name with a user-specified label, and then by providing arguments to configure -the component: +You specify components in the configuration file by first providing the component's name with a user-specified label, +and then by giving arguments to configure the component. ```river discovery.kubernetes "pods" { @@ -39,54 +37,40 @@ discovery.kubernetes "nodes" { } ``` -> Components are referenced by combining the component name with its label. For -> example, a `local.file` component labeled `foo` would be referenced as -> `local.file.foo`. 
-> -> The combination of a component's name and its label must be unique within the -> configuration file. This means multiple instances of a component may be -> defined as long as each instance has a different label value. +You reference components by combining the component name with its label. +For example, you can reference a `local.file` component labeled `foo` as `local.file.foo`. + +The combination of a component's name and its label must be unique within the configuration file. +Combining component names with a label means you can define multiple instances of a component as long as each instance has a different label value. ## Pipelines -Most arguments for a component in a config file are constant values, such -setting a `log_level` attribute to the quoted string `"debug"`: +Most arguments for a component in a configuration file are constant values, such as setting a `log_level` attribute to the quoted string `"debug"`. ```river log_level = "debug" ``` -_Expressions_ can be used to dynamically compute the value of an argument at -runtime. Among other things, expressions can be used to retrieve the value of -an environment variable (`log_level = env("LOG_LEVEL")`) or to reference an -exported field of another component (`log_level = local.file.log_level.content`). +You use _expressions_ to dynamically compute the value of an argument at runtime. +You can use expressions to retrieve the value of an environment variable (`log_level = env("LOG_LEVEL")`) +or to reference an exported field of another component (`log_level = local.file.log_level.content`). -When a component's argument references an exported field of another component, -a dependant relationship is created: a component's input (arguments) now -depends on another component's output (exports). The input of the component -will now be re-evaluated any time the exports of the components it references -get updated. +You create a dependent relationship when a component's argument references an exported field of another component. +A component's arguments now depend on another component's exports. +The input of the component is re-evaluated whenever the exports of the components it references are updated. -The flow of data through the set of references between components forms a -_pipeline_. +The flow of data through the set of references between components forms a _pipeline_. An example pipeline may look like this: -1. A `local.file` component watches a file on disk containing an API key. -2. A `prometheus.remote_write` component is configured to receive metrics and - forward them to an external database using the API key from the `local.file` - for authentication. -3. A `discovery.kubernetes` component discovers and exports Kubernetes Pods - where metrics can be collected. -4. A `prometheus.scrape` component references the exports of the previous - component, and sends collected metrics to the `prometheus.remote_write` - component. +1. A `local.file` component watches a file that contains an API key. +1. A `prometheus.remote_write` component is configured to receive metrics and forward them to an external database using the API key from the `local.file` for authentication. +1. A `discovery.kubernetes` component discovers and exports Kubernetes Pods where metrics can be collected. +1. A `prometheus.scrape` component references the exports of the previous component, and sends collected metrics to the `prometheus.remote_write` component. -

-Flow of example pipeline -

+![Flow of example pipeline](/media/docs/agent/concepts_example_pipeline.svg) -The following config file represents the above pipeline: +The following configuration file represents the pipeline. ```river // Get our API key from disk. @@ -104,10 +88,10 @@ local.file "api_key" { is_secret = true } -// Create a prometheus.remote_write component which other components can send +// Create a prometheus.remote_write component, which other components can send // metrics to. // -// This component exports a "receiver" value which can be used by other +// This component exports a "receiver" value, which can be used by other // components to send metrics. prometheus.remote_write "prod" { endpoint { @@ -116,7 +100,7 @@ prometheus.remote_write "prod" { basic_auth { username = "admin" - // Use our password file for authenticating with the production database. + // Use the password file to authenticate with the production database. password = local.file.api_key.content } } @@ -124,7 +108,7 @@ prometheus.remote_write "prod" { // Find Kubernetes pods where we can collect metrics. // -// This component exports a "targets" value which contains the list of +// This component exports a "targets" value, which contains the list of // discovered pods. discovery.kubernetes "pods" { role = "pod" diff --git a/docs/sources/flow/concepts/config-language/_index.md b/docs/sources/flow/concepts/config-language/_index.md new file mode 100644 index 000000000000..80699732f3ac --- /dev/null +++ b/docs/sources/flow/concepts/config-language/_index.md @@ -0,0 +1,147 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/concepts/config-language/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/ +- configuration-language/ # /docs/agent/latest/flow/concepts/configuration-language/ +# Previous page aliases for backwards compatibility: +- /docs/grafana-cloud/agent/flow/config-language/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/ +- /docs/grafana-cloud/send-data/agent/flow/config-language/ +- ../configuration-language/ # /docs/agent/latest/flow/configuration-language/ +- ../concepts/configuration_language/ # /docs/agent/latest/flow/concepts/configuration_language/ +- /docs/grafana-cloud/agent/flow/concepts/configuration_language/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/configuration_language/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/configuration_language/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/configuration_language/ +canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/ +description: Learn about the configuration language +title: Configuration language +weight: 10 +--- + +# Configuration language + +{{< param "PRODUCT_NAME" >}} dynamically configures and connects components with a custom configuration language called River. + +River aims to reduce errors in configuration files by making configurations easier to read and write. +River configurations use blocks that can be easily copied and pasted from the documentation to help you get started as quickly as possible. + +A River configuration file tells {{< param "PRODUCT_NAME" >}} which components to launch and how to bind them together into a pipeline. 
+ +The River syntax uses blocks, attributes, and expressions. + +```river +// Create a local.file component labeled my_file. +// This can be referenced by other components as local.file.my_file. +local.file "my_file" { + filename = "/tmp/my-file.txt" +} + +// Pattern for creating a labeled block, which the above block follows: +BLOCK_NAME "BLOCK_LABEL" { + // Block body + IDENTIFIER = EXPRESSION // Attribute +} + +// Pattern for creating an unlabeled block: +BLOCK_NAME { + // Block body + IDENTIFIER = EXPRESSION // Attribute +} +``` + +[River is designed][RFC] with the following requirements in mind: + +* _Fast_: The configuration language must be fast so the component controller can quickly evaluate changes. +* _Simple_: The configuration language must be easy to read and write to minimize the learning curve. +* _Debuggable_: The configuration language must give detailed information when there's a mistake in the configuration file. + +River is similar to HCL, the language Terraform and other Hashicorp projects use. +It's a distinct language with custom syntax and features, such as first-class functions. + +* Blocks are a group of related settings and usually represent creating a component. + Blocks have a name that consists of zero or more identifiers separated by `.`, an optional user label, and a body containing attributes and nested blocks. +* Attributes appear within blocks and assign a value to a name. +* Expressions represent a value, either literally or by referencing and combining other values. + You use expressions to compute a value for an attribute. + +River is declarative, so ordering components, blocks, and attributes within a block isn't significant. +The relationship between components determines the order of operations. + +## Attributes + +You use _Attributes_ to configure individual settings. +Attributes always take the form of `ATTRIBUTE_NAME = ATTRIBUTE_VALUE`. + +The following example shows how to set the `log_level` attribute to `"debug"`. + +```river +log_level = "debug" +``` + +## Expressions + +You use expressions to compute the value of an attribute. +The simplest expressions are constant values like `"debug"`, `32`, or `[1, 2, 3, 4]`. +River supports complex expressions, for example: + +* Referencing the exports of components: `local.file.password_file.content` +* Mathematical operations: `1 + 2`, `3 * 4`, `(5 * 6) + (7 + 8)` +* Equality checks: `local.file.file_a.content == local.file.file_b.content` +* Calling functions from River's standard library: `env("HOME")` retrieves the value of the `HOME` environment variable. + +You can use expressions for any attribute inside a component definition. + +### Referencing component exports + +The most common expression is to reference the exports of a component, for example, `local.file.password_file.content`. +You form a reference to a component's exports by merging the component's name (for example, `local.file`), +label (for example, `password_file`), and export name (for example, `content`), delimited by a period. + +## Blocks + +You use _Blocks_ to configure components and groups of attributes. +Each block can contain any number of attributes or nested blocks. + +```river +prometheus.remote_write "default" { + endpoint { + url = "http://localhost:9009/api/prom/push" + } +} +``` + +The preceding example has two blocks: + +* `prometheus.remote_write "default"`: A labeled block which instantiates a `prometheus.remote_write` component. + The label is the string `"default"`. 
+* `endpoint`: An unlabeled block inside the component that configures an endpoint to send metrics to. + This block sets the `url` attribute to specify the endpoint. + + +## Tooling + +You can use one or all of the following tools to help you write configuration files in River. + +* Experimental editor support for + * [vim](https://github.com/rfratto/vim-river) + * [VSCode](https://github.com/rfratto/vscode-river) + * [river-mode](https://github.com/jdbaldry/river-mode) for Emacs +* Code formatting using the [`agent fmt` command][fmt] + +You can also start developing your own tooling using the {{< param "PRODUCT_ROOT_NAME" >}} repository as a go package or use the +[tree-sitter grammar][] with other programming languages. + +[RFC]: https://github.com/grafana/agent/blob/97a55d0d908b26dbb1126cc08b6dcc18f6e30087/docs/rfcs/0005-river.md +[vim]: https://github.com/rfratto/vim-river +[VSCode]: https://github.com/rfratto/vscode-river +[river-mode]: https://github.com/jdbaldry/river-mode +[tree-sitter grammar]: https://github.com/grafana/tree-sitter-river + +{{% docs/reference %}} +[fmt]: "/docs/agent/ -> /docs/agent//flow/reference/cli/fmt" +[fmt]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/fmt" +{{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/concepts/config-language/components.md b/docs/sources/flow/concepts/config-language/components.md new file mode 100644 index 000000000000..967d2437da8c --- /dev/null +++ b/docs/sources/flow/concepts/config-language/components.md @@ -0,0 +1,104 @@ +--- +aliases: +- ../configuration-language/components/ # /docs/agent/latest/flow/concepts/configuration-language/components/ +- /docs/grafana-cloud/agent/flow/concepts/config-language/components/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/components/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/components/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/components/ +# Previous page aliases for backwards compatibility: +- ../../configuration-language/components/ # /docs/agent/latest/flow/configuration-language/components/ +- /docs/grafana-cloud/agent/flow/config-language/components/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/components/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/components/ +- /docs/grafana-cloud/send-data/agent/flow/config-language/components/ +canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/components/ +description: Learn about the components configuration language +title: Components configuration language +weight: 300 +--- + +# Components configuration language + +Components are the defining feature of {{< param "PRODUCT_NAME" >}}. +Components are small, reusable pieces of business logic that perform a single task like retrieving secrets or collecting Prometheus metrics, +and you can wire them together to form programmable pipelines of telemetry data. + +The [_component controller_][controller] is responsible for scheduling components, reporting their health and debug status, re-evaluating their arguments, and providing their exports. + +## Configuring components + +You create [components][] by defining a top-level River block. +All components are identified by their name, describing what the component is responsible for, and a user-specified _label_. 
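+
+For example, the following minimal sketch defines a `discovery.kubernetes` component, identified by its name, `discovery.kubernetes`, and the user-specified label, "pods".
+
+```river
+// Other components can refer to this component as discovery.kubernetes.pods.
+discovery.kubernetes "pods" {
+  role = "pod"
+}
+```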
+
+## Arguments and exports
+
+Most user interactions with components center around two basic concepts: _arguments_ and _exports_.
+
+* _Arguments_ are settings that modify the behavior of a component.
+  They can be any number of attributes or nested unlabeled blocks, some required and some optional.
+  Any optional arguments that aren't set take on their default values.
+
+* _Exports_ are zero or more output values that other components can refer to, and they can be of any River type.
+
+The following block defines a `local.file` component labeled "targets".
+The `local.file.targets` component exposes the file `content` as a string in its exports.
+
+The `filename` attribute is a _required_ argument.
+You can also set a number of _optional_ arguments, in this case `detector`, `poll_frequency`, and `is_secret`,
+which configure how and how often the file is polled and whether its contents are sensitive.
+
+```river
+local.file "targets" {
+  // Required argument
+  filename = "/etc/agent/targets"
+
+  // Optional arguments: Components may have some optional arguments that
+  // do not need to be defined.
+  //
+  // The optional arguments for local.file are is_secret, detector, and
+  // poll_frequency.
+
+  // Exports: a single field named `content`
+  // It can be referred to as `local.file.targets.content`
+}
+```
+
+## Referencing components
+
+To wire components together, you use references to pass the exports of one component as the arguments of another.
+References can only appear in components.
+
+For example, here's a component that scrapes Prometheus metrics.
+The `targets` field is populated with two scrape targets: a constant target, `localhost:9001`, and an expression that ties the target to the value of `local.file.targets.content`.
+
+```river
+prometheus.scrape "default" {
+  targets = [
+    { "__address__" = local.file.targets.content }, // tada!
+    { "__address__" = "localhost:9001" },
+  ]
+
+  forward_to = [prometheus.remote_write.default.receiver]
+  scrape_config {
+    job_name = "default"
+  }
+}
+```
+
+Each time the file contents change, the `local.file` component updates its exports, and the new value is sent to the `targets` field of `prometheus.scrape`.
+
+Each argument and exported field has an underlying [type][].
+River checks the expression type before assigning a value to an attribute.
+The documentation of each [component][components] provides more information about how to wire components together.
+
+In the previous example, the `local.file.targets.content` expression is evaluated to a concrete value.
+The value is type-checked and substituted into `prometheus.scrape.default`, which is configured in turn.
+ +{{% docs/reference %}} +[components]: "/docs/agent/ -> /docs/agent//flow/reference/components" +[components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components" +[controller]: "/docs/agent/ -> /docs/agent//flow/concepts/component_controller" +[controller]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/component_controller" +[type]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values" +[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values" +{{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/concepts/config-language/expressions/_index.md b/docs/sources/flow/concepts/config-language/expressions/_index.md new file mode 100644 index 000000000000..56dc4c1ee4a1 --- /dev/null +++ b/docs/sources/flow/concepts/config-language/expressions/_index.md @@ -0,0 +1,38 @@ +--- +aliases: +- ../configuration-language/expressions/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/ +- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/ +# Previous page aliases for backwards compatibility: +- ../../configuration-language/expressions/ # /docs/agent/latest/flow/configuration-language/expressions/ +- /docs/grafana-cloud/agent/flow/config-language/expressions/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/ +- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/ +canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/ +description: Learn about expressions +title: Expressions +weight: 400 +--- + +# Expressions + +Expressions represent or compute values you can assign to attributes within a configuration. + +Basic expressions are literal values, like `"Hello, world!"` or `true`. +Expressions may also do things like [refer to values][] exported by components, perform arithmetic, or [call functions][]. + +You use expressions when you configure any component. +All component arguments have an underlying [type][]. +River checks the expression type before assigning the result to an attribute. 
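+
+The following sketch gathers these expression forms in one place. The attribute names and the `local.file.api_key` component are placeholders used only for illustration.
+
+```river
+// Placeholder attributes used only to illustrate expressions.
+log_level = "debug"                     // Literal value.
+limit     = 3 * (2 + 2)                 // Mathematical operation.
+password  = local.file.api_key.content  // Reference to a component's exports.
+home      = env("HOME")                 // Standard library function call.
+```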
+ +{{% docs/reference %}} +[refer to values]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/referencing_exports" +[refer to values]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/referencing_exports" +[call functions]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/function_calls" +[call functions]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/function_calls" +[type]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values" +[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values" +{{% /docs/reference %}} diff --git a/docs/sources/flow/concepts/config-language/expressions/function_calls.md b/docs/sources/flow/concepts/config-language/expressions/function_calls.md new file mode 100644 index 000000000000..b9598fea91a1 --- /dev/null +++ b/docs/sources/flow/concepts/config-language/expressions/function_calls.md @@ -0,0 +1,43 @@ +--- +aliases: +- ../../configuration-language/expressions/function-calls/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/function-calls/ +- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/function_calls/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/function_calls/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/function_calls/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/function_calls/ +# Previous page aliases for backwards compatibility: +- ../../../configuration-language/expressions/function-calls/ # /docs/agent/latest/flow/configuration-language/expressions/function-calls/ +- /docs/grafana-cloud/agent/flow/config-language/expressions/function_calls/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/function_calls/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/function_calls/ +- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/function_calls/ +canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/function_calls/ +description: Learn about function calls +title: Function calls +weight: 400 +--- + +# Function calls + +You can use River function calls to build richer expressions. + +Functions take zero or more arguments as their input and always return a single value as their output. +You can't construct functions. You can call functions from River's standard library or export them from a component. + +If a function fails, the expression isn't evaluated, and an error is reported. + +## Standard library functions + +River contains a [standard library][] of functions. +Some functions enable interaction with the host system, for example, reading from an environment variable. +Some functions allow for more complex expressions, for example, concatenating arrays or decoding JSON strings into objects. 
+ +```river +env("HOME") +json_decode(local.file.cfg.content)["namespace"] +``` + +{{% docs/reference %}} +[standard library]: "/docs/agent/ -> /docs/agent//flow/reference/stdlib" +[standard library]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/stdlib" +{{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/concepts/config-language/expressions/operators.md b/docs/sources/flow/concepts/config-language/expressions/operators.md new file mode 100644 index 000000000000..19bb003f74f3 --- /dev/null +++ b/docs/sources/flow/concepts/config-language/expressions/operators.md @@ -0,0 +1,125 @@ +--- +aliases: +- ../../configuration-language/expressions/operators/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/operators/ +- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/operators/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/operators/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/operators/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/operators/ +# Previous page aliases for backwards compatibility: +- ../../../configuration-language/expressions/operators/ # /docs/agent/latest/flow/configuration-language/expressions/operators/ +- /docs/grafana-cloud/agent/flow/config-language/expressions/operators/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/operators/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/operators/ +- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/operators/ +canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/operators/ +description: Learn about operators +title: Operators +weight: 300 +--- + +# Operators + +River uses a common set of operators. +All operations follow the standard [PEMDAS][] order of mathematical operations. + +## Arithmetic operators + +Operator | Description +---------|--------------------------------------------------- +`+` | Adds two numbers. +`-` | Subtracts two numbers. +`*` | Multiplies two numbers. +`/` | Divides two numbers. +`%` | Computes the remainder after dividing two numbers. +`^` | Raises the number to the specified power. + +## String operators + +Operator | Description +---------|------------------------- +`+` | Concatenate two strings. + +## Comparison operators + +Operator | Description +---------|--------------------------------------------------------------------- +`==` | `true` when two values are equal. +`!=` | `true` when two values aren't equal. +`<` | `true` when the left value is less than the right value. +`<=` | `true` when the left value is less than or equal to the right value. +`>` | `true` when the left value is greater than the right value. +`>=` | `true` when the left value is greater or equal to the right value. + +You can apply the equality operators `==` and `!=` to any operands. + +The two operands in ordering operators `<` `<=` `>` and `>=` must both be _orderable_ and of the same type. +The results of the comparisons are: + +* Boolean values are equal if they're either both true or both false. +* Numerical (integer and floating-point) values are orderable in the usual way. +* String values are orderable lexically byte-wise. +* Objects are equal if all their fields are equal. 
+* Array values are equal if their corresponding elements are equal.
+
+## Logical operators
+
+Operator | Description
+---------|-----------------------------------------------------------
+`&&`     | `true` when both the left _and_ right values are `true`.
+`\|\|`   | `true` when either the left _or_ right value is `true`.
+`!`      | Negates a boolean value.
+
+Logical operators apply to boolean values and yield a boolean result.
+
+## Assignment operator
+
+River uses `=` as its assignment operator.
+
+An assignment statement may only assign a single value.
+Each value must be _assignable_ to the attribute or object key.
+
+* You can assign `null` to any attribute.
+* You can assign numerical, string, boolean, array, function, capsule, and object types to attributes of the corresponding type.
+* You can assign numbers to string attributes with an implicit conversion.
+* You can assign strings to numerical attributes if they represent a number.
+* You can't assign blocks.
+
+## Brackets
+
+Brackets | Description
+---------|------------------------------------
+`{ }`    | Defines blocks and objects.
+`( )`    | Groups and prioritizes expressions.
+`[ ]`    | Defines arrays.
+
+The following example uses curly braces and square brackets to define an object and an array.
+
+```river
+obj = { app = "agent", namespace = "dev" }
+arr = [1, true, 7 * (1+1), 3]
+```
+
+## Access operators
+
+Operator | Description
+---------|------------------------------------------------------------------------
+`[ ]`    | Access a member of an array or object.
+`.`      | Access a named member of an object or an exported field of a component.
+
+You can access arbitrarily nested values with River's access operators.
+You can use square brackets to access array elements by their zero-based index, and object fields by enclosing the field name in double quotes.
+You can use the dot operator to access object fields (without double quotes) and component exports.
+
+```river
+obj["app"]
+arr[1]
+
+obj.app
+local.file.token.content
+```
+
+If you use the `[ ]` operator to access a member of an object where the member doesn't exist, the resulting value is `null`.
+
+If you use the `.` operator to access a named member of an object where the named member doesn't exist, an error is generated.
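+
+The following sketch shows the difference, reusing the `obj` object from the previous example. The attribute names on the left are placeholders for illustration only.
+
+```river
+// Placeholder attributes, used only to illustrate the access operators.
+present = obj["app"]   // "agent"
+missing = obj["owner"] // The "owner" member doesn't exist, so the result is null.
+// broken = obj.owner  // Accessing the missing member with "." would be an error.
+```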
+ +[PEMDAS]: https://en.wikipedia.org/wiki/Order_of_operations diff --git a/docs/sources/flow/concepts/config-language/expressions/referencing_exports.md b/docs/sources/flow/concepts/config-language/expressions/referencing_exports.md new file mode 100644 index 000000000000..2cc7a8ca5b21 --- /dev/null +++ b/docs/sources/flow/concepts/config-language/expressions/referencing_exports.md @@ -0,0 +1,66 @@ +--- +aliases: +- ../../configuration-language/expressions/referencing-exports/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/referencing-exports/ +- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/referencing_exports/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/referencing_exports/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/referencing_exports/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/referencing_exports/ +# Previous page aliases for backwards compatibility: +- ../../../configuration-language/expressions/referencing-exports/ # /docs/agent/latest/flow/configuration-language/expressions/referencing-exports/ +- /docs/grafana-cloud/agent/flow/config-language/expressions/referencing_exports/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/referencing_exports/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/referencing_exports/ +- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/referencing_exports/ +canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/referencing_exports/ +description: Learn about referencing component exports +title: Referencing component exports +weight: 200 +--- + +# Referencing component exports + +Referencing exports enables River to configure and connect components dynamically using expressions. +While components can work in isolation, they're more useful when one component's behavior and data flow are bound to the exports of another, +building a dependency relationship between the two. + +Such references can only appear as part of another component's arguments or a configuration block's fields. +Components can't reference themselves. + +## Using references + +You build references by combining the component's name, label, and named export with dots. + +For example, you can reference the contents of a file exported by the `local.file` component labeled `target` as `local.file.target.content`. +Similarly, a `prometheus.remote_write` component instance labeled `onprem` exposes its receiver for metrics on `prometheus.remote_write.onprem.receiver`. + +The following example shows some references. + +```river +local.file "target" { + filename = "/etc/agent/target" +} + +prometheus.scrape "default" { + targets = [{ "__address__" = local.file.target.content }] + forward_to = [prometheus.remote_write.onprem.receiver] +} + +prometheus.remote_write "onprem" { + endpoint { + url = "http://prometheus:9009/api/prom/push" + } +} +``` + +In the preceding example, you wired together a very simple pipeline by writing a few River expressions. + +![Flow of example pipeline](/media/docs/agent/flow_referencing_exports_diagram.svg) + +After the value is resolved, it must match the [type][] of the attribute it is assigned to. 
+While you can only configure attributes using the basic River types, +the exports of components can take on special internal River types, such as Secrets or Capsules, which expose different functionality. + +{{% docs/reference %}} +[type]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values" +[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values" +{{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/concepts/config-language/expressions/types_and_values.md b/docs/sources/flow/concepts/config-language/expressions/types_and_values.md new file mode 100644 index 000000000000..1f27c0b5ecac --- /dev/null +++ b/docs/sources/flow/concepts/config-language/expressions/types_and_values.md @@ -0,0 +1,224 @@ +--- +aliases: +- ../../configuration-language/expressions/types-and-values/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/types-and-values/ +- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/types_and_values/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/types_and_values/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/types_and_values/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values/ +# Previous page aliases for backwards compatibility: +- ../../../configuration-language/expressions/types-and-values/ # /docs/agent/latest/flow/configuration-language/expressions/types-and-values/ +- /docs/grafana-cloud/agent/flow/config-language/expressions/types_and_values/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/types_and_values/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/types_and_values/ +- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/types_and_values/ +canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/types_and_values/ +description: Learn about the River types and values +title: Types and values +weight: 100 +--- + +# Types and values + +River uses the following types for its values: + +* `number`: Any numeric value, like `3` or `3.14`. +* `string`: A sequence of Unicode characters representing text, like `"Hello, world!"`. +* `bool`: A boolean value, either `true` or `false`. +* `array`: A sequence of values, like `[1, 2, 3]`. Elements within the list are indexed by whole numbers, starting with zero. +* `object`: A group of values identified by named labels, like `{ name = "John" }`. +* `function`: A value representing a routine that runs with arguments to compute another value, like `env("HOME")`. + Functions take zero or more arguments as input and always return a single value as output. +* `null`: A type that has no value. + +## Naming convention + +In addition to the preceding types, the [component reference][] documentation uses the following conventions for referring to types: + +* `any`: A value of any type. +* `map(T)`: an `object` with the value type `T`. + For example, `map(string)` is an object where all the values are strings. + The key type of an object is always a string or an identifier converted into a string. +* `list(T)`: an `array` with the value type`T`. + For example, `list(string)` is an array where all the values are strings. 
+* `duration`: a `string` denoting a duration of time, such as `"1d"`, `"1h30m"`, `"10s"`. + Valid units are: + + * `d` for days. + * `h` for hours. + * `m` for minutes. + * `s` for seconds. + * `ms` for milliseconds. + * `ns` for nanoseconds. + + You can combine values of descending units to add their values together. For example, `"1h30m"` is the same as `"90m"`. + +## Numbers + +River handles integers, unsigned integers, and floating-point values as a single 'number' type, simplifying writing and reading River configuration files. + +```river +3 == 3.00 // true +5.0 == (10 / 2) // true +1e+2 == 100 // true +2e-3 == 0.002 // true +``` + +## Strings + +Strings are represented by sequences of Unicode characters surrounded by double quotes `""`. + +```river +"Hello, world!" +``` + +A `\` in a string starts an escape sequence to represent a special character. +The following table shows the supported escape sequences. + +| Sequence | Replacement | +|--------------|-----------------------------------------------------------------------------------------| +| `\\` | The `\` character `U+005C` | +| `\a` | The alert or bell character `U+0007` | +| `\b` | The backspace character `U+0008` | +| `\f` | The formfeed character `U+000C` | +| `\n` | The newline character `U+000A` | +| `\r` | The carriage return character `U+000D` | +| `\t` | The horizontal tab character `U+0009` | +| `\v` | The vertical tab character `U+000B` | +| `\'` | The `'` character `U+0027` | +| `\"` | The `"` character `U+0022`, which prevents terminating the string | +| `\NNN` | A literal byte (NNN is three octal digits) | +| `\xNN` | A literal byte (NN is two hexadecimal digits) | +| `\uNNNN` | A Unicode character from the basic multilingual plane (NNNN is four hexadecimal digits) | +| `\UNNNNNNNN` | A Unicode character from supplementary planes (NNNNNNNN is eight hexadecimal digits) | + +## Raw strings + +Raw strings are represented by sequences of Unicode characters surrounded by backticks ``` `` ```. +Raw strings don't support any escape sequences. + +```river +`Hello, "world"!` +``` + +Within the backticks, any character may appear except a backtick. +You can include a backtick by concatenating a double-quoted string that contains a backtick using `+`. + +A multiline raw string is interpreted exactly as written. + +```river +`Hello, +"world"!` +``` + +The preceding multiline raw string is interpreted as a string with the following value. + +```string +Hello, +"world"! +``` + +## Bools + +Bools are represented by the symbols `true` and `false`. + +## Arrays + +You construct arrays with a sequence of comma-separated values surrounded by square brackets `[]`. + +```river +[0, 1, 2, 3] +``` + +You can place values in array elements on separate lines for readability. +A comma after the final value must be present if the closing bracket `]` is on a different line than the final value. + +```river +[ + 0, + 1, + 2, +] +``` + +## Objects + +You construct objects with a sequence of comma-separated key-value pairs surrounded by curly braces `{}`. + +```river +{ + first_name = "John", + last_name = "Doe", +} +``` + +You can omit the comma after the final key-value pair if the closing curly brace `}` is on the same line as the final pair. + +```river +{ name = "John" } +``` + +If the key isn't a valid identifier, you must wrap it in double quotes like a string. 
+ +```river +{ + "app.kubernetes.io/name" = "mysql", + "app.kubernetes.io/instance" = "mysql-abcxyz", + namespace = "default", +} +``` + +{{% admonition type="note" %}} +Don't confuse objects with blocks. + +* An _object_ is a value assigned to an [Attribute][]. You **must** use commas between key-value pairs on separate lines. +* A [Block][] is a named structural element composed of multiple attributes. You **must not** use commas between attributes. + +[Attribute]: {{< relref "../syntax.md#Attributes" >}} +[Block]: {{< relref "../syntax.md#Blocks" >}} +{{% /admonition %}} + +## Functions + +You can't construct function values. You can call functions from the standard library or export them from a component. + +## Null + +The null value is represented by the symbol `null`. + +## Special types + +#### Secrets + +A `secret` is a special type of string that's never displayed to the user. +You can assign `string` values to an attribute expecting a `secret`, but never the inverse. +It's impossible to convert a secret to a string or assign a secret to an attribute expecting a string. + +#### Capsules + +A `capsule` is a special type that represents a category of _internal_ types used by {{< param "PRODUCT_NAME" >}}. +Each capsule type has a unique name and is represented to the user as `capsule("")`. +You can't construct capsule values. You can use capsules in expressions as any other type. +Capsules aren't inter-compatible, and an attribute expecting a capsule can only be given a capsule of the same internal type. +If an attribute expects a `capsule("prometheus.Receiver")`, you can only assign a `capsule("prometheus.Receiver")` type. +The specific type of capsule expected is explicitly documented for any component that uses or exports them. + +In the following example, the `prometheus.remote_write` component exports a `receiver`, which is a `capsule("prometheus.Receiver")` type. +You can use this capsule in the `forward_to` attribute of `prometheus.scrape`, which expects an array of `capsule("prometheus.Receiver")`. + +```river +prometheus.remote_write "default" { + endpoint { + url = "http://localhost:9090/api/v1/write" + } +} + +prometheus.scrape "default" { + targets = [/* ... 
*/] + forward_to = [prometheus.remote_write.default.receiver] +} +``` + +{{% docs/reference %}} +[type]: "/docs/agent/ -> /docs/agent//flow/reference/components" +[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components" +{{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/concepts/config-language/files.md b/docs/sources/flow/concepts/config-language/files.md new file mode 100644 index 000000000000..bd5565635fe7 --- /dev/null +++ b/docs/sources/flow/concepts/config-language/files.md @@ -0,0 +1,26 @@ +--- +aliases: +- ../configuration-language/files/ # /docs/agent/latest/flow/concepts/configuration-language/files/ +- /docs/grafana-cloud/agent/flow/concepts/config-language/files/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/files/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/files/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/files/ +# Previous page aliases for backwards compatibility: +- ../../configuration-language/files/ # /docs/agent/latest/flow/configuration-language/files/ +- /docs/grafana-cloud/agent/flow/config-language/files/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/files/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/files/ +- /docs/grafana-cloud/send-data/agent/flow/config-language/files/ +canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/files/ +description: Learn about River files +title: Files +weight: 100 +--- + +# Files + +River files are plain text files with the `.river` file extension. +You can refer to each River file as a "configuration file" or a "River configuration." + +River files must be UTF-8 encoded and can contain Unicode characters. +River files can use Unix-style line endings (LF) and Windows-style line endings (CRLF), but formatters may replace all line endings with Unix-style ones. diff --git a/docs/sources/flow/concepts/config-language/syntax.md b/docs/sources/flow/concepts/config-language/syntax.md new file mode 100644 index 000000000000..6f55701dab67 --- /dev/null +++ b/docs/sources/flow/concepts/config-language/syntax.md @@ -0,0 +1,125 @@ +--- +aliases: +- ../configuration-language/syntax/ # /docs/agent/latest/flow/concepts/configuration-language/syntax/ +- /docs/grafana-cloud/agent/flow/concepts/config-language/syntax/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/syntax/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/syntax/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/syntax/ +# Previous page aliases for backwards compatibility: +- ../../configuration-language/syntax/ # /docs/agent/latest/flow/configuration-language/syntax/ +- /docs/grafana-cloud/agent/flow/config-language/syntax/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/syntax/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/syntax/ +- /docs/grafana-cloud/send-data/agent/flow/config-language/syntax/ +canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/syntax/ +description: Learn about the River syntax +title: Syntax +weight: 200 +--- + +# Syntax + +The River syntax is easy to read and write. It has only two high-level elements, _Attributes_ and _Blocks_. 
+
+River is a _declarative_ language used to build programmable pipelines.
+The order of blocks and attributes within the River configuration file isn't important.
+The language considers all direct and indirect dependencies between elements to determine their relationships.
+
+## Comments
+
+River configuration files support single-line `//` and block `/* */` comments.
+
+## Identifiers
+
+River considers an identifier valid if it consists of one or more UTF-8 letters (A through Z, both uppercase and lowercase),
+digits, or underscores, but doesn't start with a digit.
+
+## Attributes and Blocks
+
+### Attributes
+
+You use _Attributes_ to configure individual settings.
+They always take the form of `ATTRIBUTE_NAME = ATTRIBUTE_VALUE`.
+They can appear either as top-level elements or nested within blocks.
+
+The following example sets the `log_level` attribute to `"debug"`.
+
+```river
+log_level = "debug"
+```
+
+The `ATTRIBUTE_NAME` must be a valid River [identifier][].
+
+The `ATTRIBUTE_VALUE` can be either a constant value of a valid River [type][] (for example, a string, boolean, or number),
+or an [_expression_][expression] to represent or compute more complex attribute values.
+
+### Blocks
+
+You use _Blocks_ to configure the {{< param "PRODUCT_ROOT_NAME" >}}'s behavior as well as {{< param "PRODUCT_NAME" >}}
+components by grouping any number of attributes or nested blocks using curly braces.
+Blocks have a _name_, an optional _label_, and a body that contains any number of arguments and nested unlabeled blocks.
+
+Some blocks can be defined more than once.
+
+#### Examples
+
+You can use the following pattern to create an unlabeled block.
+
+```river
+BLOCK_NAME {
+  // Block body can contain attributes and nested unlabeled blocks
+  IDENTIFIER = EXPRESSION // Attribute
+
+  NESTED_BLOCK_NAME {
+    // Nested block body
+  }
+}
+```
+
+You can use the following pattern to create a labeled block.
+
+```river
+// Pattern for creating a labeled block:
+BLOCK_NAME "BLOCK_LABEL" {
+  // Block body can contain attributes and nested unlabeled blocks
+  IDENTIFIER = EXPRESSION // Attribute
+
+  NESTED_BLOCK_NAME {
+    // Nested block body
+  }
+}
+```
+
+#### Block naming rules
+
+The `BLOCK_NAME` has to be recognized by {{< param "PRODUCT_NAME" >}} as either a valid component name or a special block for configuring global settings.
+If the block requires a `BLOCK_LABEL`, the label must be a valid River [identifier][] wrapped in double quotes.
+In these cases, you use the label to disambiguate between multiple top-level blocks of the same name.
+
+The following snippet defines a block named `local.file` with its label set to "token".
+The block's body sets `filename` to the content of the `TOKEN_FILE_PATH` environment variable by using an expression,
+and the `is_secret` attribute is set to the boolean `true`, marking the file content as sensitive.
+
+```river
+local.file "token" {
+  filename  = env("TOKEN_FILE_PATH") // Use an expression to read from an env var.
+  is_secret = true
+}
+```
+
+## Terminators
+
+All block and attribute definitions are followed by a newline, which River calls a _terminator_, as it terminates the current statement.
+
+A newline is treated as a terminator when it follows any expression, `]`, `)`, or `}`.
+River ignores other newlines, and you can enter as many newlines as you want.
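+
+As a sketch, the following snippet marks where the terminators fall, reusing the attribute and block examples from earlier on this page.
+
+```river
+log_level = "debug" // The newline after this expression terminates the attribute.
+
+local.file "token" {
+  filename  = env("TOKEN_FILE_PATH")
+  is_secret = true
+} // The newline after this closing "}" terminates the block definition.
+```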
+ +[identifier]: #identifiers +[identifier]: #identifiers + +{{% docs/reference %}} +[expression]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions" +[expression]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions" +[type]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values" +[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values" +{{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/concepts/configuration_language.md b/docs/sources/flow/concepts/configuration_language.md deleted file mode 100644 index 55671679714e..000000000000 --- a/docs/sources/flow/concepts/configuration_language.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -aliases: -- ../../concepts/configuration-language/ -- /docs/grafana-cloud/agent/flow/concepts/configuration_language/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/configuration_language/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/configuration_language/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/configuration_language/ -title: Configuration language concepts -description: Learn about configuration language concepts -weight: 400 ---- - -# Configuration language concepts - -The Grafana Agent Flow _configuration language_ refers to the language used in -configuration files which define and configure components to run. - -The configuration language is called River, a Terraform/HCL-inspired language: - -```river -prometheus.scrape "default" { - targets = [{ - "__address__" = "demo.robustperception.io:9090", - }] - forward_to = [prometheus.remote_write.default.receiver] -} - -prometheus.remote_write "default" { - endpoint { - url = "http://localhost:9009/api/prom/push" - } -} -``` - -River was designed with the following requirements in mind: - -* _Fast_: The configuration language must be fast so the component controller - can evaluate changes as quickly as possible. -* _Simple_: The configuration language must be easy to read and write to - minimize the learning curve. -* _Debuggable_: The configuration language must give detailed information when - there's a mistake in the configuration file. - -## Attributes - -_Attributes_ are used to configure individual settings. They always take the -form of `ATTRIBUTE_NAME = ATTRIBUTE_VALUE`. - -```river -log_level = "debug" -``` - -This sets the `log_level` attribute to `"debug"`. - -## Expressions - -Expressions are used to compute the value of an attribute. The simplest -expressions are constant values like `"debug"`, `32`, or `[1, 2, 3, 4]`. River -supports more complex expressions, such as: - -* Referencing the exports of components: `local.file.password_file.content` -* Mathematical operations: `1 + 2`, `3 * 4`, `(5 * 6) + (7 + 8)` -* Equality checks: `local.file.file_a.content == local.file.file_b.content` -* Calling functions from River's standard library: `env("HOME")` (retrieve the - value of the `HOME` environment variable) - -Expressions may be used for any attribute inside a component definition. - -### Referencing component exports - -The most common expression is to reference the exports of a component like -`local.file.password_file.content`. A reference to a component's exports is -formed by merging the component's name (e.g., `local.file`), label (e.g., -`password_file`), and export name (e.g., `content`), delimited by period. 
- -## Blocks - -_Blocks_ are used to configure components and groups of attributes. Each block -can contain any number of attributes or nested blocks. - -```river -prometheus.remote_write "default" { - endpoint { - url = "http://localhost:9009/api/prom/push" - } -} -``` - -This file has two blocks: - -* `prometheus.remote_write "default"`: A labeled block which instantiates a - `prometheus.remote_write` component. The label is the string `"default"`. - -* `endpoint`: An unlabeled block inside the component which configures an - endpoint to send metrics to. This block sets the `url` attribute to specify - what the endpoint is. - -## More information - -River is documented in detail in [Configuration language][config-docs] section -of the Grafana Agent Flow docs. - -{{% docs/reference %}} -[config-docs]: "/docs/agent/ -> /docs/agent//flow/config-language" -[config-docs]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language" -{{% /docs/reference %}} diff --git a/docs/sources/flow/concepts/modules.md b/docs/sources/flow/concepts/modules.md index 77cae3170d36..940357f30127 100644 --- a/docs/sources/flow/concepts/modules.md +++ b/docs/sources/flow/concepts/modules.md @@ -4,62 +4,57 @@ aliases: - /docs/grafana-cloud/agent/flow/concepts/modules/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/modules/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/modules/ +- /docs/grafana-cloud/send-data/agent/flow/concepts/modules/ canonical: https://grafana.com/docs/agent/latest/flow/concepts/modules/ -title: Modules description: Learn about modules +title: Modules weight: 300 --- # Modules -_Modules_ are a way to create Grafana Agent Flow configurations which can be -loaded as a component. Modules are a great way to parameterize a configuration -to create reusable pipelines. +You use _Modules_ to create {{< param "PRODUCT_NAME" >}} configurations that you can load as a component. +Modules are a great way to parameterize a configuration to create reusable pipelines. -Modules are Grafana Agent Flow configurations which have: +Modules are {{< param "PRODUCT_NAME" >}} configurations which have: -* Arguments: settings which configure a module. -* Exports: named values which a module exposes to the consumer of the module. -* Components: Grafana Agent Flow Components to run when the module is running. +* _Arguments_: Settings that configure a module. +* _Exports_: Named values that a module exposes to the consumer of the module. +* _Components_: {{< param "PRODUCT_NAME" >}} components to run when the module is running. -Modules are loaded into Grafana Agent Flow by using a [Module -loader](#module-loaders). +You use a [Module loader][] to load Modules into {{< param "PRODUCT_NAME" >}}. -Refer to the documentation for the [argument block][] and [export block][] to -learn how to define arguments and exports for a module. +Refer to [argument block][] and [export block][] to learn how to define arguments and exports for a module. ## Module loaders -A _Module loader_ is a Grafana Agent Flow component which retrieves a module -and runs the components defined inside of it. +A _Module loader_ is a {{< param "PRODUCT_NAME" >}} component that retrieves a module and runs the defined components. -Module loader components are responsible for: +Module loader components are responsible for the following functions: -* Retrieving the module source to run. -* Creating a [Component controller][] for the module to run in. 
+* Retrieving the module source. +* Creating a [Component controller][] for the module. * Passing arguments to the loaded module. * Exposing exports from the loaded module. -Module loaders typically are called `module.LOADER_NAME`. The list of module -loader components can be found in the list of Grafana Agent Flow -[Components][]. +Module loaders are typically called `module.LOADER_NAME`. +{{% admonition type="note" %}} Some module loaders may not support running modules with arguments or exports. -Refer to the documentation for the module loader you are using for more -information. +{{% /admonition %}} + +Refer to [Components][] for more information about the module loader components. ## Module sources -Modules are designed to be flexible, and can have their configuration retrieved -from anywhere, such as: +Modules are flexible, and you can retrieve their configuration anywhere, such as: -* The local filesystem -* An S3 bucket -* An HTTP endpoint +* The local filesystem. +* An S3 bucket. +* An HTTP endpoint. -Each module loader component will support different ways of retrieving module -sources. The most generic module loader component, `module.string`, can load -modules from the export of another Flow component: +Each module loader component supports different ways of retrieving `module.sources`. +The most generic module loader component, `module.string`, can load modules from the export of another {{< param "PRODUCT_NAME" >}} component. ```river local.file "my_module" { @@ -79,14 +74,13 @@ module.string "my_module" { ## Example module -This example module manages a pipeline which filters out debug- and info-level -log lines which are given to it: +This example module manages a pipeline that filters out debug-level and info-level log lines. ```river -// argument.write_to is a required argument which specifies where filtered -// log lines should be sent. +// argument.write_to is a required argument that specifies where filtered +// log lines are sent. // -// The value of the argument can be retrieved in this file with +// The value of the argument is retrieved in this file with // argument.write_to.value. argument "write_to" { optional = false @@ -105,7 +99,7 @@ loki.process "filter" { forward_to = argument.write_to.value } -// export.filter_input exports a value to the consumer of the module. +// export.filter_input exports a value to the module consumer. export "filter_input" { // Expose the receiver of loki.process so the module consumer can send // logs to our loki.process component. @@ -113,8 +107,7 @@ export "filter_input" { } ``` -The module above can be saved to a file and then used as a processing step -before writing logs to Loki: +You can save the module to a file and then use it as a processing step before writing logs to Loki. 
```river loki.source.file "self" { @@ -140,13 +133,15 @@ loki.write "default" { } ``` +[Module loader]: #module-loaders + {{% docs/reference %}} [argument block]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/argument.md" -[argument block]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/argument.md" +[argument block]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/argument.md" [export block]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/export.md" -[export block]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/export.md" +[export block]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/export.md" [Component controller]: "/docs/agent/ -> /docs/agent//flow/concepts/component_controller.md" -[Component controller]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/component_controller.md" +[Component controller]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/component_controller.md" [Components]: "/docs/agent/ -> /docs/agent//flow/reference/components" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components" +[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components" {{% /docs/reference %}} diff --git a/docs/sources/flow/config-language/_index.md b/docs/sources/flow/config-language/_index.md deleted file mode 100644 index a363b2977642..000000000000 --- a/docs/sources/flow/config-language/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/config-language/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/ -- configuration-language/ -canonical: https://grafana.com/docs/agent/latest/flow/config-language/ -title: Configuration language -description: Learn about the configuration language -weight: 400 ---- - -# Configuration language - -Grafana Agent Flow contains a custom configuration language called River to -dynamically configure and connect components. - -River aims to reduce errors in configuration files by making configurations -easier to read and write. River configurations are done in blocks which can be -easily copied-and-pasted from documentation to help users get started as -quickly as possible. - -A River configuration file tells Grafana Agent Flow which components to launch -and how to bind them together into a pipeline. - -The syntax of River is centered around blocks, attributes, and expressions: - -```river -// Create a local.file component labeled my_file. -// This can be referenced by other components as local.file.my_file. -local.file "my_file" { - filename = "/tmp/my-file.txt" -} - -// Pattern for creating a labeled block, which the above block follows: -BLOCK_NAME "BLOCK_LABEL" { - // Block body - IDENTIFIER = EXPRESSION // Attribute -} - -// Pattern for creating an unlabeled block: -BLOCK_NAME { - // Block body - IDENTIFIER = EXPRESSION // Attribute -} -``` - -> You may have noticed that River looks similar to HCL, the language used by -> Terraform and other Hashicorp projects. River was inspired by HCL, but is a -> distinct language with different syntax and features, such as first-class -> functions. 
If you are already familiar with HCL or Terraform, writing River -> should seem mostly natural to you. - -> For historical context on why we decided to introduce River, you can read the -> original [RFC][]. - -* Blocks are a group of related settings, and usually represent creating a - component. Blocks have a name which consist of zero or more identifiers - separated by `.` (like `my_block` or `local.file` above), an optional user - label, and a body which contains attributes and nested blocks. - -* Attributes appear within blocks and assign a value to a name. - -* Expressions represent a value, either literally or by referencing and - combining other values. Expressions are used to compute a value for an - attribute. - -River is declarative, so the ordering of components, blocks, and attributes -within a block is not significant. The order of operations is determined by the -relationship between components. - -[RFC]: https://github.com/grafana/agent/blob/97a55d0d908b26dbb1126cc08b6dcc18f6e30087/docs/rfcs/0005-river.md - -## Tooling - -To help you write configuration files in River, the following tools are available: - -* Experimental editor support for - * [vim](https://github.com/rfratto/vim-river) - * [VSCode](https://github.com/rfratto/vscode-river) - * [river-mode](https://github.com/jdbaldry/river-mode) for Emacs -* Code formatting using the [`agent fmt` command]({{< relref "../reference/cli/fmt" >}}) - -You can also start developing your own tooling using the agent repository as a -go package or use the [tree-sitter -grammar](https://github.com/grafana/tree-sitter-river) with other programming languages. diff --git a/docs/sources/flow/config-language/components.md b/docs/sources/flow/config-language/components.md deleted file mode 100644 index 2e04643e1d53..000000000000 --- a/docs/sources/flow/config-language/components.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -aliases: -- ../configuration-language/components/ -- /docs/grafana-cloud/agent/flow/config-language/components/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/components/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/components/ -canonical: https://grafana.com/docs/agent/latest/flow/config-language/components/ -title: Components configuration language -description: Learn about the components configuration language -weight: 300 ---- - -# Components configuration language -Components are the defining feature of Grafana Agent Flow. They are small, -reusable pieces of business logic that perform a single task (like retrieving -secrets or collecting Prometheus metrics) and can be wired together to form -programmable pipelines of telemetry data. - -Under the hood, components are orchestrated via the [_component -controller_]({{< relref "../concepts/component_controller.md" >}}), which is -responsible for scheduling them, reporting their health and debug status, -re-evaluating their arguments and providing their exports. - -## Configuring components -Components are created by defining a top-level River block. All components -are identified by their name, describing what the component is responsible for, -and a user-specified _label_. - -The [components docs]({{< relref "../reference/components/_index.md" >}}) contain a list -of all available components. Each one has a complete reference page, so getting -a component to work for you should be as easy as reading its documentation and -copy/pasting from an example. 
- -## Arguments and exports -Most user interactions with components will center around two basic concepts; -_arguments_ and _exports_. - -* _Arguments_ are settings which modify the behavior of a component. They can - be any number of attributes or nested unlabeled blocks, some of them being -required and some being optional. Any optional arguments that are not set will -take on their default values. - -* _Exports_ are zero or more output values that can be referred to by other - components, and can be of any River type. - -Here's a quick example; the following block defines a `local.file` component -labeled "targets". The `local.file.targets` component will then expose the -file `content` as a string in its exports. - -The `filename` attribute is a _required_ argument; the user can also define a -number of _optional_ ones, in this case `detector`, `poll_frequency` and -`is_secret`, which configure how and how often the file should be polled -as well as whether its contents are sensitive or not. - -```river -local.file "targets" { - // Required argument - filename = "/etc/agent/targets" - - // Optional arguments: Components may have some optional arguments that - // do not need to be defined. - // - // The optional arguments for local.file are is_secret, detector, and - // poll_frequency. - - // Exports: a single field named `content` - // It can be referred to as `local.file.targets.content` -} -``` - -## Referencing components -To wire components together, one can use the exports of one as the arguments -to another by using references. References can only appear in components. - -For example, here's a component that scrapes Prometheus metrics. The `targets` -field is populated with two scrape targets; a constant one `localhost:9001` and -an expression that ties the target to the value of -`local.file.targets.content`. - -```river -prometheus.scrape "default" { - targets = [ - { "__address__" = local.file.targets.content }, // tada! - { "__address__" = "localhost:9001" }, - ] - - forward_to = [prometheus.remote_write.default.receiver] - scrape_config { - job_name = "default" - } -} -``` - -Every time the file contents change, the `local.file` will update its exports, -so the new value will be provided to the `prometheus.scrape` targets field. - -Each argument and exported field has an underlying [type]({{< relref "./expressions/types_and_values.md" >}}). -River will type-check expressions before assigning a value to an attribute; the -documentation of each component will have more information about the ways that -you can wire components together. - -In the previous example, the contents of the `local.file.targets.content` -expression must first be evaluated in a concrete value then type-checked and -substituted into `prometheus.scrape.default` for it to be configured in turn. 
diff --git a/docs/sources/flow/config-language/expressions/_index.md b/docs/sources/flow/config-language/expressions/_index.md deleted file mode 100644 index 5693e91ceeb7..000000000000 --- a/docs/sources/flow/config-language/expressions/_index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -aliases: -- ../configuration-language/expressions/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/ -canonical: https://grafana.com/docs/agent/latest/flow/config-language/expressions/ -title: Expressions -description: Learn about expressions -weight: 400 ---- - -# Expressions - -Expressions represent or compute values that can be assigned to attributes -within a configuration. - -Basic expressions are literal values, like `"Hello, world!"` or `true`. -Expressions may also do things like [refer to values][] exported by components, -perform arithmetic, or [call functions][]. - -Expressions can be used when configuring any component. As all component -arguments have an underlying [type][], River will type-check expressions before -assigning the result to an attribute. - -[refer to values]: {{< relref "./referencing_exports.md" >}} -[call functions]: {{< relref "./function_calls.md" >}} -[type]: {{< relref "./types_and_values.md" >}} - diff --git a/docs/sources/flow/config-language/expressions/function_calls.md b/docs/sources/flow/config-language/expressions/function_calls.md deleted file mode 100644 index 77fab0d2df53..000000000000 --- a/docs/sources/flow/config-language/expressions/function_calls.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -aliases: -- ../../configuration-language/expressions/function-calls/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/function_calls/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/function_calls/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/function_calls/ -canonical: https://grafana.com/docs/agent/latest/flow/config-language/expressions/function_calls/ -title: Function calls -description: Learn about function calls -weight: 400 ---- - -# Function calls -Function calls is one more River feature that lets users build richer -expressions. - -Functions take zero or more arguments as their input and always return a single -value as their output. Functions cannot be constructed by users, but can be -either called from River's standard library, or when exported by a component. - -In case a function fails, the expression will not be evaluated and an error -will be reported. - -## Standard library functions -River contains a [standard library][] of useful functions. Some enable -interaction with the host system (e.g. reading from an environment variable), or -allow for more complex expressions (e.g. concatenating arrays or decoding JSON -strings into objects). 
-```river -env("HOME") -json_decode(local.file.cfg.content)["namespace"] -``` - -[standard library]: {{< relref "../../reference/stdlib" >}} diff --git a/docs/sources/flow/config-language/expressions/operators.md b/docs/sources/flow/config-language/expressions/operators.md deleted file mode 100644 index c9c724952d81..000000000000 --- a/docs/sources/flow/config-language/expressions/operators.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -aliases: -- ../../configuration-language/expressions/operators/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/operators/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/operators/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/operators/ -canonical: https://grafana.com/docs/agent/latest/flow/config-language/expressions/operators/ -title: Operators -description: Learn about operators -weight: 300 ---- - -# Operators -River uses a set of operators that most users should be familiar with. All operations -follow the standard [PEMDAS](https://en.wikipedia.org/wiki/Order_of_operations) -rule for operator precedence. - -## Arithmetic operators - -Operator | Description --------- | ----------- -`+` | Adds two numbers. -`-` | Subtracts two numbers. -`*` | Multiplies two numbers. -`/` | Divides two numbers. -`%` | Computes the remainder after dividing two numbers. -`^` | Raises the number to the specified power. - -## String operators - -Operator | Description --------- | ----------- -`+` | Concatenates two strings. - -## Comparison operators - -Operator | Description --------- | ----------- -`==` | `true` when two values are equal. -`!=` | `true` when two values are not equal. -`<` | `true` when the left value is less than the right value. -`<=` | `true` when the left value is less than or equal to the right value. -`>` | `true` when the left value is greater than the right value. -`>=` | `true` when the left value is greater than or equal to the right value. - -The equality operators `==` and `!=` can be applied to any operands. - -On the other hand, for the ordering operators `<`, `<=`, `>` and `>=`, the two -operands must both be _orderable_ and of the same type. The results of the -comparisons are defined as follows: - -* Boolean values are equal if they are either both true or both false. -* Numerical (integer and floating-point) values are orderable, in the usual - way. -* String values are orderable lexically byte-wise. -* Objects are equal if all their fields are equal. -* Array values are equal if their corresponding elements are equal. - -## Logical operators - -Operator | Description --------- | ----------- -`&&` | `true` when both the left _and_ right values are `true`. -`\|\|` | `true` when either the left _or_ right value is `true`. -`!` | Negates a boolean value. - -Logical operators apply to boolean values and yield a boolean result. - -## Assignment operator -River uses `=` as its assignment operator. - -An assignment statement may only assign a single value. -In assignments, each value must be _assignable_ to the attribute or object key -to which it is being assigned. - -* The `null` value can be assigned to any attribute. -* Numerical, string, boolean, array, function, capsule and object types are - assignable to attributes of the corresponding type. -* Numbers can be assigned to string attributes with an implicit conversion. -* Strings can be assigned to numerical attributes, provided that they represent - a number. -* Blocks are not assignable.
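For illustration, here is a sketch combining several of the operators above; the attribute names are arbitrary and serve only as examples:

```river
sum     = 3 * (2 + 2)            // arithmetic with grouping: 12
name    = "grafana" + "-agent"   // string concatenation: "grafana-agent"
enabled = 10 % 3 == 1 && !false  // remainder, comparison, and logical operators: true
```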
- -## Brackets - -Brackets | Description --------- | ----------- -`{ }` | Defines blocks and objects. -`( )` | Groups and prioritizes expressions. -`[ ]` | Defines arrays. - -In the following example we can see the use of curly braces and square brackets -to define an object and an array. -```river -obj = { app = "agent", namespace = "dev" } -arr = [1, true, 7 * (1+1), 3] -``` - -## Access operators - -Operator | Description --------- | ----------- -`[ ]` | Access a member of an array or object. -`.` | Access a named member of an object or an exported field of a component. - -River's access operators support accessing of arbitrarily nested values. -Square brackets can be used to access zero-indexed array indices as well as -object fields by enclosing the field name in double quotes. -The dot operator can be used to access both object fields (without double -quotes) and component exports. -```river -obj["app"] -arr[1] - -obj.app -local.file.token.content -``` - -If the `[ ]` operator is used to access a member of an object where the member -doesn't exist, the resulting value is `null`. - -If the `.` operator is used to access a named member of an object where the -named member doesn't exist, an error is generated. diff --git a/docs/sources/flow/config-language/expressions/referencing_exports.md b/docs/sources/flow/config-language/expressions/referencing_exports.md deleted file mode 100644 index 3a23508a63a6..000000000000 --- a/docs/sources/flow/config-language/expressions/referencing_exports.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -aliases: -- ../../configuration-language/expressions/referencing-exports/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/referencing_exports/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/referencing_exports/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/referencing_exports/ -canonical: https://grafana.com/docs/agent/latest/flow/config-language/expressions/referencing_exports/ -title: Referencing component exports -description: Learn about referencing component exports -weight: 200 ---- - -# Referencing component exports -Referencing exports is what enables River to dynamically configure and connect -components using expressions. While components can work in isolation, they're -more useful when one component's behavior and data flow is bound to the exports -of another, building a dependency relationship between the two. - -Such references can only appear as part of another component's arguments or a -config block's fields. That means that components cannot reference themselves. - -## Using references -These references are built by combining the component's name, label and named -export with dots. - -For example, the contents of a file exported by the `local.file` component -labeled `target` might be referenced as `local.file.target.content`. -Similarly, a `prometheus.remote_write` component instance labeled `onprem` will -expose its receiver for metrics on `prometheus.remote_write.onprem.receiver`. 
- -Let's see that in action: -```river -local.file "target" { - filename = "/etc/agent/target" -} - -prometheus.scrape "default" { - targets = [{ "__address__" = local.file.target.content }] - forward_to = [prometheus.remote_write.onprem.receiver] -} - -prometheus.remote_write "onprem" { - endpoint { - url = "http://prometheus:9009/api/prom/push" - } -} -``` - -In the previous example, we managed to wire together a very simple pipeline by -writing a few River expressions. - -

-_Diagram: Flow of example pipeline._

- -As with all expressions, once the value is resolved, it must match the [type][] -of the attribute being assigned to. While users can only configure attributes -using the basic River types, the exports of components can also take on special -internal River types such as Secrets or Capsules, which expose different -functionality. - - -[type]: {{< relref "./types_and_values.md" >}} diff --git a/docs/sources/flow/config-language/expressions/types_and_values.md b/docs/sources/flow/config-language/expressions/types_and_values.md deleted file mode 100644 index 764be479661c..000000000000 --- a/docs/sources/flow/config-language/expressions/types_and_values.md +++ /dev/null @@ -1,231 +0,0 @@ ---- -aliases: -- ../../configuration-language/expressions/types-and-values/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/types_and_values/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/types_and_values/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/types_and_values/ -canonical: https://grafana.com/docs/agent/latest/flow/config-language/expressions/types_and_values/ -title: Types and values -description: Learn about the River types and values -weight: 100 ---- - -# Types and values - -## Types - -River uses the following types for its values: - -* `number`: Any numeric value, like `3` or `3.14`. -* `string`: A sequence of Unicode characters representing text, like `"Hello, world!"`. -* `bool`: A boolean value, either `true` or `false`. -* `array`: A sequence of values, like `[1, 2, 3]`. Elements within the - list are indexed by whole numbers, starting with zero. -* `object`: A group of values which are identified by named labels, like - `{ name = "John" }`. -* `function`: A value representing a routine which can be executed with - arguments to compute another value, like `env("HOME")`. Functions take zero - or more arguments as input and always return a single value as output. -* `null`: A type that has no value. - -### Naming convention - -In addition to the types above, [component reference][] documentation will use -the following conventions for referring to types: - -* `any`: A value of any type. -* `map(T)`: an `object` where the value type is `T`. For example, `map(string)` - is an object where all the values are strings. The key type of an object is - always a string, or an identifier which is converted into a string. -* `list(T)`: an `array` where the value type is `T`. For example, `list(string)` - is an array where all the values are strings. -* `duration`: a `string` denoting a duration of time, such as `"1d"`, `"1h30m"`, - `"10s"`. Valid units are `d` (for days), `h` (for hours), `m` (for minutes), - `s` (for seconds), `ms` (for milliseconds), `ns` (for nanoseconds). Values of - descending units can be combined to add their values together; `"1h30m"` is - the same as `"90m"`. - -[component reference]: {{< relref "../../reference/components" >}} - -## Numbers - -River handles integers, unsigned integers and floating-point values as a single -'number' type which simplifies writing and reading River configuration files. - -```river -3 == 3.00 // true -5.0 == (10 / 2) // true -1e+2 == 100 // true -2e-3 == 0.002 // true -``` - -## Strings - -Strings are represented by sequences of Unicode characters surrounded by double -quotes `""`: - -```river -"Hello, world!" -``` - -A `\` in a string starts an escape sequence to represent a special character. 
-The supported escape sequences are as follows: - -| Sequence | Replacement | -| -------- | ----------- | -| `\\` | The `\` character `U+005C` | -| `\a` | The alert or bell character `U+0007` | -| `\b` | The backspace character `U+0008` | -| `\f` | The formfeed character `U+000C` | -| `\n` | The newline character `U+000A` | -| `\r` | The carriage return character `U+000D` | -| `\t` | The horizontal tab character `U+0009` | -| `\v` | The vertical tab character `U+000B` | -| `\'` | The `'` character `U+0027` | -| `\"` | The `"` character `U+0022`, which prevents terminating the string | -| `\NNN` | A literal byte (NNN is three octal digits) | -| `\xNN` | A literal byte (NN is two hexadecimal digits) | -| `\uNNNN` | A Unicode character from the basic multilingual plane (NNNN is four hexadecimal digits) | -| `\UNNNNNNNN` | A Unicode character from supplementary planes (NNNNNNNN is eight hexadecimal digits) | - -## Raw strings - -Raw strings are represented by sequences of Unicode characters surrounded by backticks ``` `` ```. -Raw strings do not support any escape sequences: - -```river -`Hello, "world"!` -``` - -Within the backticks, any character may appear except a backtick. A backtick -can be included by concatenating a double quoted string that contains a -backtick using `+`. - -A multiline raw string will be interpretted exactly as written: - -```river -`Hello, -"world"!` -``` - -is interpretted as a string with the value: - -```string -Hello, -"world"! -``` - -## Bools - -Bools are represented by the symbols `true` and `false`. - -## Arrays - -Array values are constructed by a sequence of comma separated values surrounded -by square brackets `[]`: - -```river -[0, 1, 2, 3] -``` - -Values in array elements may be placed on separate lines for readability. A -comma after the final value must be present if the closing bracket `]` -is on a different line as the final value: - -```river -[ - 0, - 1, - 2, -] -``` - -## Objects - -Object values are constructed by a sequence of comma separated key-value pairs -surrounded by curly braces `{}`: - -```river -{ - first_name = "John", - last_name = "Doe", -} -``` - -A comma after the final key-value pair may be omitted if the closing curly -brace `}` is on the same line as the final pair: - -```river -{ name = "John" } -``` - -If the key is not a valid identifier, it must be wrapped in double quotes like -a string: - -```river -{ - "app.kubernetes.io/name" = "mysql", - "app.kubernetes.io/instance" = "mysql-abcxyz", - namespace = "default", -} -``` - -> **NOTE**: Be careful not to confuse objects with blocks. -> -> An _object_ is a value assigned to an [Attribute][Attributes], where -> commas **must** be provided between key-value pairs on separate lines. -> -> A [Block][Blocks] is a named structural element composed of multiple attributes, -> where commas **must not** be provided between attributes. - -[Attributes]: {{< relref "../syntax.md#Attributes" >}} -[Blocks]: {{< relref "../syntax.md#Blocks" >}} - -## Functions - -Function values cannot be constructed by users, but can be called from the -standard library or when exported by a component. - -## Null - -The null value is represented by the symbol `null`. - -## Special Types - -#### Secrets - -A `secret` is a special type of string which is never displayed to the user. -`string` values may be assigned to an attribute expecting a `secret`, but never -the inverse; it is not possible to convert a secret to a string or assign a -secret to an attribute expecting a string. 
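As a short sketch using the `local.file` component described earlier in these docs (the file path is arbitrary): with `is_secret = true`, the exported `content` is a `secret` rather than a `string`, so it can only be assigned to attributes that expect a `secret`:

```river
local.file "api_key" {
  filename  = "/var/lib/secrets/api-key" // arbitrary example path
  is_secret = true                        // content is exported as a secret instead of a string
}
```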
- -#### Capsules - -River has a special type called a `capsule`, which represents a category of -_internal_ types used by Flow. Each capsule type has a unique name and will be -represented to the user as `capsule("SOME_INTERNAL_NAME")`. -Capsule values cannot be constructed by the user, but can be used in -expressions as any other type. Capsules are not inter-compatible and an -attribute expecting a capsule can only be given a capsule of the same internal -type. That means, if an attribute expects a `capsule("prometheus.Receiver")`, -it can only be assigned a `capsule("prometheus.Receiver")` type. The specific -type of capsule expected is explicitly documented for any component which uses -or exports them. - -In the following example, the `prometheus.remote_write` component exports a -`receiver`, which is a `capsule("prometheus.Receiver")` type. This can then be -used in the `forward_to` attribute of `prometheus.scrape`, which -expects an array of `capsule("prometheus.Receiver")`s: - -```river -prometheus.remote_write "default" { - endpoint { - url = "http://localhost:9090/api/v1/write" - } -} - -prometheus.scrape "default" { - targets = [/* ... */] - forward_to = [prometheus.remote_write.default.receiver] -} -``` diff --git a/docs/sources/flow/config-language/files.md b/docs/sources/flow/config-language/files.md deleted file mode 100644 index 9e62da529231..000000000000 --- a/docs/sources/flow/config-language/files.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -aliases: -- ../configuration-language/files/ -- /docs/grafana-cloud/agent/flow/config-language/files/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/files/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/files/ -canonical: https://grafana.com/docs/agent/latest/flow/config-language/files/ -title: Files -description: Learn about River files -weight: 100 ---- - -# Files -River files are plaintext files with the `.river` file extension. Each River -file may be referred to as a "configuration file," or a "River configuration." - -River files are required to be UTF-8 encoded, and are permitted to contain -Unicode characters. River files can use both Unix-style line endings (LF) and -Windows-style line endings (CRLF), but formatters may replace all line endings -with Unix-style ones. diff --git a/docs/sources/flow/config-language/syntax.md b/docs/sources/flow/config-language/syntax.md deleted file mode 100644 index 5e21a5e37c1f..000000000000 --- a/docs/sources/flow/config-language/syntax.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -aliases: -- ../configuration-language/syntax/ -- /docs/grafana-cloud/agent/flow/config-language/syntax/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/syntax/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/syntax/ -canonical: https://grafana.com/docs/agent/latest/flow/config-language/syntax/ -title: Syntax -description: Learn about the River syntax -weight: 200 ---- - -# Syntax - -The River syntax is designed to be easy to read and write. Essentially, there -are just two high-level elements to it: _Attributes_ and _Blocks_. - -River is a _declarative_ language used to build programmable pipelines. -As such, the ordering of blocks and attributes within the River configuration -file is not important; the language will consider all direct and indirect -dependencies between elements to determine their relationships. 
- -## Comments - -River configuration files support single-line `//` as well as block `/* */` -comments. - -## Identifiers - -River considers an identifier as valid if it consists of one or more UTF-8 -letters (A through Z, both upper- and lower-case), digits or underscores, but -doesn't start with a digit. - -## Attributes and Blocks - -### Attributes - -_Attributes_ are used to configure individual settings. They always take the -form of `ATTRIBUTE_NAME = ATTRIBUTE_VALUE`. They can appear either as -top-level elements or nested within blocks. - -The following example sets the `log_level` attribute to `"debug"`. - -```river -log_level = "debug" -``` - -The `ATTRIBUTE_NAME` must be a valid River [identifier](#identifiers). - -The `ATTRIBUTE_VALUE` can be either a constant value of a valid River -[type]({{< relref "./expressions/types_and_values.md" >}}) (eg. string, -boolean, number) or an [_expression_]({{< relref "./expressions/_index.md" >}}) -to represent or compute more complex attribute values. - -### Blocks - -_Blocks_ are used to configure the Agent behavior as well as Flow components by -grouping any number of attributes or nested blocks using curly braces. -Blocks have a _name_, an optional _label_ and a body that contains any number -of arguments and nested unlabeled blocks. - -Some blocks support being defined more than once. - -#### Pattern for creating an unlabeled block - -```river -BLOCK_NAME { - // Block body can contain attributes and nested unlabeled blocks - IDENTIFIER = EXPRESSION // Attribute - - NESTED_BLOCK_NAME { - // Nested block body - } -} -``` - -#### Pattern for creating a labeled block - -```river -// Pattern for creating a labeled block: -BLOCK_NAME "BLOCK_LABEL" { - // Block body can contain attributes and nested unlabeled blocks - IDENTIFIER = EXPRESSION // Attribute - - NESTED_BLOCK_NAME { - // Nested block body - } -} -``` - -#### Block naming rules - -The `BLOCK_NAME` has to be recognized by Flow as either a valid component -name or a special block for configuring global settings. If the `BLOCK_LABEL` -has to be set, it must be a valid River [identifier](#identifiers) wrapped in -double quotes. In these cases the label will be used to disambiguate between -multiple top-level blocks of the same name. - -The following snippet defines a block named `local.file` with its label set to -"token". The block's body sets `filename` to the content of the `TOKEN_FILE_PATH` -environment variable by using an expression and the `is_secret` attribute is -set to the boolean `true`, marking the file content as sensitive. -```river -local.file "token" { - filename = env("TOKEN_FILE_PATH") // Use an expression to read from an env var. - is_secret = true -} -``` - -## Terminators - -All block and attribute definitions are followed by a newline, which River -calls a _terminator_, as it terminates the current statement. - -A newline is treated as terminator when it follows any expression, `]`, -`)` or `}`. Other newlines are ignored by River and and a user can enter as many -newlines as they want. 
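To illustrate, a newline that does not follow an expression, `]`, `)`, or `}` is ignored, so an attribute value may span several lines; the statement in the sketch below only terminates at the newline after the closing `]`:

```river
// A single attribute statement spanning multiple lines.
targets = [
  "localhost:9090",
  "localhost:9091",
]
```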
diff --git a/docs/sources/flow/getting-started/_index.md b/docs/sources/flow/getting-started/_index.md deleted file mode 100644 index 79d34abe6f92..000000000000 --- a/docs/sources/flow/getting-started/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/getting-started/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/ -- getting_started/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/ -menuTitle: Get started -title: Get started with Grafana Agent in flow mode -description: Learn how to use Grafana Agent in flow mode -weight: 200 ---- - -# Get started with Grafana Agent in flow mode - -This section details guides for getting started with Grafana Agent in flow mode. - -{{< section >}} diff --git a/docs/sources/flow/getting-started/collect-opentelemetry-data.md b/docs/sources/flow/getting-started/collect-opentelemetry-data.md deleted file mode 100644 index ecf484271d37..000000000000 --- a/docs/sources/flow/getting-started/collect-opentelemetry-data.md +++ /dev/null @@ -1,371 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/getting-started/collect-opentelemetry-data/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/collect-opentelemetry-data/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/collect-opentelemetry-data/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/collect-opentelemetry-data/ -title: Collect OpenTelemetry data -description: Learn how to collect OpenTelemetry data -weight: 300 ---- - -# Collect OpenTelemetry data - -Grafana Agent Flow can be configured to collect [OpenTelemetry][]-compatible -data and forward it to any OpenTelemetry-compatible endpoint. - -This topic describes how to: - -* Configure OpenTelemetry data delivery -* Configure batching -* Receive OpenTelemetry data over OTLP - -## Components used in this topic - -* [otelcol.auth.basic][] -* [otelcol.exporter.otlp][] -* [otelcol.exporter.otlphttp][] -* [otelcol.processor.batch][] -* [otelcol.receiver.otlp][] - -## Before you begin - -* Ensure that you have basic familiarity with instrumenting applications with - OpenTelemetry. -* Have a set of OpenTelemetry applications ready to push telemetry data to - Grafana Agent Flow. -* Identify where Grafana Agent Flow will write received telemetry data. -* Be familiar with the concept of [Components][] in Grafana Agent Flow. - -## Configure an OpenTelemetry Protocol exporter - -Before components can receive OpenTelemetry data, you must have a component -responsible for exporting the OpenTelemetry data. An OpenTelemetry _exporter -component_ is responsible for writing (that is, exporting) OpenTelemetry data -to an external system. - -In this task, we will use the [otelcol.exporter.otlp][] component to send -OpenTelemetry data to a server using the OpenTelemetry Protocol (OTLP). Once an -exporter component is defined, other Grafana Agent Flow components can be used -to forward data to it. - -> Refer to the list of available [Components][] for the full list of -> `otelcol.exporter` components that can be used to export OpenTelemetry data. - -To configure an `otelcol.exporter.otlp` component for exporting OpenTelemetry -data using OTLP, complete the following steps: - -1. 
Add the following `otelcol.exporter.otlp` component to your configuration - file: - - ```river - otelcol.exporter.otlp "EXPORTER_LABEL" { - client { - endpoint = "HOST:PORT" - } - } - ``` - - 1. Replace `EXPORTER_LABEL` with a label to use for the component, such as - `default`. The label chosen must be unique across all - `otelcol.exporter.otlp` components in the same configuration file. - - 2. Replace `HOST` with the hostname or IP address of the server to send - OTLP requests to. - - 3. Replace `PORT` with the port of the server to send OTLP requests to. - -2. If your server requires basic authentication, complete the following: - - 1. Add the following `otelcol.auth.basic` component to your configuration file: - - ```river - otelcol.auth.basic "BASIC_AUTH_LABEL" { - username = "USERNAME" - password = "PASSWORD" - } - ``` - - 1. Replace `BASIC_AUTH_LABEL` with a label to use for the component, such - as `default`. The label chosen must be unique across all - `otelcol.auth.basic` components in the same configuration file. - - 2. Replace `USERNAME` with the basic authentication username to use. - - 3. Replace `PASSWORD` with the basic authentication password or API key to - use. - - 2. Add the following line inside of the `client` block of your - `otelcol.exporter.otlp` component: - - ```river - auth = otelcol.auth.basic.BASIC_AUTH_LABEL.handler - ``` - - 1. Replace `BASIC_AUTH_LABEL` with the label used for the - `otelcol.auth.basic` component in step 2.1.1. - -3. If you have more than one server to export metrics to, create a new - `otelcol.exporter.otlp` component for each additional server. - -> `otelcol.exporter.otlp` sends data using OTLP over gRPC (HTTP/2). To send to -> a server using HTTP/1.1, follow the steps above but use the -> [otelcol.exporter.otlphttp component][otelcol.exporter.otlphttp] instead. - -The following example demonstrates configuring `otelcol.exporter.otlp` with -authentication and a component which forwards data to it: - -```river -otelcol.exporter.otlp "default" { - client { - endpoint = "my-otlp-grpc-server:4317" - auth = otelcol.auth.basic.credentials.handler - } -} - -otelcol.auth.basic "credentials" { - // Retrieve credentials using environment variables. - - username = env("BASIC_AUTH_USER") - password = env("API_KEY") -} - -otelcol.receiver.otlp "example" { - grpc { - endpoint = "127.0.0.1:4317" - } - - http { - endpoint = "127.0.0.1:4318" - } - - output { - metrics = [otelcol.exporter.otlp.default.input] - logs = [otelcol.exporter.otlp.default.input] - traces = [otelcol.exporter.otlp.default.input] - } -} -``` - -For more information on writing OpenTelemetry data using the OpenTelemetry -Protocol, refer to [otelcol.exporter.otlp][]. - -## Configure batching - -Production-ready Grafana Agent Flow configurations should not send -OpenTelemetry data directly to an exporter for delivery. Instead, data is -usually sent to one or more _processor components_ that perform various -transformations on the data. - -Ensuring data is batched is a production-readiness step to improve the -compression of data and reduce the number of outgoing network requests to -external systems. - -In this task, we will configure an [otelcol.processor.batch][] component to -batch data before sending it to our exporter. - -> Refer to the list of available [Components][] for the full list of -> `otelcol.processor` components that can be used to process OpenTelemetry -> data. You can chain processors by having one processor send data to another -> processor.
- -To configure an `otelcol.processor.batch` component, complete the following -steps: - -1. Follow [Configure an OpenTelemetry Protocol - exporter](#configure-an-opentelemetry-protocol-exporter) to ensure received - data can be written to an external system. - -2. Add the following `otelcol.processor.batch` component into your - configuration file: - - ```river - otelcol.processor.batch "PROCESSOR_LABEL" { - output { - metrics = [otelcol.exporter.otlp.EXPORTER_LABEL.input] - logs = [otelcol.exporter.otlp.EXPORTER_LABEL.input] - traces = [otelcol.exporter.otlp.EXPORTER_LABEL.input] - } - } - ``` - - 1. Replace `PROCESSOR_LABEL` with a label to use for the component, such as - `default`. The label chosen must be unique across all - `otelcol.processor.batch` components in the same configuration file. - - 2. Replace `EXPORTER_LABEL` with the label for your existing - `otelcol.exporter.otlp` component. - - 3. To disable one of the telemetry types, set the relevant type in the - `output` block to the empty list, such as `metrics = []`. - - 4. To send batched data to another processor, replace the components in the - `output` list with the processor components to use. - -The following example demonstrates configuring a sequence of -`otelcol.processor` components before ultimately being exported: - -```river -otelcol.processor.memory_limiter "default" { - check_interval = "1s" - limit = "1GiB" - - output { - metrics = [otelcol.processor.batch.default.input] - logs = [otelcol.processor.batch.default.input] - traces = [otelcol.processor.batch.default.input] - } -} - -otelcol.processor.batch "default" { - output { - metrics = [otelcol.exporter.otlp.default.input] - logs = [otelcol.exporter.otlp.default.input] - traces = [otelcol.exporter.otlp.default.input] - } -} - -otelcol.exporter.otlp "default" { - client { - endpoint = "my-otlp-grpc-server:4317" - } -} -``` - -For more information on configuring OpenTelemetry data batching, refer to -[otelcol.processor.batch][]. - -## Configure an OpenTelemetry Protocol receiver - -Grafana Agent Flow can be configured to receive OpenTelemetry metrics, logs, -and traces. An OpenTelemetry _receiver_ component is responsible for receiving -OpenTelemetry data from an external system. - -In this task, we will use the [otelcol.receiver.otlp][] component to receive -OpenTelemetry data over the network using the OpenTelemetry Protocol (OTLP). A -receiver component can be configured to forward received data to other Grafana -Agent Flow components. - -> Refer to the list of available [Components][] for the full list of -> `otelcol.receiver` components that can be used to receive -> OpenTelemetry-compatible data. - -To configure an `otelcol.receiver.otlp` component for receiving OTLP data, -complete the following steps: - -1. Follow [Configure an OpenTelemetry Protocol - exporter](#configure-an-opentelemetry-protocol-exporter) to ensure received - data can be written to an external system. - -2. Optional: Follow [Configure batching](#configure-batching) to improve - compression and reduce the total amount of network requests. - -3. Add the following `otelcol.receiver.otlp` component to your configuration - file: - - ```river - otelcol.receiver.otlp "LABEL" { - output { - metrics = [COMPONENT_INPUT_LIST] - logs = [COMPONENT_INPUT_LIST] - traces = [COMPONENT_INPUT_LIST] - } - } - ``` - - 1. Replace `LABEL` with a label to use for the component, such as - `default`. 
The label chosen must be unique across all - `otelcol.receiver.otlp` components in the same configuration file. - - 2. Replace `COMPONENT_INPUT_LIST` with a comma-delimited list of component - inputs to forward received data to. For example, to send data to an - existing batch processor component, use - `otelcol.processor.batch.PROCESSOR_LABEL.input`. To send data directly - to an existing exporter component, use - `otelcol.exporter.otlp.EXPORTER_LABEL.input`. - - 3. To allow applications to send OTLP data over gRPC on port `4317`, add - the following to your `otelcol.receiver.otlp` component: - - ```river - grpc { - endpoint = "HOST:4317" - } - ``` - - 1. Replace `HOST` with a host to listen to traffic on. It is - recommended to use a narrowly-scoped listen address whenever - possible. To listen on all network interfaces, replace `HOST` with - `0.0.0.0`. - - 4. To allow applications to send OTLP data over HTTP/1.1 on port `4318`, - add the following to your `otelcol.receiver.otlp` component: - - ```river - http { - endpoint = "HOST:4318" - } - ``` - - 1. Replace `HOST` with a host to listen to traffic on. It is - recommended to use a narrowly-scoped listen address whenever - possible. To listen on all network interfaces, replace `HOST` with - `0.0.0.0`. - - 5. To disable one of the telemetry types, set the relevant type in the - `output` block to the empty list, such as `metrics = []`. - -The following example demonstrates configuring `otelcol.receiver.otlp` and -sending it to an exporter: - -```river -otelcol.receiver.otlp "example" { - grpc { - endpoint = "127.0.0.1:4317" - } - - http { - endpoint = "127.0.0.1:4318" - } - - output { - metrics = [otelcol.processor.batch.example.input] - logs = [otelcol.processor.batch.example.input] - traces = [otelcol.processor.batch.example.input] - } -} - -otelcol.processor.batch "example" { - output { - metrics = [otelcol.exporter.otlp.default.input] - logs = [otelcol.exporter.otlp.default.input] - traces = [otelcol.exporter.otlp.default.input] - } -} - -otelcol.exporter.otlp "default" { - client { - endpoint = "my-otlp-grpc-server:4317" - } -} -``` - -For more information on receiving OpenTelemetry data using the OpenTelemetry -Protocol, refer to [otelcol.receiver.otlp][]. 
- -[OpenTelemetry]: https://opentelemetry.io - -{{% docs/reference %}} -[otelcol.auth.basic]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.auth.basic.md" -[otelcol.auth.basic]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.basic.md" -[otelcol.exporter.otlp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.otlp.md" -[otelcol.exporter.otlp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.otlp.md" -[otelcol.exporter.otlphttp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.otlphttp.md" -[otelcol.exporter.otlphttp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.otlphttp.md" -[otelcol.processor.batch]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.processor.batch.md" -[otelcol.processor.batch]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.batch.md" -[otelcol.receiver.otlp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.receiver.otlp.md" -[otelcol.receiver.otlp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.otlp.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/components.md" -{{% /docs/reference %}} diff --git a/docs/sources/flow/getting-started/collect-prometheus-metrics.md b/docs/sources/flow/getting-started/collect-prometheus-metrics.md deleted file mode 100644 index b7bed5f92fe1..000000000000 --- a/docs/sources/flow/getting-started/collect-prometheus-metrics.md +++ /dev/null @@ -1,501 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/getting-started/collect-prometheus-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/collect-prometheus-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/collect-prometheus-metrics/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/collect-prometheus-metrics/ -title: Collect and forward Prometheus metrics -description: Learn how to collect and forward Prometheus metrics -weight: 200 ---- - -# Collect and forward Prometheus metrics - -Grafana Agent Flow can be configured to collect [Prometheus][] metrics and -forward them to any Prometheus-compatible database. - -This topic describes how to: - -* Configure metrics delivery -* Collect metrics from Kubernetes Pods - -## Components used in this topic - -* [discovery.kubernetes][] -* [prometheus.remote_write][] -* [prometheus.scrape][] - -## Before you begin - -* Ensure that you have basic familiarity with instrumenting applications with - Prometheus. -* Have a set of Prometheus exports or applications exposing Prometheus metrics - that you want to collect metrics from. -* Identify where you will write collected metrics. Metrics may be written to - Prometheus or Prometheus-compatible endpoints such as Grafana Mimir, Grafana - Cloud, or Grafana Enterprise Metrics. -* Be familiar with the concept of [Components][] in Grafana Agent Flow. - -## Configure metrics delivery - -Before components can collect Prometheus metrics, you must have a component -responsible for writing those metrics somewhere. 
- -The [prometheus.remote_write][] component is responsible for delivering -Prometheus metrics to one or Prometheus-compatible endpoints. Once a -`prometheus.remote_write` component is defined, other Grafana Agent Flow -components can be used to forward metrics to it. - -To configure a `prometheus.remote_write` component for metrics delivery, -complete the following steps: - -1. Add the following `prometheus.remote_write` component to your configuration file: - - ```river - prometheus.remote_write "LABEL" { - endpoint { - url = "PROMETHEUS_URL" - } - } - ``` - - 1. Replace `LABEL` with a label to use for the component, such as `default`. - The label chosen must be unique across all `prometheus.remote_write` - components in the same configuration file. - - 2. Replace `PROMETHEUS_URL` with the full URL of the Prometheus-compatible - endpoint where metrics will be sent, such as - `https://prometheus-us-central1.grafana.net/api/prom/push`. - -2. If your endpoint requires basic authentication, paste the following inside - of the `endpoint` block: - - ```river - basic_auth { - username = "USERNAME" - password = "PASSWORD" - } - ``` - - 1. Replace `USERNAME` with the basic authentication username to use. - - 2. Replace `PASSWORD` with the basic authentication password or API key to - use. - -3. If you have more than one endpoint to write metrics to, repeat the - `endpoint` block for additional endpoints. - -The following example demonstrates configuring `prometheus.remote_write` with -multiple endpoints and mixed usage of basic authentication, and a -`prometheus.scrape` component which forwards metrics to it: - -```river -prometheus.remote_write "default" { - endpoint { - url = "http://localhost:9090/api/prom/push" - } - - endpoint { - url = "https://prometheus-us-central1.grafana.net/api/prom/push" - - // Get basic authentication based on environment variables. - basic_auth { - username = env("REMOTE_WRITE_USERNAME") - password = env("REMOTE_WRITE_PASSWORD") - } - } -} - -prometheus.scrape "example" { - // Collect metrics from Grafana Agent's default listen address. - targets = [{ - __address__ = "127.0.0.1:12345", - }] - - forward_to = [prometheus.remote_write.default.receiver] -} -``` - -For more information on configuring metrics delivery, refer to -[prometheus.remote_write][]. - -## Collect metrics from Kubernetes Pods - -Grafana Agent Flow can be configured to collect metrics from Kubernetes Pods -by: - -1. Discovering Kubernetes Pods to collect metrics from. -2. Collecting metrics from those discovered Pods. - -To collect metrics from Kubernetes Pods, complete the following steps: - -1. Follow [Configure metrics delivery](#configure-metrics-delivery) to ensure - collected metrics can be written somewhere. - -2. Discover Kubernetes Pods: - - 1. Add the following `discovery.kubernetes` component to your configuration - file to discover every Pod in the cluster across all Namespaces: - - ```river - discovery.kubernetes "DISCOVERY_LABEL" { - role = "pod" - } - ``` - - This will generate one Prometheus target for every exposed port on every - discovered Pod. - - 1. Replace `DISCOVERY_LABEL` with a label to use for the component, such as - `pods`. The label chosen must be unique across all - `discovery.kubernetes` components in the same configuration file. - - 2. To limit the Namespaces that Pods are discovered in, add the following - block inside of the `discovery.kubernetes` component: - - ```river - namespaces { - own_namespace = true - names = [NAMESPACE_NAMES] - } - ``` - - 1. 
If you don't want to search for Pods in the Namespace Grafana - Agent is running in, set `own_namespace` to `false`. - - 2. Replace `NAMESPACE_NAMES` with a comma-delimited list of strings - representing Namespaces to search. Each string must be wrapped in - double quotes. For example, `"default","kube-system"`. - - 3. To use a field selector to limit the number of discovered Pods, add the - following block inside of the `discovery.kubernetes` component: - - ```river - selectors { - role = "pod" - field = "FIELD_SELECTOR" - } - ``` - - 1. Replace `FIELD_SELECTOR` with the Kubernetes field selector to use, - such as `metadata.name=my-service`. For more information on field - selectors, refer to the Kubernetes documentation on [Field - Selectors][]. - - 2. Create additional `selectors` blocks for each field selector you - want to apply. - - 4. To use a label selector to limit the number of discovered Pods, add the - following block inside of the `discovery.kubernetes` component: - - ```river - selectors { - role = "pod" - label = "LABEL_SELECTOR" - } - ``` - - 1. Replace `LABEL_SELECTOR` with the Kubernetes label selector to use, - such as `environment in (production, qa)`. For more information on - label selectors, refer to the Kubernetes documentation on [Labels - and Selectors][]. - - 2. Create additional `selectors` blocks for each label selector you - want to apply. - -3. Collect metrics from discovered Pods: - - 1. Add the following `prometheus.scrape` component to your configuration - file: - - ```river - prometheus.scrape "SCRAPE_LABEL" { - targets = discovery.kubernetes.DISCOVERY_LABEL.targets - forward_to = [prometheus.remote_write.REMOTE_WRITE_LABEL.receiver] - } - ``` - - 1. Replace `SCRAPE_LABEL` with a label to use for the component, such - as `pods`. The label chosen must be unique across all - `prometeus.scrape` components in the same configuration file. - - 2. Replace `DISCOVERY_LABEL` with the label chosen for the - `discovery.kubernetes` component in step 2.1.1. - - 3. Replace `REMOTE_WRITE_LABEL` with the label chosen for your existing - `prometheus.remote_write` component. - -The following example demonstrates configuring Grafana Agent to collect metrics -from running production Kubernetes Pods in the `default` Namespace: - -```river -discovery.kubernetes "pods" { - role = "pod" - - namespaces { - own_namespace = false - - names = ["default"] - } - - selectors { - role = "pod" - label = "environment in (production)" - } -} - -prometheus.scrape "pods" { - targets = discovery.kubernetes.pods.targets - forward_to = [prometheus.remote_write.default.receiver] -} - -prometheus.remote_write "default" { - endpoint { - url = "http://localhost:9090/api/prom/push" - } -} -``` - -For more information on configuring Kubernetes service delivery and collecting -metrics, refer to [discovery.kubernetes][] and [prometheus.scrape][]. - -## Collect metrics from Kubernetes Services - -Grafana Agent Flow can be configured to collect metrics from Kubernetes Services -by: - -1. Discovering Kubernetes Services to collect metrics from. -2. Collecting metrics from those discovered Services. - -To collect metrics from Kubernetes Services, complete the following steps: - -1. Follow [Configure metrics delivery](#configure-metrics-delivery) to ensure - collected metrics can be written somewhere. - -2. Discover Kubernetes Services: - - 1. 
Add the following `discovery.kubernetes` component to your configuration - file to discover every Services in the cluster across all Namespaces: - - ```river - discovery.kubernetes "DISCOVERY_LABEL" { - role = "service" - } - ``` - - This will generate one Prometheus target for every exposed port on every - discovered Service. - - 1. Replace `DISCOVERY_LABEL` with a label to use for the component, such as - `services`. The label chosen must be unique across all - `discovery.kubernetes` components in the same configuration file. - - 2. To limit the Namespaces that Services are discovered in, add the following - block inside of the `discovery.kubernetes` component: - - ```river - namespaces { - own_namespace = true - names = [NAMESPACE_NAMES] - } - ``` - - 1. If you do not want to search for Services in the Namespace Grafana - Agent is running in, set `own_namespace` to `false`. - - 2. Replace `NAMESPACE_NAMES` with a comma-delimited list of strings - representing Namespaces to search. Each string must be wrapped in - double quotes. For example, `"default","kube-system"`. - - 3. To use a field selector to limit the number of discovered Services, add the - following block inside of the `discovery.kubernetes` component: - - ```river - selectors { - role = "service" - field = "FIELD_SELECTOR" - } - ``` - - 1. Replace `FIELD_SELECTOR` with the Kubernetes field selector to use, - such as `metadata.name=my-service`. For more information on field - selectors, refer to the Kubernetes documentation on [Field - Selectors][]. - - 2. Create additional `selectors` blocks for each field selector you - want to apply. - - 4. To use a label selector to limit the number of discovered Services, add the - following block inside of the `discovery.kubernetes` component: - - ```river - selectors { - role = "service" - label = "LABEL_SELECTOR" - } - ``` - - 1. Replace `LABEL_SELECTOR` with the Kubernetes label selector to use, - such as `environment in (production, qa)`. For more information on - label selectors, refer to the Kubernetes documentation on [Labels - and Selectors][]. - - 2. Create additional `selectors` blocks for each label selector you - want to apply. - -3. Collect metrics from discovered Services: - - 1. Add the following `prometheus.scrape` component to your configuration - file: - - ```river - prometheus.scrape "SCRAPE_LABEL" { - targets = discovery.kubernetes.DISCOVERY_LABEL.targets - forward_to = [prometheus.remote_write.REMOTE_WRITE_LABEL.receiver] - } - ``` - - 1. Replace `SCRAPE_LABEL` with a label to use for the component, such - as `services`. The label chosen must be unique across all - `prometeus.scrape` components in the same configuration file. - - 2. Replace `DISCOVERY_LABEL` with the label chosen for the - `discovery.kubernetes` component in step 2.1.1. - - 3. Replace `REMOTE_WRITE_LABEL` with the label chosen for your existing - `prometheus.remote_write` component. 
- -The following example demonstrates configuring Grafana Agent to collect metrics -from running production Kubernetes Services in the `default` Namespace: - -```river -discovery.kubernetes "services" { - role = "service" - - namespaces { - own_namespace = false - - names = ["default"] - } - - selectors { - role = "service" - label = "environment in (production)" - } -} - -prometheus.scrape "services" { - targets = discovery.kubernetes.services.targets - forward_to = [prometheus.remote_write.default.receiver] -} - -prometheus.remote_write "default" { - endpoint { - url = "http://localhost:9090/api/prom/push" - } -} -``` - -For more information on configuring Kubernetes service delivery and collecting -metrics, refer to [discovery.kubernetes][] and [prometheus.scrape][]. - -## Collect metrics from custom targets - -Grafana Agent Flow can be configured to collect metrics from a custom set of -targets without the need for service discovery. - -To collect metrics from a custom set of targets, complete the following steps: - -1. Follow [Configure metrics delivery](#configure-metrics-delivery) to ensure - collected metrics can be written somewhere. - -2. Add the following `prometheus.scrape` component to your configuration file: - - ```river - prometheus.scrape "SCRAPE_LABEL" { - targets = [TARGET_LIST] - forward_to = [prometheus.remote_write.REMOTE_WRITE_LABEL.receiver] - } - ``` - - 1. Replace `SCRAPE_LABEL` with a label to use for the component, such as - `custom_targets`. The label chosen must be unique across all - `prometheus.scrape` components in the same configuration file. - - 2. Replace `TARGET_LIST` with a comma-delimited list of [Objects][] - denoting the Prometheus target. Each object must conform to the - following rules: - - * There must be an `__address__` key denoting the `HOST:PORT` of the - target to collect metrics from. - - * To explicitly specify which protocol to use, set the `__scheme__` key - to `"http"` or `"https"`. If the `__scheme__` key is not provided, - the protocol to use is inherited by the settings of the - `prometheus.scrape` component (default `"http"`). - - * To explicitly specify which HTTP path to collect metrics from, set - the `__metrics_path__` key to the HTTP path to use. If the - `__metrics_path__` key is not provided, the path to use is - inherited by the settings of the `prometheus.scrape` component - (default `"/metrics"`). - - * Add additional keys as desired to inject extra labels to collected - metrics. Any label starting with two underscores (`__`) will be - dropped prior to scraping. - - 3. Replace `REMOTE_WRITE_LABEL` with the label chosen for your existing - `prometheus.remote_write` component. 
- -The following example demonstrates configuring `prometheus.scrape` to collect -metrics from a custom set of endpoints: - -```river -prometheus.scrape "custom_targets" { - targets = [ - { - __address__ = "prometheus:9090", - }, - { - __address__ = "mimir:8080", - __scheme__ = "https", - }, - { - __address__ = "custom-application:80", - __metrics_path__ = "/custom-metrics–path", - }, - { - __address__ = "grafana-agent:12345", - application = "grafana-agent", - environment = "production", - }, - ] - - forward_to = [prometheus.remote_write.default.receiver] -} - -prometheus.remote_write "default" { - endpoint { - url = "http://localhost:9090/api/prom/push" - } -} -``` - -[Prometheus]: https://prometheus.io -[Field Selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ -[Labels and Selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#set-based-requirement -[Field Selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ -[Labels and Selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#set-based-requirement - -{{% docs/reference %}} -[discovery.kubernetes]: "/docs/agent/ -> /docs/agent//flow/reference/components/discovery.kubernetes.md" -[discovery.kubernetes]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.kubernetes.md" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.remote_write.md" -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.scrape.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/components.md" -[Objects]: "/docs/agent/ -> /docs/agent//flow/config-language/expressions/types_and_values.md#objects" -[Objects]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/types_and_values.md#objects" -{{% /docs/reference %}} diff --git a/docs/sources/flow/getting-started/configure-agent-clustering.md b/docs/sources/flow/getting-started/configure-agent-clustering.md deleted file mode 100644 index af7085b9d1f1..000000000000 --- a/docs/sources/flow/getting-started/configure-agent-clustering.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/getting-started/configure-agent-clustering/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/configure-agent-clustering/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/configure-agent-clustering/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/configure-agent-clustering/ -menuTitle: Configure Grafana Agent clustering -title: Configure Grafana Agent clustering in an existing installation -description: Learn how to configure Grafana Agent clustering in an existing installation -weight: 400 ---- - -# Configure Grafana Agent clustering in an existing installation - -You can configure Grafana Agent to run with [clustering][] so that -individual agents can work together for workload 
distribution and high -availability. - - -> **Note:** Clustering is a [beta][] feature. Beta features are subject to breaking changes and may be -> replaced with equivalent functionality that covers the same use case. - -This topic describes how to add clustering to an existing installation. - -## Configure Grafana Agent clustering with Helm Chart - -This section guides you through enabling clustering when Grafana Agent is -installed on Kubernetes using the [Grafana Agent Helm chart][install-helm]. - -### Before you begin - -- Ensure that your `values.yaml` file has `controller.type` set to - `statefulset`. - -### Steps - -To configure clustering: - -1. Amend your existing values.yaml file to add `clustering.enabled=true` inside - of the `agent` block: - - ```yaml - agent: - clustering: - enabled: true - ``` - -1. Upgrade your installation to use the new `values.yaml` file: - - ```bash - helm upgrade RELEASE_NAME -f values.yaml - ``` - - Replace `RELEASE_NAME` with the name of the installation you chose when you - installed the Helm chart. - -1. Use the [Grafana Agent UI][UI] to verify the cluster status: - - 1. Click **Clustering** in the navigation bar. - - 2. Ensure that all expected nodes appear in the resulting table. - -{{% docs/reference %}} -[clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering.md" -[clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/clustering.md" -[beta]: "/docs/agent/ -> /docs/agent//stability.md#beta" -[beta]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/stability.md#beta" -[install-helm]: "/docs/agent/ -> /docs/agent//flow/setup/install/kubernetes.md" -[install-helm]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/kubernetes.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#component-detail-page" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging.md#component-detail-page" -{{% /docs/reference %}} diff --git a/docs/sources/flow/getting-started/distribute-prometheus-scrape-load.md b/docs/sources/flow/getting-started/distribute-prometheus-scrape-load.md deleted file mode 100644 index b3319b72e193..000000000000 --- a/docs/sources/flow/getting-started/distribute-prometheus-scrape-load.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/getting-started/distribute-prometheus-scrape-load/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/distribute-prometheus-scrape-load/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/distribute-prometheus-scrape-load/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/distribute-prometheus-scrape-load/ -menuTitle: Distribute Prometheus metrics scrape load -title: Distribute Prometheus metrics scrape load -description: Learn how to distribute your Prometheus metrics scrape load -weight: 500 ---- - -# Distribute Prometheus metrics scrape load - -A good predictor for the size of an agent deployment is the number of -Prometheus targets each agent scrapes. [Clustering][] with target -auto-distribution allows a fleet of agents to work together to dynamically -distribute their scrape load, providing high-availability. - -> **Note:** Clustering is a [beta][] feature. Beta features are subject to breaking -> changes and may be replaced with equivalent functionality that covers the same use case. 
- -## Before you begin - -- Familiarize yourself with how to [configure existing Grafana Agent installations][configure-grafana-agent]. -- [Configure Prometheus metrics collection][]. -- [Configure clustering][] of agents. -- Ensure that all of your clustered agents have the same configuration file. - -## Steps - -To distribute Prometheus metrics scrape load with clustering: - -1. Add the following block to all `prometheus.scrape` components which - should use auto-distribution: - - ```river - clustering { - enabled = true - } - ``` - -2. Restart or reload agents for them to use the new configuration. - -3. Validate that auto-distribution is functioning: - - 1. Using the [Grafana Agent UI][UI] on each agent, navigate to the details page for one of - the `prometheus.scrape` components you modified. - - 2. Compare the Debug Info sections between two different agents to ensure - that they're not scraping the same sets of targets. - -{{% docs/reference %}} -[Clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering.md" -[Clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/clustering.md" -[beta]: "/docs/agent/ -> /docs/agent//stability.md#beta" -[beta]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/stability.md#beta" -[configure-grafana-agent]: "/docs/agent/ -> /docs/agent//flow/setup/configure" -[configure-grafana-agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure" -[Configure Prometheus metrics collection]: "/docs/agent/ -> /docs/agent//flow/getting-started/collect-prometheus-metrics.md" -[Configure Prometheus metrics collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/collect-prometheus-metrics.md" -[Configure clustering]: "/docs/agent/ -> /docs/agent//flow/getting-started/configure-agent-clustering.md" -[Configure clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/configure-agent-clustering.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#component-detail-page" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging.md#component-detail-page" -{{% /docs/reference %}} diff --git a/docs/sources/flow/getting-started/migrating-from-operator.md b/docs/sources/flow/getting-started/migrating-from-operator.md deleted file mode 100644 index da62154488cb..000000000000 --- a/docs/sources/flow/getting-started/migrating-from-operator.md +++ /dev/null @@ -1,262 +0,0 @@ ---- -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/migrating-from-operator/ -description: Migrating from Grafana Agent Operator to Grafana Agent Flow -menuTitle: Migrate from Operator -title: Migrating from Grafana Agent Operator to Grafana Agent Flow -weight: 320 ---- - -# Migrating from Grafana Agent Operator to Grafana Agent Flow - -With the release of Flow, Grafana Agent Operator is no longer the recommended way to deploy Grafana Agent in Kubernetes. Some of the Operator functionality has been moved into Grafana Agent -itself, and the remaining functionality has been replaced by our Helm Chart. - -- The Monitor types (`PodMonitor`, `ServiceMonitor`, `Probe`, and `LogsInstance`) are all supported natively by Grafana Agent in Flow mode. You are no longer -required to use the Operator to consume those CRDs for dynamic monitoring in your cluster. 
-- The parts of the Operator that deploy the Agent itself (`GrafanaAgent`, `MetricsInstance`, and `LogsInstance` CRDs) are deprecated. We now recommend -operator users use the [Grafana Agent Helm Chart](https://grafana.com/docs/agent/latest/flow/setup/install/kubernetes/) to deploy the Agent directly to your clusters. - -This guide will provide some steps to get started with Grafana Agent for users coming from Grafana Agent Operator. - -## Deploy Grafana Agent with Helm - -1. You will need to create a `values.yaml` file, which contains options for deploying your Agent. You may start with the [default values](https://github.com/grafana/agent/blob/main/operations/helm/charts/grafana-agent/values.yaml) and customize as you see fit, or start with this snippet, which should be a good starting point for what the Operator does: - - ```yaml - agent: - mode: 'flow' - configMap: - create: true - clustering: - enabled: true - controller: - type: 'statefulset' - replicas: 2 - crds: - create: false - ``` - - This configuration will deploy Grafana Agent as a `StatefulSet` using the built-in [clustering](https://grafana.com/docs/agent/latest/flow/concepts/clustering/) functionality to allow distributing scrapes across all Agent Pods. - - This is not the only deployment mode possible. For example, you may want to use a `DaemonSet` to collect host-level logs or metrics. See [the Agent deployment guide](https://grafana.com/docs/agent/latest/flow/setup/deploy-agent/) for more details about different topologies. - -2. Create a Flow config file, `agent.river`. - - We will be adding to this config in the next step as we convert `MetricsInstances`. You can add any additional configuration to this file as you desire. - -3. Install the grafana helm repository: - - ``` - helm repo add grafana https://grafana.github.io/helm-charts - helm repo update - ``` - -4. Create a Helm release. You may name the release anything you like. Here we are installing a release named `grafana-agent-metrics` in the `monitoring` namespace. - - ```shell - helm upgrade grafana-agent-metrics grafana/grafana-agent -i -n monitoring -f values.yaml --set-file agent.configMap.content=agent.river - ``` - - This command uses the `--set-file` flag to pass the configuration file as a Helm value, so that we can continue to edit it as a regular River file. - -## Convert `MetricsIntances` to Flow components - -A `MetricsInstance` resource primarily defines: - -- The remote endpoint(s) Grafana Agent should send metrics to. -- Which `PodMonitor`, `ServiceMonitor`, and `Probe` resources this Agent should discover. - -These functions can be done in Grafana Agent Flow with the `prometheus.remote_write`, `prometheus.operator.podmonitors`, `prometheus.operator.servicemonitors`, and `prometheus.operator.probes` components respectively. 
- -This is a River sample that is equivalent to the `MetricsInstance` from our [operator guide](https://grafana.com/docs/agent/latest/operator/deploy-agent-operator-resources/#deploy-a-metricsinstance-resource): - -```river - -// read the credentials secret for remote_write authorization -remote.kubernetes.secret "credentials" { - namespace = "monitoring" - name = "primary-credentials-metrics" -} - -prometheus.remote_write "primary" { - endpoint { - url = "https://PROMETHEUS_URL/api/v1/push" - basic_auth { - username = nonsensitive(remote.kubernetes.secret.credentials.data["username"]) - password = remote.kubernetes.secret.credentials.data["password"] - } - } -} - -prometheus.operator.podmonitors "primary" { - forward_to = [prometheus.remote_write.primary.receiver] - // leave out selector to find all podmonitors in the entire cluster - selector { - match_labels = {instance = "primary"} - } -} - -prometheus.operator.servicemonitors "primary" { - forward_to = [prometheus.remote_write.primary.receiver] - // leave out selector to find all servicemonitors in the entire cluster - selector { - match_labels = {instance = "primary"} - } -} - -``` - -You will need to replace `PROMETHEUS_URL` with the actual endpoint you want to send metrics to. - -This configuration will discover all `PodMonitor`, `ServiceMonitor`, and `Probe` resources in your cluster that match our label selector `instance=primary`. It will then scrape metrics from their targets and forward them to your remote write endpoint. - -You may need to customize this configuration further if you use additional features in your `MetricsInstance` resources. Refer to the documentation for the relevant components for additional information: - -- [remote.kubernetes.secret](https://grafana.com/docs/agent/latest/flow/reference/components/remote.kubernetes.secret) -- [prometheus.remote_write](https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.remote_write) -- [prometheus.operator.podmonitors](https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.operator.podmonitors) -- [prometheus.operator.servicemonitors](https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.operator.servicemonitors) -- [prometheus.operator.probes](https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.operator.probes) -- [prometheus.scrape](https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.scrape) - -## Collecting Logs - -Our current recommendation is to create an additional DaemonSet deployment of Grafana Agents to scrape logs. - -> We have components that can scrape pod logs directly from the Kubernetes API without needing a DaemonSet deployment. These are -> still considered experimental, but if you would like to try them, see the documentation for [loki.source.kubernetes](https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.kubernetes/) and -> [loki.source.podlogs](https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.podlogs/). - -These values are close to what the Operator currently deploys for logs: - -```yaml -agent: - mode: 'flow' - configMap: - create: true - clustering: - enabled: false - controller: - type: 'daemonset' - mounts: - # -- Mount /var/log from the host into the container for log collection. 
- varlog: true -``` - -This command will install a release named `grafana-agent-logs` in the `monitoring` namespace: - -``` -helm upgrade grafana-agent-logs grafana/grafana-agent -i -n monitoring -f values-logs.yaml --set-file agent.configMap.content=agent-logs.river -``` - -This simple configuration will scrape logs for every pod on each node: - -``` -// read the credentials secret for remote_write authorization -remote.kubernetes.secret "credentials" { - namespace = "monitoring" - name = "primary-credentials-logs" -} - -discovery.kubernetes "pods" { - role = "pod" - // limit to pods on this node to reduce the amount we need to filter - selectors { - role = "pod" - field = "spec.nodeName=" + env("HOSTNAME") - } -} - -discovery.relabel "pod_logs" { - targets = discovery.kubernetes.pods.targets - rule { - source_labels = ["__meta_kubernetes_namespace"] - target_label = "namespace" - } - rule { - source_labels = ["__meta_kubernetes_pod_name"] - target_label = "pod" - } - rule { - source_labels = ["__meta_kubernetes_pod_container_name"] - target_label = "container" - } - rule { - source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_name"] - separator = "/" - target_label = "job" - } - rule { - source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] - separator = "/" - action = "replace" - replacement = "/var/log/pods/*$1/*.log" - target_label = "__path__" - } -} - -local.file_match "pod_logs" { - path_targets = discovery.relabel.pod_logs.output -} - -loki.source.file "pod_logs" { - targets = local.file_match.pod_logs.targets - forward_to = [loki.process.pod_logs.receiver] -} - -// basic processing to parse the container format. You can add additional processing stages -// to match your application logs. -loki.process "pod_logs" { - stage.match { - selector = "{tmp_container_runtime=\"containerd\"}" - // the cri processing stage extracts the following k/v pairs: log, stream, time, flags - stage.cri {} - // Set the extract flags and stream values as labels - stage.labels { - values = { - flags = "", - stream = "", - } - } - } - - // if the label tmp_container_runtime from above is docker parse using docker - stage.match { - selector = "{tmp_container_runtime=\"docker\"}" - // the docker processing stage extracts the following k/v pairs: log, stream, time - stage.docker {} - - // Set the extract stream value as a label - stage.labels { - values = { - stream = "", - } - } - } - - // drop the temporary container runtime label as it is no longer needed - stage.label_drop { - values = ["tmp_container_runtime"] - } - - forward_to = [loki.write.loki.receiver] -} - -loki.write "loki" { - endpoint { - url = "https://LOKI_URL/loki/api/v1/push" - basic_auth { - username = nonsensitive(remote.kubernetes.secret.credentials.data["username"]) - password = remote.kubernetes.secret.credentials.data["password"] - } -} -} -``` - -You will need to replace `LOKI_URL` with the actual endpoint of your Loki instance. The logging subsytem is very powerful -and has many options for processing logs. For further details see the [component documentation](https://grafana.com/docs/agent/latest/flow/reference/components/). - - -## Integrations - -The `Integration` CRD is not supported with Grafana Agent Flow, however all static mode integrations have an equivalent component in the [`prometheus.exporter`](https://grafana.com/docs/agent/latest/flow/reference/components) namespace. The reference docs should help convert those integrations to their Flow equivalent. 
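For example, the static mode `redis_exporter` integration maps to the `prometheus.exporter.redis` component. The following is a minimal sketch only — the `redis_addr` value is a placeholder, and the exact arguments each exporter accepts are listed on its reference page:

```river
// Sketch: export Redis metrics. Replace the address with your Redis instance.
prometheus.exporter.redis "example" {
  redis_addr = "localhost:6379"
}

// Scrape the exporter's targets and forward them to the remote_write
// component defined earlier in this guide.
prometheus.scrape "redis" {
  targets    = prometheus.exporter.redis.example.targets
  forward_to = [prometheus.remote_write.primary.receiver]
}
```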
diff --git a/docs/sources/flow/getting-started/migrating-from-prometheus.md b/docs/sources/flow/getting-started/migrating-from-prometheus.md deleted file mode 100644 index 14e32241f83b..000000000000 --- a/docs/sources/flow/getting-started/migrating-from-prometheus.md +++ /dev/null @@ -1,262 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/getting-started/migrating-from-prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/migrating-from-prometheus/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/migrating-from-prometheus/ -menuTitle: Migrate from Prometheus -title: Migrate from Prometheus to Grafana Agent Flow -description: Learn how to migrate from Prometheus to Grafana Agent Flow -weight: 320 ---- - -# Migrate from Prometheus to Grafana Agent Flow - -The built-in Grafana Agent convert command can migrate your [Prometheus][] configuration to a Grafana Agent flow configuration. - -This topic describes how to: - -* Convert a Prometheus configuration to a flow configuration. -* Run a Prometheus configuration natively using Grafana Agent flow mode. - -## Components used in this topic - -* [prometheus.scrape][] -* [prometheus.remote_write][] - -## Before you begin - -* You must have an existing Prometheus configuration. -* You must have a set of Prometheus applications ready to push telemetry data to Grafana Agent. -* You must be familiar with the concept of [Components][] in Grafana Agent flow mode. - -## Convert a Prometheus configuration - -To fully migrate your configuration from [Prometheus] to Grafana Agent -in flow mode, you must convert your Prometheus configuration into a Grafana Agent flow -mode configuration. This conversion will enable you to take full advantage of the many -additional features available in Grafana Agent flow mode. - -> In this task, we will use the [convert][] CLI command to output a flow -> configuration from a Prometheus configuration. - -1. Open a terminal window and run the following command: - - {{< code >}} - - ```static-binary - AGENT_MODE=flow grafana-agent convert --source-format=prometheus --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - ```flow-binary - grafana-agent-flow convert --source-format=prometheus --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - {{< /code >}} - - Replace the following: - * `INPUT_CONFIG_PATH`: The full path to the Prometheus configuration. - * `OUTPUT_CONFIG_PATH`: The full path to output the flow configuration. - -1. [Start the agent][] in flow mode using the new flow configuration from `OUTPUT_CONFIG_PATH`: - -### Debugging - -1. If the convert command can't convert a Prometheus configuration, - diagnostic information is sent to `stderr`. You can bypass - any non-critical issues and output the flow configuration using a - best-effort conversion by including the `--bypass-errors` flag. - - {{% admonition type="caution" %}} - If you bypass the errors, the behavior of the converted configuration may not match the original Prometheus configuration. - Make sure you fully test the converted configuration before using it in a production environment. 
- {{% /admonition %}} - - {{< code >}} - - ```static-binary - AGENT_MODE=flow grafana-agent convert --source-format=prometheus --bypass-errors --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - ```flow-binary - grafana-agent-flow convert --source-format=prometheus --bypass-errors --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - {{< /code >}} - -1. You can also output a diagnostic report by including the `--report` flag. - - {{< code >}} - - ```static-binary - AGENT_MODE=flow grafana-agent convert --source-format=prometheus --report=OUTPUT_REPORT_PATH --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - ```flow-binary - grafana-agent-flow convert --source-format=prometheus --report=OUTPUT_REPORT_PATH --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - {{< /code >}} - - * Replace `OUTPUT_REPORT_PATH` with the output path for the report. - - Using the [example](#example) Prometheus configuration below, the diagnostic report provides the following information: - - ```plaintext - (Info) Converted scrape_configs job_name "prometheus" into... - A prometheus.scrape.prometheus component - (Info) Converted 1 remote_write[s] "grafana-cloud" into... - A prometheus.remote_write.default component - ``` - -## Run a Prometheus configuration - -If you’re not ready to completely switch to a flow configuration, you can run Grafana Agent using your existing Prometheus configuration. -The `--config.format=prometheus` flag tells Grafana Agent to convert your Prometheus configuration to flow mode and load it directly without saving the new configuration. -This allows you to try flow mode without modifying your existing Prometheus configuration infrastructure. - -> In this task, we will use the [run][] CLI command to run Grafana Agent in flow -> mode using a Prometheus configuration. - -[Start the agent][] in flow mode and include the command line flag - `--config.format=prometheus`. Your configuration file must be a valid - Prometheus configuration file rather than a flow mode configuration file. - -### Debugging - -1. You can follow the convert CLI command [debugging][] instructions to - generate a diagnostic report. - -1. Refer to the Grafana Agent [Flow Debugging][] for more information about a running Grafana - Agent in flow mode. - -1. If your Prometheus configuration cannot be converted and - loaded directly into Grafana Agent, diagnostic information - is sent to `stderr`. You can bypass any non-critical issues - and start the Agent by including the - `--config.bypass-conversion-errors` flag in addition to - `--config.format=prometheus`. - - {{% admonition type="caution" %}} - If you bypass the errors, the behavior of the converted configuration may not match the original Prometheus configuration. - Do not use this flag in a production environment. - {{% /admonition %}} - -## Example - -This example demonstrates converting a Prometheus configuration file to a Grafana Agent flow mode configuration file. - -The following Prometheus configuration file provides the input for the conversion: - -```yaml -global: - scrape_timeout: 45s - -scrape_configs: - - job_name: "prometheus" - static_configs: - - targets: ["localhost:12345"] - -remote_write: - - name: "grafana-cloud" - url: "https://prometheus-us-central1.grafana.net/api/prom/push" - basic_auth: - username: USERNAME - password: PASSWORD -``` - -The convert command takes the YAML file as input and outputs a [River][] file. 
- -{{< code >}} - -```static-binary -AGENT_MODE=flow grafana-agent convert --source-format=prometheus --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH -``` - -```flow-binary -grafana-agent-flow convert --source-format=prometheus --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH -``` - -{{< /code >}} - -The new flow configuration file looks like this: - -```river -prometheus.scrape "prometheus" { - targets = [{ - __address__ = "localhost:12345", - }] - forward_to = [prometheus.remote_write.default.receiver] - job_name = "prometheus" - scrape_timeout = "45s" -} - -prometheus.remote_write "default" { - endpoint { - name = "grafana-cloud" - url = "https://prometheus-us-central1.grafana.net/api/prom/push" - - basic_auth { - username = "USERNAME" - password = "PASSWORD" - } - - queue_config { - capacity = 2500 - max_shards = 200 - max_samples_per_send = 500 - } - - metadata_config { - max_samples_per_send = 500 - } - } -} -``` - -## Limitations - -Configuration conversion is done on a best-effort basis. The Agent will issue -warnings or errors where the conversion cannot be performed. - -Once the configuration is converted, we recommend that you review -the Flow Mode configuration file created and verify that it is correct -before starting to use it in a production environment. - -Furthermore, we recommend that you review the following checklist: - -* The following configurations are not available for conversion to flow mode: - `rule_files`, `alerting`, `remote_read`, `storage`, and `tracing`. Any - additional unsupported features are returned as errors during conversion. -* Check if you are using any extra command line arguments with Prometheus that - are not present in your configuration file. For example, `--web.listen-address`. -* Metamonitoring metrics exposed by the Flow Mode usually match Prometheus - metamonitoring metrics but will use a different name. Make sure that you use - the new metric names, for example, in your alerts and dashboards queries. -* The logs produced by Grafana Agent will differ from those - produced by Prometheus. -* Grafana Agent exposes the [Grafana Agent Flow UI][]. 
- -[Prometheus]: https://prometheus.io/docs/prometheus/latest/configuration/configuration/ -[debugging]: #debugging - -{{% docs/reference %}} -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.scrape.md" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.remote_write.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/components.md" -[convert]: "/docs/agent/ -> /docs/agent//flow/reference/cli/convert.md" -[convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/convert.md" -[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/run.md" -[Start the agent]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md" -[Start the agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/start-agent.md" -[Flow Debugging]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md" -[Flow Debugging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging.md" -[River]: "/docs/agent/ -> /docs/agent//flow/config-language/_index.md" -[River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/_index.md" -[Grafana Agent Flow UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging#grafana-agent-flow-ui" -[Grafana Agent Flow UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging#grafana-agent-flow-ui" -{{% /docs/reference %}} diff --git a/docs/sources/flow/getting-started/migrating-from-promtail.md b/docs/sources/flow/getting-started/migrating-from-promtail.md deleted file mode 100644 index 7fbe1b109d16..000000000000 --- a/docs/sources/flow/getting-started/migrating-from-promtail.md +++ /dev/null @@ -1,258 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/getting-started/migrating-from-promtail/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-promtail/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/migrating-from-promtail/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/migrating-from-promtail/ -menuTitle: Migrate from Promtail -title: Migrate from Promtail to Grafana Agent Flow -description: Learn how to migrate from Promtail to Grafana Agent Flow -weight: 330 ---- - -# Migrate from Promtail to Grafana Agent Flow - -The built-in Grafana Agent convert command can migrate your [Promtail][] -configuration to a Grafana Agent flow configuration. - -This topic describes how to: - -* Convert a Promtail configuration to a Flow Mode configuration. -* Run a Promtail configuration natively using Grafana Agent Flow Mode. - -## Components used in this topic - -* [local.file_match][] -* [loki.source.file][] -* [loki.write][] - -## Before you begin - -* You must have an existing Promtail configuration. 
-* You must be familiar with the concept of [Components][] in Grafana Agent Flow mode. - -## Convert a Promtail configuration - -To fully migrate from [Promtail] to Grafana Agent Flow Mode, you must convert -your Promtail configuration into a Grafana Agent Flow Mode configuration. This -conversion will enable you to take full advantage of the many additional -features available in Grafana Agent Flow Mode. - -> In this task, we will use the [convert][] CLI command to output a flow -> configuration from a Promtail configuration. - -1. Open a terminal window and run the following command: - - {{< code >}} - - ```static-binary - AGENT_MODE=flow grafana-agent convert --source-format=promtail --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - ```flow-binary - grafana-agent-flow convert --source-format=promtail --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - {{< /code >}} - - - Replace the following: - * `INPUT_CONFIG_PATH`: The full path to the Promtail configuration. - * `OUTPUT_CONFIG_PATH`: The full path to output the flow configuration. - -1. [Start the Agent][] in Flow Mode using the new flow configuration - from `OUTPUT_CONFIG_PATH`: - -### Debugging - -1. If the convert command cannot convert a Promtail configuration, diagnostic - information is sent to `stderr`. You can bypass any non-critical issues and - output the flow configuration using a best-effort conversion by including - the `--bypass-errors` flag. - - {{% admonition type="caution" %}} - If you bypass the errors, the behavior of the converted configuration may not match the original Promtail configuration. - Make sure you fully test the converted configuration before using it in a production environment. - {{% /admonition %}} - - {{< code >}} - - ```static-binary - AGENT_MODE=flow grafana-agent convert --source-format=promtail --bypass-errors --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - ```flow-binary - grafana-agent-flow convert --source-format=promtail --bypass-errors --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - {{< /code >}} - -1. You can also output a diagnostic report by including the `--report` flag. - - {{< code >}} - - ```static-binary - AGENT_MODE=flow grafana-agent convert --source-format=promtail --report=OUTPUT_REPORT_PATH --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - ```flow-binary - grafana-agent-flow convert --source-format=promtail --report=OUTPUT_REPORT_PATH --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - {{< /code >}} - - * Replace `OUTPUT_REPORT_PATH` with the output path for the report. - - Using the [example](#example) Promtail configuration below, the diagnostic - report provides the following information: - - ```plaintext - (Warning) If you have a tracing set up for Promtail, it cannot be migrated to Flow Mode automatically. Refer to the documentation on how to configure tracing in Flow Mode. - (Warning) The Agent Flow Mode's metrics are different from the metrics emitted by Promtail. If you rely on Promtail's metrics, you must change your configuration, for example, your alerts and dashboards. - ``` - -## Run a Promtail configuration - -If you’re not ready to completely switch to a flow configuration, you can run -Grafana Agent using your existing Promtail configuration. -The `--config.format=promtail` flag tells Grafana Agent to convert your Promtail -configuration to Flow Mode and load it directly without saving the new -configuration. This allows you to try Flow Mode without modifying your existing -Promtail configuration infrastructure. 
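A minimal sketch of running the Agent this way looks like the following, where `INPUT_CONFIG_PATH` is the full path to your existing Promtail configuration:

{{< code >}}

```static-binary
AGENT_MODE=flow grafana-agent run --config.format=promtail INPUT_CONFIG_PATH
```

```flow-binary
grafana-agent-flow run --config.format=promtail INPUT_CONFIG_PATH
```

{{< /code >}}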
- -> In this task, we will use the [run][] CLI command to run Grafana Agent in Flow -> mode using a Promtail configuration. - -[Start the Agent][] in Flow mode and include the command line flag -`--config.format=promtail`. Your configuration file must be a valid Promtail -configuration file rather than a Flow mode configuration file. - -### Debugging - -1. You can follow the convert CLI command [debugging][] instructions to generate - a diagnostic report. - -1. Refer to the Grafana Agent [Flow Debugging][] for more information about - running Grafana Agent in Flow mode. - -1. If your Promtail configuration can't be converted and loaded directly into - Grafana Agent, diagnostic information is sent to `stderr`. You can bypass any - non-critical issues and start the Agent by including the - `--config.bypass-conversion-errors` flag in addition to - `--config.format=promtail`. - - {{% admonition type="caution" %}} - If you bypass the errors, the behavior of the converted configuration may not match the original Promtail configuration. - Do not use this flag in a production environment. - {{%/admonition %}} - -## Example - -This example demonstrates converting a Promtail configuration file to a Grafana -Agent Flow mode configuration file. - -The following Promtail configuration file provides the input for the conversion: - -```yaml -clients: - - url: http://localhost/loki/api/v1/push -scrape_configs: - - job_name: example - static_configs: - - targets: - - localhost - labels: - __path__: /var/log/*.log -``` - -The convert command takes the YAML file as input and outputs a [River][] file. - -{{< code >}} - -```static-binary -AGENT_MODE=flow grafana-agent convert --source-format=promtail --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH -``` - -```flow-binary -grafana-agent-flow convert --source-format=promtail --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH -``` - -{{< /code >}} - -The new Flow Mode configuration file looks like this: - -```river -local.file_match "example" { - path_targets = [{ - __address__ = "localhost", - __path__ = "/var/log/*.log", - }] -} - -loki.source.file "example" { - targets = local.file_match.example.targets - forward_to = [loki.write.default.receiver] -} - -loki.write "default" { - endpoint { - url = "http://localhost/loki/api/v1/push" - } - external_labels = {} -} -``` - -## Limitations - -Configuration conversion is done on a best-effort basis. Grafana Agent will issue -warnings or errors where the conversion can't be performed. - -Once the configuration is converted, we recommend that you review -the Flow Mode configuration file created, and verify that it's correct -before starting to use it in a production environment. - -Furthermore, we recommend that you review the following checklist: - -* Check if you are using any extra command line arguments with Promtail which - aren't present in your configuration file. For example, `-max-line-size`. -* Check if you are setting any environment variables, - whether [expanded in the config file][] itself or consumed directly by - Promtail, such as `JAEGER_AGENT_HOST`. -* In Flow Mode, the positions file is saved at a different location. - Refer to the [loki.source.file][] documentation for more details. Check if you have any existing - setup, for example, a Kubernetes Persistent Volume, that you must update to use the new - positions file path. -* Metamonitoring metrics exposed by the Flow Mode usually match Promtail - metamonitoring metrics but will use a different name. 
Make sure that you - use the new metric names, for example, in your alerts and dashboards queries. -* Note that the logs produced by the Agent will differ from those - produced by Promtail. -* Note that the Agent exposes the [Grafana Agent Flow UI][], which differs - from Promtail's Web UI. - -[Promtail]: https://www.grafana.com/docs/loki//clients/promtail/ -[debugging]: #debugging -[expanded in the config file]: https://www.grafana.com/docs/loki//clients/promtail/configuration/#use-environment-variables-in-the-configuration - -{{% docs/reference %}} -[local.file_match]: "/docs/agent/ -> /docs/agent//flow/reference/components/local.file_match.md" -[local.file_match]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/local.file_match.md" -[loki.source.file]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.source.file.md" -[loki.source.file]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.file.md" -[loki.write]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.write.md" -[loki.write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.write.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/components.md" -[convert]: "/docs/agent/ -> /docs/agent//flow/reference/cli/convert.md" -[convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/convert.md" -[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/run.md" -[Start the agent]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md" -[Start the agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/start-agent.md" -[Flow Debugging]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md" -[Flow Debugging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging.md" -[River]: "/docs/agent/ -> /docs/agent//flow/config-language/_index.md" -[River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/_index.md" -[Grafana Agent Flow UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging#grafana-agent-flow-ui" -[Grafana Agent Flow UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging#grafana-agent-flow-ui" -{{% /docs/reference %}} diff --git a/docs/sources/flow/getting-started/migrating-from-static.md b/docs/sources/flow/getting-started/migrating-from-static.md deleted file mode 100644 index 0b1f0bedd2d0..000000000000 --- a/docs/sources/flow/getting-started/migrating-from-static.md +++ /dev/null @@ -1,370 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/getting-started/migrating-from-static/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-static/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/migrating-from-static/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/migrating-from-static/ -description: Learn how to migrate your configuration from Grafana Agent Static mode to Flow mode -menuTitle: Migrate from Static mode to Flow mode -title: Migrate Grafana Agent from Static mode to Flow 
mode -weight: 340 ---- - -# Migrate Grafana Agent from Static mode to Flow mode - -The built-in Grafana Agent convert command can migrate your [Static][] mode -configuration to a Flow mode configuration. - -This topic describes how to: - -* Convert a Grafana Agent Static mode configuration to a Flow mode configuration. -* Run a Grafana Agent Static mode configuration natively using Grafana Agent Flow mode. - -## Components used in this topic - -* [prometheus.scrape][] -* [prometheus.remote_write][] -* [local.file_match][] -* [loki.process][] -* [loki.source.file][] -* [loki.write][] - -## Before you begin - -* You must have an existing Grafana Agent Static mode configuration. -* You must be familiar with the [Components][] concept in Grafana Agent Flow mode. - -## Convert a Static mode configuration - -To fully migrate Grafana Agent from [Static][] mode to Flow mode, you must convert -your Static mode configuration into a Flow mode configuration. -This conversion will enable you to take full advantage of the many additional -features available in Grafana Agent Flow mode. - -> In this task, we will use the [convert][] CLI command to output a Flow mode -> configuration from a Static mode configuration. - -1. Open a terminal window and run the following command: - - {{< code >}} - - ```static-binary - AGENT_MODE=flow grafana-agent convert --source-format=static --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - ```flow-binary - grafana-agent-flow convert --source-format=static --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - {{< /code >}} - - Replace the following: - * `INPUT_CONFIG_PATH`: The full path to the [Static][] configuration. - * `OUTPUT_CONFIG_PATH`: The full path to output the flow configuration. - -1. [Start the Agent][] in Flow mode using the new Flow mode configuration - from `OUTPUT_CONFIG_PATH`: - -### Debugging - -1. If the convert command cannot convert a [Static] mode configuration, diagnostic - information is sent to `stderr`. You can use the `--bypass-errors` flag to - bypass any non-critical issues and output the Flow mode configuration - using a best-effort conversion. - - {{% admonition type="caution" %}}If you bypass the errors, the behavior of the converted configuration may not match the original [Static] mode configuration. Make sure you fully test the converted configuration before using it in a production environment.{{% /admonition %}} - - {{< code >}} - - ```static-binary - AGENT_MODE=flow grafana-agent convert --source-format=static --bypass-errors --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - ```flow-binary - grafana-agent-flow convert --source-format=static --bypass-errors --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - {{< /code >}} - -1. You can use the `--report` flag to output a diagnostic report. - - {{< code >}} - - ```static-binary - AGENT_MODE=flow grafana-agent convert --source-format=static --report=OUTPUT_REPORT_PATH --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - ```flow-binary - grafana-agent-flow convert --source-format=static --report=OUTPUT_REPORT_PATH --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH - ``` - - {{< /code >}} - - * Replace `OUTPUT_REPORT_PATH` with the output path for the report. - - Using the [example](#example) Grafana Agent Static Mode configuration below, the diagnostic - report provides the following information: - - ```plaintext - (Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. 
- ``` - -## Run a Static mode configuration - -If you’re not ready to completely switch to a Flow mode configuration, you can run -Grafana Agent using your existing Grafana Agent Static mode configuration. -The `--config.format=static` flag tells Grafana Agent to convert your [Static] mode -configuration to Flow mode and load it directly without saving the new -configuration. This allows you to try Flow mode without modifying your existing -Grafana Agent Static mode configuration infrastructure. - -> In this task, we will use the [run][] CLI command to run Grafana Agent in Flow -> mode using a Static mode configuration. - -[Start the Agent][] in Flow mode and include the command line flag -`--config.format=static`. Your configuration file must be a valid [Static] -mode configuration file. - -### Debugging - -1. You can follow the convert CLI command [debugging][] instructions to generate - a diagnostic report. - -1. Refer to the Grafana Agent [Flow Debugging][] for more information about - running Grafana Agent in Flow mode. - -1. If your [Static] mode configuration can't be converted and loaded directly into - Grafana Agent, diagnostic information is sent to `stderr`. You can use the ` - --config.bypass-conversion-errors` flag with `--config.format=static` to bypass any - non-critical issues and start the Agent. - - {{% admonition type="caution" %}}If you bypass the errors, the behavior of the converted configuration may not match the original Grafana Agent Static mode configuration. Do not use this flag in a production environment.{{%/admonition %}} - -## Example - -This example demonstrates converting a [Static] mode configuration file to a Flow mode configuration file. - -The following [Static] mode configuration file provides the input for the conversion: - -```yaml -server: - log_level: info - -metrics: - global: - scrape_interval: 15s - remote_write: - - url: https://prometheus-us-central1.grafana.net/api/prom/push - basic_auth: - username: USERNAME - password: PASSWORD - configs: - - name: test - host_filter: false - scrape_configs: - - job_name: local-agent - static_configs: - - targets: ['127.0.0.1:12345'] - labels: - cluster: 'localhost' - -logs: - global: - file_watch_config: - min_poll_frequency: 1s - max_poll_frequency: 5s - positions_directory: /var/lib/agent/data-agent - configs: - - name: varlogs - scrape_configs: - - job_name: varlogs - static_configs: - - targets: - - localhost - labels: - job: varlogs - host: mylocalhost - __path__: /var/log/*.log - pipeline_stages: - - match: - selector: '{filename="/var/log/*.log"}' - stages: - - drop: - expression: '^[^0-9]{4}' - - regex: - expression: '^(?P\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}) \[(?P[[:alpha:]]+)\] (?:\d+)\#(?:\d+): \*(?:\d+) (?P.+)$' - - pack: - labels: - - level - clients: - - url: https://USER_ID:API_KEY@logs-prod3.grafana.net/loki/api/v1/push -``` - -The convert command takes the YAML file as input and outputs a [River][] file. 
- -{{< code >}} - -```static-binary -AGENT_MODE=flow grafana-agent convert --source-format=static --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH -``` - -```flow-binary -grafana-agent-flow convert --source-format=static --output=OUTPUT_CONFIG_PATH INPUT_CONFIG_PATH -``` - -{{< /code >}} - -The new Flow mode configuration file looks like this: - -```river -prometheus.scrape "metrics_test_local_agent" { - targets = [{ - __address__ = "127.0.0.1:12345", - cluster = "localhost", - }] - forward_to = [prometheus.remote_write.metrics_test.receiver] - job_name = "local-agent" - scrape_interval = "15s" -} - -prometheus.remote_write "metrics_test" { - endpoint { - name = "test-3a2a1b" - url = "https://prometheus-us-central1.grafana.net/api/prom/push" - - basic_auth { - username = "USERNAME" - password = "PASSWORD" - } - - queue_config { } - - metadata_config { } - } -} - -local.file_match "logs_varlogs_varlogs" { - path_targets = [{ - __address__ = "localhost", - __path__ = "/var/log/*.log", - host = "mylocalhost", - job = "varlogs", - }] -} - -loki.process "logs_varlogs_varlogs" { - forward_to = [loki.write.logs_varlogs.receiver] - - stage.match { - selector = "{filename=\"/var/log/*.log\"}" - - stage.drop { - expression = "^[^0-9]{4}" - } - - stage.regex { - expression = "^(?P\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2}) \\[(?P[[:alpha:]]+)\\] (?:\\d+)\\#(?:\\d+): \\*(?:\\d+) (?P.+)$" - } - - stage.pack { - labels = ["level"] - ingest_timestamp = false - } - } -} - -loki.source.file "logs_varlogs_varlogs" { - targets = local.file_match.logs_varlogs_varlogs.targets - forward_to = [loki.process.logs_varlogs_varlogs.receiver] - - file_watch { - min_poll_frequency = "1s" - max_poll_frequency = "5s" - } -} - -loki.write "logs_varlogs" { - endpoint { - url = "https://USER_ID:API_KEY@logs-prod3.grafana.net/loki/api/v1/push" - } - external_labels = {} -} -``` - -## Limitations - -Configuration conversion is done on a best-effort basis. The Agent will issue -warnings or errors where the conversion cannot be performed. - -Once the configuration is converted, we recommend that you review -the Flow mode configuration file, and verify that it is correct -before starting to use it in a production environment. - -Furthermore, we recommend that you review the following checklist: - -* The following configuration options are not available for conversion to Flow - mode: [Integrations next][], [Traces][], and [Agent Management][]. Any - additional unsupported features are returned as errors during conversion. -* There is no gRPC server to configure for Flow mode, so any non-default config - will show as unsupported during the conversion. -* Check if you are using any extra command line arguments with Static mode that - are not present in your configuration file. For example, `-server.http.address`. -* Check if you are using any environment variables in your [Static] mode configuration. - These will be evaluated during conversion and you may want to replace them - with the Flow Standard library [env] function after conversion. -* Review additional [Prometheus Limitations] for limitations specific to your - [Metrics] config. -* Review additional [Promtail Limitations] for limitations specific to your - [Logs] config. -* The logs produced by Grafana Agent Flow mode will differ from those - produced by Static mode. -* Grafana Agent exposes the [Grafana Agent Flow UI][]. 
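For the environment variable item above, this is a minimal sketch of what a converted secret could look like after you replace the evaluated literal with the Flow standard library [env] function. The `REMOTE_WRITE_PASSWORD` variable name is only an illustration:

```river
prometheus.remote_write "metrics_test" {
  endpoint {
    url = "https://prometheus-us-central1.grafana.net/api/prom/push"

    basic_auth {
      username = "USERNAME"
      // Illustration only: read the secret from an environment variable
      // instead of keeping the converted literal value in the file.
      password = env("REMOTE_WRITE_PASSWORD")
    }
  }
}
```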
- -[debugging]: #debugging - -{{% docs/reference %}} -[Static]: "/docs/agent/ -> /docs/agent//static" -[Static]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/static" -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.scrape.md" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.remote_write.md" -[local.file_match]: "/docs/agent/ -> /docs/agent//flow/reference/components/local.file_match.md" -[local.file_match]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/local.file_match.md" -[loki.process]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.process.md" -[loki.process]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.process.md" -[loki.source.file]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.source.file.md" -[loki.source.file]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.file.md" -[loki.write]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.write.md" -[loki.write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.write.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/components.md" -[convert]: "/docs/agent/ -> /docs/agent//flow/reference/cli/convert.md" -[convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/convert.md" -[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/run.md" -[Start the agent]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md" -[Start the agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/start-agent.md" -[Flow Debugging]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md" -[Flow Debugging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging.md" -[River]: "/docs/agent/ -> /docs/agent//flow/config-language/" -[River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/" -[Integrations next]: "/docs/agent/ -> /docs/agent//static/configuration/integrations/integrations-next/_index.md" -[Integrations next]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/traces-config.md -[Traces]: "/docs/agent/ -> /docs/agent//static/configuration/traces-config.md" -[Traces]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/traces-config.md" -[Agent Management]: "/docs/agent/ -> /docs/agent//static/configuration/agent-management.md" -[Agent Management]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/agent-management.md" -[env]: "/docs/agent/ -> /docs/agent//flow/reference/stdlib/env.md" -[env]: "/docs/grafana-cloud/ -> 
/docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/env.md" -[Prometheus Limitations]: "/docs/agent/ -> /docs/agent//flow/getting-started/migrating-from-prometheus.md#limitations" -[Prometheus Limitations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-prometheus.md#limitations" -[Promtail Limitations]: "/docs/agent/ -> /docs/agent//flow/getting-started/migrating-from-promtail.md#limitations" -[Promtail Limitations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-promtail.md#limitations" -[Metrics]: "/docs/agent/ -> /docs/agent//static/configuration/metrics-config.md" -[Metrics]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/metrics-config.md" -[Logs]: "/docs/agent/ -> /docs/agent//static/configuration/logs-config.md" -[Logs]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/static/logs-config.md" -[Grafana Agent Flow UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging#grafana-agent-flow-ui" -[Grafana Agent Flow UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging#grafana-agent-flow-ui" -{{% /docs/reference %}} diff --git a/docs/sources/flow/monitoring/_index.md b/docs/sources/flow/monitoring/_index.md deleted file mode 100644 index 41e5cffca7c8..000000000000 --- a/docs/sources/flow/monitoring/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/monitoring/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/ -canonical: https://grafana.com/docs/agent/latest/flow/monitoring/ -title: Monitoring Grafana Agent Flow -description: Learn about monitoring Grafana Agent Flow -weight: 500 ---- - -# Monitoring Grafana Agent Flow - -This section details various ways to monitor and debug Grafana Agent Flow. - -{{< section >}} diff --git a/docs/sources/flow/monitoring/component_metrics.md b/docs/sources/flow/monitoring/component_metrics.md deleted file mode 100644 index ba9762647f4a..000000000000 --- a/docs/sources/flow/monitoring/component_metrics.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/monitoring/component_metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/component_metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/component_metrics/ -- component-metrics/ -canonical: https://grafana.com/docs/agent/latest/flow/monitoring/component_metrics/ -title: Component metrics -description: Learn about component metrics -weight: 200 ---- - -# Component metrics - -Grafana Agent Flow [components][] may optionally expose Prometheus metrics -which can be used to investigate the behavior of that component. These -component-specific metrics are only generated when an instance of that -component is running. - -> Component-specific metrics are different than any metrics being processed by -> the component. Component-specific metrics are used to expose the state of a -> component for observability, alerting, and debugging. - -Component-specific metrics are exposed at the `/metrics` HTTP endpoint of the -Grafana Agent HTTP server, which defaults to listening on -`http://localhost:12345`. 
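To quickly confirm which component-specific series your agent currently exposes, you can request the endpoint directly. This is a sketch that assumes the default listen address and filters on the `component_id` label:

```bash
curl -s http://localhost:12345/metrics | grep 'component_id='
```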
- -> The documentation for the [`grafana-agent run`][grafana-agent run] command describes how to -> modify the address Grafana Agent listens on for HTTP traffic. - -Component-specific metrics will have a `component_id` label matching the -component ID generating those metrics. For example, component-specific metrics -for a `prometheus.remote_write` component labeled `production` will have a -`component_id` label with the value `prometheus.remote_write.production`. - -The [reference documentation][] for each component will describe the list of -component-specific metrics that component exposes. Not all components will -expose metrics. - -{{% docs/reference %}} -[components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/components.md" -[grafana-agent run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[grafana-agent run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/run.md" -[reference documentation]: "/docs/agent/ -> /docs/agent//flow/reference/components" -[reference documentation]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components" -{{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/monitoring/controller_metrics.md b/docs/sources/flow/monitoring/controller_metrics.md deleted file mode 100644 index f45b114724cb..000000000000 --- a/docs/sources/flow/monitoring/controller_metrics.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/monitoring/controller_metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/controller_metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/controller_metrics/ -- controller-metrics/ -canonical: https://grafana.com/docs/agent/latest/flow/monitoring/controller_metrics/ -title: Controller metrics -description: Learn about controller metrics -weight: 100 ---- - -# Controller metrics - -The Grafana Agent Flow [component controller][] exposes Prometheus metrics -which can be used to investigate the controller state. - -Metrics for the controller are exposed at the `/metrics` HTTP endpoint of the -Grafana Agent HTTP server, which defaults to listening on -`http://localhost:12345`. - -> The documentation for the [`grafana-agent run`][grafana-agent run] command -> describes how to modify the address Grafana Agent listens on for HTTP -> traffic. - -The controller exposes the following metrics: - -* `agent_component_controller_evaluating` (Gauge): Set to `1` whenever the - component controller is currently evaluating components. Note that this value - may be misrepresented depending on how fast evaluations complete or how often - evaluations occur. -* `agent_component_controller_running_components` (Gauge): The current - number of running components by health. The health is represented in the - `health_type` label. -* `agent_component_evaluation_seconds` (Histogram): The time it takes to - evaluate components after one of their dependencies is updated. -* `agent_component_dependencies_wait_seconds` (Histogram): Time spent by - components waiting to be evaluated after one of their dependencies is updated. -* `agent_component_evaluation_queue_size` (Gauge): The current number of - component evaluations waiting to be performed. 
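To inspect these values directly, you can request the metrics endpoint and filter on the controller metric names. This sketch assumes the default listen address:

```bash
curl -s http://localhost:12345/metrics | grep agent_component
```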
- -{{% docs/reference %}} -[component controller]: "/docs/agent/ -> /docs/agent//flow/concepts/component_controller.md" -[component controller]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/component_controller.md" -[grafana-agent run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[grafana-agent run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/run.md" -{{% /docs/reference %}} diff --git a/docs/sources/flow/monitoring/debugging.md b/docs/sources/flow/monitoring/debugging.md deleted file mode 100644 index cffa6f5ffb5d..000000000000 --- a/docs/sources/flow/monitoring/debugging.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/monitoring/debugging/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/debugging/ -canonical: https://grafana.com/docs/agent/latest/flow/monitoring/debugging/ -title: Debugging -description: Learn about debugging -weight: 300 ---- - -# Debugging - -Follow these steps to debug issues with Grafana Agent Flow: - -1. Use the Grafana Agent Flow UI to debug issues. -2. If the UI doesn't help with debugging an issue, logs can be examined - instead. - -## Grafana Agent Flow UI - -Grafana Agent Flow includes an embedded UI viewable from Grafana Agent's HTTP -server, which defaults to listening at `http://localhost:12345`. - -> **NOTE**: For security reasons, installations of Grafana Agent Flow on -> non-containerized platforms default to listening on `localhost`. default -> prevents other machines on the network from being able to view the UI. -> -> To expose the UI to other machines on the network on non-containerized -> platforms, refer to the documentation for how you [installed][install] -> Grafana Agent Flow. -> -> If you are running a custom installation of Grafana Agent Flow, refer to the -> documentation for [the `grafana-agent run` command][grafana-agent run] to -> learn how to change the HTTP listen address, and pass the appropriate flag -> when running Grafana Agent Flow. - -### Home page - -![](../../../assets/ui_home_page.png) - -The home page shows a table of components defined in the configuration file along with -their health. - -Click **View** on a row in the table to navigate to the [Component detail page](#component-detail-page) -for that component. - -Click the Grafana Agent logo to navigate back to the home page. - -### Graph page - -![](../../../assets/ui_graph_page.png) - -The **Graph** page shows a graph view of components defined in the configuration file -along with their health. Clicking a component in the graph navigates to the -[Component detail page](#component-detail-page) for that component. - -### Component detail page - -![](../../../assets/ui_component_detail_page.png) - -The component detail page shows the following information for each component: - -* The health of the component with a message explaining the health. -* The current evaluated arguments for the component. -* The current exports for the component. -* The current debug info for the component (if the component has debug info). - -> Values marked as a [secret][] are obfuscated and will display as the text -> `(secret)`. - -### Clustering page - -![](../../../assets/ui_clustering_page.png) - -The clustering page shows the following information for each cluster node: - -* The node's name. -* The node's advertised address. 
-* The node's current state (Viewer/Participant/Terminating). -* The local node that serves the UI. - -## Debugging using the UI - -To debug using the UI: - -* Ensure that no component is reported as unhealthy. -* Ensure that the arguments and exports for misbehaving components appear - correct. - -## Examining logs - -Logs may also help debug issues with Grafana Agent Flow. - -To reduce logging noise, many components hide debugging info behind debug-level -log lines. It is recommended that you configure the [`logging` block][logging] -to show debug-level log lines when debugging issues with Grafana Agent Flow. - -The location of Grafana Agent's logs is different based on how it is deployed. -Refer to the [`logging` block][logging] page to see how to find logs for your -system. - -## Debugging clustering issues - -To debug issues when using [clustering][], check for the following symptoms. - -- **Cluster not converging**: The cluster peers are not converging on the same - view of their peers' status. This is most likely due to network connectivity -issues between the cluster nodes. Use the Flow UI of each running peer to -understand which nodes are not being picked up correctly. -- **Cluster split brain**: The cluster peers are not aware of one another, - thinking they’re the only node present. Again, check for network connectivity -issues. Check that the addresses or DNS names given to the node to join are -correctly formatted and reachable. -- **Configuration drift**: Clustering assumes that all nodes are running with - the same configuration file at roughly the same time. Check the logs for -issues with the reloaded configuration file as well as the graph page to verify -changes have been applied. -- **Node name conflicts**: Clustering assumes all nodes have unique names; - nodes with conflicting names are rejected and will not join the cluster. Look -at the clustering UI page for the list of current peers with their names, and -check the logs for any reported name conflict events. -- **Node stuck in terminating state**: The node attempted to gracefully shut -down and set its state to Terminating, but it has not completely gone away. Check -the clustering page to view the state of the peers and verify that the -terminating Agent has been shut down. 
- -{{% docs/reference %}} -[logging]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/logging.md" -[logging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/logging.md" -[clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering.md" -[clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/clustering.md" -[install]: "/docs/agent/ -> /docs/agent//flow/setup/install" -[install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install" -[secret]: "/docs/agent/ -> /docs/agent//flow/config-language/expressions/types_and_values.md#secrets.md" -[secret]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/types_and_values.md#secrets.md" -[grafana-agent run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[grafana-agent run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/run.md" -{{% /docs/reference %}} - diff --git a/docs/sources/flow/reference/_index.md b/docs/sources/flow/reference/_index.md index e130628f0033..5c4e88aac9cc 100644 --- a/docs/sources/flow/reference/_index.md +++ b/docs/sources/flow/reference/_index.md @@ -3,16 +3,16 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/ +- /docs/grafana-cloud/send-data/agent/flow/reference/ canonical: https://grafana.com/docs/agent/latest/flow/reference/ -title: Grafana Agent Flow Reference -menuTitle: Reference description: The reference-level documentaiton for Grafana Agent +menuTitle: Reference +title: Grafana Agent Flow Reference weight: 600 --- -# Grafana Agent Flow Reference +# {{% param "PRODUCT_NAME" %}} Reference -This section provides reference-level documentation for the various parts of -Grafana Agent Flow: +This section provides reference-level documentation for the various parts of {{< param "PRODUCT_NAME" >}}: {{< section >}} diff --git a/docs/sources/flow/reference/cli/_index.md b/docs/sources/flow/reference/cli/_index.md index e48a3f703623..43fa4be774fd 100644 --- a/docs/sources/flow/reference/cli/_index.md +++ b/docs/sources/flow/reference/cli/_index.md @@ -3,28 +3,27 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/cli/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/ +- /docs/grafana-cloud/send-data/agent/flow/reference/cli/ canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/ -description: The Grafana Agent command line interface provides subcommands to perform - various operations. +description: Learn about the Grafana Agent command line interface menuTitle: Command-line interface title: The Grafana Agent command-line interface -description: Learn about the Grafana Agent command line interface weight: 100 --- -# The Grafana Agent command-line interface +# The {{% param "PRODUCT_ROOT_NAME" %}} command-line interface When in Flow mode, the `grafana-agent` binary exposes a command-line interface with subcommands to perform various operations. -The most common subcommand is [`run`][run] which accepts a config file and -starts Grafana Agent Flow. +The most common subcommand is [`run`][run] which accepts a configuration file and +starts {{< param "PRODUCT_NAME" >}}. 
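As a quick illustration of the `run` subcommand described above, a minimal invocation might look like the following sketch (the binary name variants and the `config.river` file name are assumptions, not taken from this changeset):

```shell
# Start Grafana Agent Flow in the foreground with a single River configuration file.
AGENT_MODE=flow grafana-agent run config.river

# Equivalent form when the dedicated flow binary is installed (assumption).
grafana-agent-flow run config.river
```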
Available commands: -* [`convert`][convert]: Convert a Grafana Agent configuration file. -* [`fmt`][fmt]: Format a Grafana Agent Flow configuration file. -* [`run`][run]: Start Grafana Agent Flow, given a configuration file. +* [`convert`][convert]: Convert a {{< param "PRODUCT_ROOT_NAME" >}} configuration file. +* [`fmt`][fmt]: Format a {{< param "PRODUCT_NAME" >}} configuration file. +* [`run`][run]: Start {{< param "PRODUCT_NAME" >}}, given a configuration file. * [`tools`][tools]: Read the WAL and provide statistical information. * `completion`: Generate shell completion for the `grafana-agent-flow` CLI. * `help`: Print help for supported commands. diff --git a/docs/sources/flow/reference/cli/convert.md b/docs/sources/flow/reference/cli/convert.md index cd976f108784..a9a3810ec3ee 100644 --- a/docs/sources/flow/reference/cli/convert.md +++ b/docs/sources/flow/reference/cli/convert.md @@ -3,20 +3,19 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/cli/convert/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/convert/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/convert/ +- /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert/ canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/convert/ -description: The `convert` command converts supported configuration formats to River - format. +description: Learn about the convert command labels: stage: beta menuTitle: convert title: The convert command -description: Learn about the convert command weight: 100 --- # The convert command -The `convert` command converts a supported configuration format to Grafana Agent Flow River format. +The `convert` command converts a supported configuration format to {{< param "PRODUCT_NAME" >}} River format. ## Usage @@ -28,16 +27,16 @@ Usage: Replace the following: * `FLAG`: One or more flags that define the input and output of the command. - * `FILE_NAME`: The Grafana Agent configuration file. + * `FILE_NAME`: The {{< param "PRODUCT_ROOT_NAME" >}} configuration file. -If the `FILE_NAME` argument is not provided or if the `FILE_NAME` argument is +If the `FILE_NAME` argument isn't provided or if the `FILE_NAME` argument is equal to `-`, `convert` converts the contents of standard input. Otherwise, `convert` reads and converts the file from disk specified by the argument. -There are several different flags available for the `convert` command. You can use the `--output` flag to write the contents of the converted config to a specified path. You can use the `--report` flag to generate a diagnostic report. The `--bypass-errors` flag allows you to bypass any [errors] generated during the file conversion. +There are several different flags available for the `convert` command. You can use the `--output` flag to write the contents of the converted configuration to a specified path. You can use the `--report` flag to generate a diagnostic report. The `--bypass-errors` flag allows you to bypass any [errors] generated during the file conversion. -The command fails if the source config has syntactically incorrect -configuration or cannot be converted to Grafana Agent Flow River format. +The command fails if the source configuration has syntactically incorrect +configuration or can't be converted to {{< param "PRODUCT_NAME" >}} River format. The following flags are supported: @@ -49,6 +48,8 @@ The following flags are supported: * `--bypass-errors`, `-b`: Enable bypassing errors when converting. 
+* `--extra-args`, `-e`: Extra arguments from the original format used by the converter. + [prometheus]: #prometheus [promtail]: #promtail [static]: #static @@ -56,10 +57,10 @@ The following flags are supported: ### Defaults -Flow Defaults are managed as follows: -* If a provided source config value matches a Flow default value, the property is left off the Flow output. -* If a non-provided source config value default matches a Flow default value, the property is left off the Flow output. -* If a non-provided source config value default doesn't match a Flow default value, the Flow default value is included in the Flow output. +{{< param "PRODUCT_NAME" >}} defaults are managed as follows: +* If a provided source configuration value matches a {{< param "PRODUCT_NAME" >}} default value, the property is left off the output. +* If a non-provided source configuration value default matches a {{< param "PRODUCT_NAME" >}} default value, the property is left off the output. +* If a non-provided source configuration value default doesn't match a {{< param "PRODUCT_NAME" >}} default value, the default value is included in the output. ### Errors @@ -71,38 +72,45 @@ where an output can still be generated. These can be bypassed using the Using the `--source-format=prometheus` will convert the source config from [Prometheus v2.45](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/) -to Grafana Agent Flow config. +to {{< param "PRODUCT_NAME" >}} configuration. This includes Prometheus features such as -[scrape_config](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#scrape_config), +[scrape_config](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#scrape_config), [relabel_config](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#relabel_config), [metric_relabel_configs](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#metric_relabel_configs), [remote_write](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#remote_write), -and many supported *_sd_configs. Unsupported features in a source config result +and many supported *_sd_configs. Unsupported features in a source configuration result in [errors]. -Refer to [Migrate from Prometheus to Grafana Agent Flow]({{< relref "../../getting-started/migrating-from-prometheus/" >}}) for a detailed migration guide. +Refer to [Migrate from Prometheus to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-prometheus/" >}}) for a detailed migration guide. ### Promtail Using the `--source-format=promtail` will convert the source configuration from [Promtail v2.8.x](/docs/loki/v2.8.x/clients/promtail/) -to Grafana Agent Flow configuration. +to {{< param "PRODUCT_NAME" >}} configuration. Nearly all [Promtail features](/docs/loki/v2.8.x/clients/promtail/configuration/) -are supported and can be converted to Grafana Agent Flow config. +are supported and can be converted to {{< param "PRODUCT_NAME" >}} configuration. If you have unsupported features in a source configuration, you will receive [errors] when you convert to a flow configuration. The converter will also raise warnings for configuration options that may require your attention. -Refer to [Migrate from Promtail to Grafana Agent Flow]({{< relref "../../getting-started/migrating-from-promtail/" >}}) for a detailed migration guide.
+Refer to [Migrate from Promtail to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-promtail/" >}}) for a detailed migration guide. ### Static -Using the `--source-format=static` will convert the source configuration from -Grafana Agent [Static]({{< relref "../../../static" >}}) mode to Flow mode configuration. +Using the `--source-format=static` will convert the source configuration from a +[Grafana Agent Static]({{< relref "../../../static" >}}) configuration to a {{< param "PRODUCT_NAME" >}} configuration. + +Include `--extra-args` for passing additional command line flags from the original format. +For example, `--extra-args="-enable-features=integrations-next"` will convert a Grafana Agent Static +[integrations-next]({{< relref "../../../static/configuration/integrations/integrations-next/" >}}) +configuration to a {{< param "PRODUCT_NAME" >}} configuration. You can also +expand environment variables with `--extra-args="-config.expand-env"`. You can combine multiple command line +flags with a space between each flag, for example `--extra-args="-enable-features=integrations-next -config.expand-env"`. If you have unsupported features in a Static mode source configuration, you will receive [errors][] when you convert to a Flow mode configuration. The converter will also raise warnings for configuration options that may require your attention. -Refer to [Migrate Grafana Agent from Static mode to Flow mode]({{< relref "../../getting-started/migrating-from-static/" >}}) for a detailed migration guide. \ No newline at end of file +Refer to [Migrate from Grafana Agent Static to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-static/" >}}) for a detailed migration guide. \ No newline at end of file diff --git a/docs/sources/flow/reference/cli/fmt.md b/docs/sources/flow/reference/cli/fmt.md index 18ae1fdddd41..7a266921d365 100644 --- a/docs/sources/flow/reference/cli/fmt.md +++ b/docs/sources/flow/reference/cli/fmt.md @@ -3,17 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/cli/fmt/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/fmt/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/fmt/ +- /docs/grafana-cloud/send-data/agent/flow/reference/cli/fmt/ canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/fmt/ -description: The `fmt` command formats a Grafana Agent configuration file. +description: Learn about the fmt command menuTitle: fmt title: The fmt command -description: Learn about the fmt command weight: 200 --- # The fmt command -The `fmt` command formats a given Grafana Agent Flow configuration file. +The `fmt` command formats a given {{< param "PRODUCT_NAME" >}} configuration file. ## Usage @@ -25,7 +25,7 @@ Usage: Replace the following: * `FLAG`: One or more flags that define the input and output of the command. - * `FILE_NAME`: The Grafana Agent configuration file. + * `FILE_NAME`: The {{< param "PRODUCT_NAME" >}} configuration file. If the `FILE_NAME` argument is not provided or if the `FILE_NAME` argument is equal to `-`, `fmt` formats the contents of standard input. Otherwise, @@ -42,4 +42,4 @@ properly. The following flags are supported: * `--write`, `-w`: Write the formatted file back to disk when not reading from - standard input. \ No newline at end of file + standard input. 
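The `convert` flags documented above compose on the command line. The following is a hedged sketch rather than output taken from this changeset; the file names are placeholders:

```shell
# Convert a Prometheus configuration to River and write it to disk.
grafana-agent-flow convert --source-format=prometheus --output=agent.river prometheus.yml

# Convert a Static mode configuration, passing original-format flags through --extra-args.
grafana-agent-flow convert --source-format=static \
  --extra-args="-enable-features=integrations-next -config.expand-env" \
  --output=agent.river agent-static.yaml
```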
diff --git a/docs/sources/flow/reference/cli/run.md b/docs/sources/flow/reference/cli/run.md index 8b068b78ac3e..4da0df47a473 100644 --- a/docs/sources/flow/reference/cli/run.md +++ b/docs/sources/flow/reference/cli/run.md @@ -3,19 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/cli/run/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/run/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/run/ +- /docs/grafana-cloud/send-data/agent/flow/reference/cli/run/ canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/run/ -description: The `run` command runs Grafana Agent in the foreground until an interrupt - is received. +description: Learn about the run command menuTitle: run title: The run command -description: Learn about the run command weight: 300 --- # The run command -The `run` command runs Grafana Agent Flow in the foreground until an -interrupt is received. +The `run` command runs {{< param "PRODUCT_NAME" >}} in the foreground until an interrupt is received. ## Usage @@ -27,18 +25,18 @@ Usage: Replace the following: * `FLAG`: One or more flags that define the input and output of the command. - * `PATH_NAME`: Required. The Grafana Agent configuration file/directory path. + * `PATH_NAME`: Required. The {{< param "PRODUCT_NAME" >}} configuration file/directory path. -If the `PATH_NAME` argument is not provided, or if the configuration path can't be loaded or +If the `PATH_NAME` argument is not provided, or if the configuration path can't be loaded or contains errors during the initial load, the `run` command will immediately exit and show an error message. -If you give the `PATH_NAME` argument a directory path, the agent will find `*.river` files +If you give the `PATH_NAME` argument a directory path, {{< param "PRODUCT_NAME" >}} will find `*.river` files (ignoring nested directories) and load them as a single configuration source. However, component names must be **unique** across all River files, and configuration blocks must not be repeated. -Grafana Agent Flow will continue to run if subsequent reloads of the configuration +{{< param "PRODUCT_NAME" >}} will continue to run if subsequent reloads of the configuration file fail, potentially marking components as unhealthy depending on the nature -of the failure. When this happens, Grafana Agent Flow will continue functioning +of the failure. When this happens, {{< param "PRODUCT_NAME" >}} will continue functioning in the last valid state. `run` launches an HTTP server that exposes metrics about itself and its @@ -54,7 +52,7 @@ The following flags are supported: * `--server.http.ui-path-prefix`: Base path where the UI is exposed (default `/`). * `--storage.path`: Base directory where components can store data (default `data-agent/`). * `--disable-reporting`: Disable [data collection][] (default `false`). -* `--cluster.enabled`: Start the Agent in clustered mode (default `false`). +* `--cluster.enabled`: Start {{< param "PRODUCT_NAME" >}} in clustered mode (default `false`). * `--cluster.node-name`: The name to use for this node (defaults to the environment's hostname). * `--cluster.join-addresses`: Comma-separated list of addresses to join the cluster at (default `""`). Mutually exclusive with `--cluster.discover-peers`. * `--cluster.discover-peers`: List of key-value tuples for discovering peers (default `""`). Mutually exclusive with `--cluster.join-addresses`. 
@@ -65,6 +63,7 @@ The following flags are supported: * `--cluster.name`: Name to prevent nodes without this identifier from joining the cluster (default `""`). * `--config.format`: The format of the source file. Supported formats: `flow`, `prometheus`, `promtail`, `static` (default `"flow"`). * `--config.bypass-conversion-errors`: Enable bypassing errors when converting (default `false`). +* `--config.extra-args`: Extra arguments from the original format used by the converter. [in-memory HTTP traffic]: {{< relref "../../concepts/component_controller.md#in-memory-traffic" >}} [data collection]: {{< relref "../../../data-collection" >}} @@ -75,7 +74,7 @@ The following flags are supported: The configuration file can be reloaded from disk by either: * Sending an HTTP POST request to the `/-/reload` endpoint. -* Sending a `SIGHUP` signal to the Grafana Agent process. +* Sending a `SIGHUP` signal to the {{< param "PRODUCT_NAME" >}} process. When this happens, the [component controller][] synchronizes the set of running components with the latest set of components specified in the configuration file. @@ -90,7 +89,7 @@ reloading. ## Clustering (beta) -The `--cluster.enabled` command-line argument starts Grafana Agent in +The `--cluster.enabled` command-line argument starts {{< param "PRODUCT_ROOT_NAME" >}} in [clustering][] mode. The rest of the `--cluster.*` command-line flags can be used to configure how nodes discover and connect to one another. @@ -98,16 +97,16 @@ Each cluster member’s name must be unique within the cluster. Nodes which try to join with a conflicting name are rejected and will fall back to bootstrapping a new cluster of their own. -Peers communicate over HTTP/2 on the agent's built-in HTTP server. Each node +Peers communicate over HTTP/2 on the built-in HTTP server. Each node must be configured to accept connections on `--server.http.listen-addr` and the address defined or inferred in `--cluster.advertise-address`. -If the `--cluster.advertise-address` flag is not explicitly set, the agent +If the `--cluster.advertise-address` flag isn't explicitly set, {{< param "PRODUCT_NAME" >}} tries to infer a suitable one from `--cluster.advertise-interfaces`. -If `--cluster.advertise-interfaces` is not explicitly set, the agent will +If `--cluster.advertise-interfaces` isn't explicitly set, {{< param "PRODUCT_NAME" >}} will infer one from the `eth0` and `en0` local network interfaces. -The agent will fail to start if it can't determine the advertised address. -Since Windows does not use the interface names `eth0` or `en0`, Windows users must explicitly pass +{{< param "PRODUCT_NAME" >}} will fail to start if it can't determine the advertised address. +Since Windows doesn't use the interface names `eth0` or `en0`, Windows users must explicitly pass at least one valid network interface for `--cluster.advertise-interfaces` or a value for `--cluster.advertise-address`. The comma-separated list of addresses provided in `--cluster.join-addresses` @@ -146,10 +145,10 @@ The first node that is used to bootstrap a new cluster (also known as the "seed node") can either omit the flags that specify peers to join or can try to connect to itself. -To join or rejoin a cluster, the agent will try to connect to a certain number of peers limited by the `--cluster.max-join-peers` flag. +To join or rejoin a cluster, {{< param "PRODUCT_NAME" >}} will try to connect to a certain number of peers limited by the `--cluster.max-join-peers` flag. 
This flag can be useful for clusters of significant sizes because connecting to a high number of peers can be an expensive operation. To disable this behavior, set the `--cluster.max-join-peers` flag to 0. -If the value of `--cluster.max-join-peers` is higher than the number of peers discovered, the agent will connect to all of them. +If the value of `--cluster.max-join-peers` is higher than the number of peers discovered, {{< param "PRODUCT_NAME" >}} will connect to all of them. The `--cluster.name` flag can be used to prevent clusters from accidentally merging. When `--cluster.name` is provided, nodes will only join peers who share the same cluster name value. @@ -158,39 +157,38 @@ Attempting to join a cluster with a wrong `--cluster.name` will result in a "fai ### Clustering states -Clustered agents are in one of three states: +Clustered {{< param "PRODUCT_ROOT_NAME" >}}s are in one of three states: -* **Viewer**: The agent has a read-only view of the cluster and is not - participating in workload distribution. +* **Viewer**: {{< param "PRODUCT_NAME" >}} has a read-only view of the cluster and isn't participating in workload distribution. -* **Participant**: The agent is participating in workload distribution for - components that have clustering enabled. +* **Participant**: {{< param "PRODUCT_NAME" >}} is participating in workload distribution for components that have clustering enabled. -* **Terminating**: The agent is shutting down and will no longer assign new - work to itself. +* **Terminating**: {{< param "PRODUCT_NAME" >}} is shutting down and will no longer assign new work to itself. -Agents initially join the cluster in the viewer state and then transition to -the participant state after the process startup completes. Agents then -transition to the terminating state when shutting down. +Each {{< param "PRODUCT_ROOT_NAME" >}} initially joins the cluster in the viewer state and then transitions to +the participant state after the process startup completes. Each {{< param "PRODUCT_ROOT_NAME" >}} then +transitions to the terminating state when shutting down. -The current state of a clustered agent is shown on the clustering page in the -[UI][]. +The current state of a clustered {{< param "PRODUCT_ROOT_NAME" >}} is shown on the clustering page in the [UI][]. -[UI]: {{< relref "../../monitoring/debugging.md#clustering-page" >}} +[UI]: {{< relref "../../tasks/debug.md#clustering-page" >}} ## Configuration conversion (beta) When you use the `--config.format` command-line argument with a value -other than `flow`, Grafana Agent converts the configuration file from +other than `flow`, {{< param "PRODUCT_ROOT_NAME" >}} converts the configuration file from the source format to River and immediately starts running with the new configuration. This conversion uses the converter API described in the [grafana-agent-flow convert][] docs. -If you also use the `--config.bypass-conversion-errors` command-line argument, -Grafana Agent will ignore any errors from the converter. Use this argument +If you include the `--config.bypass-conversion-errors` command-line argument, +{{< param "PRODUCT_NAME" >}} will ignore any errors from the converter. Use this argument with caution because the resulting conversion may not be equivalent to the original configuration. +Include `--config.extra-args` to pass additional command line flags from the original format to the converter. +Refer to [grafana-agent-flow convert][] for more details on how `extra-args` work. 
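Two hedged command-line sketches tie together the clustering and configuration-conversion flags described above. Host names, ports, and file names are placeholders, and port 12345 is assumed to be the default HTTP listen address:

```shell
# Run as part of a cluster, listening on all interfaces and joining two peers.
grafana-agent-flow run \
  --server.http.listen-addr=0.0.0.0:12345 \
  --cluster.enabled \
  --cluster.join-addresses=agent-0.example.com:12345,agent-1.example.com:12345 \
  config.river

# Run directly from a Static mode configuration, converting it on the fly and
# forwarding original-format flags to the converter.
AGENT_MODE=flow grafana-agent run \
  --config.format=static \
  --config.extra-args="-enable-features=integrations-next" \
  config.yaml
```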
+ [grafana-agent-flow convert]: {{< relref "./convert.md" >}} [clustering]: {{< relref "../../concepts/clustering.md" >}} [go-discover]: https://github.com/hashicorp/go-discover diff --git a/docs/sources/flow/reference/cli/tools.md b/docs/sources/flow/reference/cli/tools.md index ac888bda3fa6..b45e7f215a23 100644 --- a/docs/sources/flow/reference/cli/tools.md +++ b/docs/sources/flow/reference/cli/tools.md @@ -3,11 +3,11 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/cli/tools/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/tools/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/tools/ +- /docs/grafana-cloud/send-data/agent/flow/reference/cli/tools/ canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/tools/ -description: Command line tools that read the WAL and provide statistical information. +description: Learn about the tools command menuTitle: tools title: The tools command -description: Learn about the tools command weight: 400 --- @@ -24,7 +24,7 @@ guarantees and may change or be removed between releases. ### prometheus.remote_write sample-stats -Usage: +Usage: * `AGENT_MODE=flow grafana-agent tools prometheus.remote_write sample-stats [FLAG ...] WAL_DIRECTORY` * `grafana-agent-flow tools prometheus.remote_write sample-stats [FLAG ...] WAL_DIRECTORY` @@ -47,7 +47,7 @@ The following flag is supported: ### prometheus.remote_write target-stats -Usage: +Usage: * `AGENT_MODE=flow grafana-agent tools prometheus.remote_write target-stats --job JOB --instance INSTANCE WAL_DIRECTORY` * `grafana-agent-flow tools prometheus.remote_write target-stats --job JOB --instance INSTANCE WAL_DIRECTORY` diff --git a/docs/sources/flow/reference/compatibility/_index.md b/docs/sources/flow/reference/compatibility/_index.md new file mode 100644 index 000000000000..96539228f434 --- /dev/null +++ b/docs/sources/flow/reference/compatibility/_index.md @@ -0,0 +1,375 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/reference/compatible-components/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/compatible-components/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/compatible-components/ +- /docs/grafana-cloud/send-data/agent/flow/reference/compatible-components/ +canonical: https://grafana.com/docs/agent/latest/flow/reference/compatible-components/ +description: Learn about which components are compatible with each other in Grafana Agent Flow +title: Compatible components +weight: 400 +--- + +# Compatible components + +This section provides an overview of _some_ of the possible connections between +compatible components in Grafana Agent Flow. + +For each common data type, we provide a list of compatible components +that can export or consume it. + +{{% admonition type="note" %}} + +> The type of export may not be the only requirement for chaining components together. +> The value of an attribute may matter as well as its type. +> Please refer to each component's documentation for more details on what values are acceptable. +> +> For example: +> * A Prometheus component may always expect an `"__address__"` label inside a list of targets. +> * A `string` argument may only accept certain values like "traceID" or "spanID". 
+ +{{% /admonition %}} + +## Targets + +Targets are a `list(map(string))` - a [list]({{< relref "../../concepts/config-language/expressions/types_and_values/#naming-convention" >}}) of [maps]({{< relref "../../concepts/config-language/expressions/types_and_values/#naming-convention" >}}) with [string]({{< relref "../../concepts/config-language/expressions/types_and_values/#strings" >}}) values. +They can contain different key-value pairs, and you can use them with a wide range of +components. Some components require Targets to contain specific key-value pairs +to work correctly. It is recommended to always check component references for +details when working with Targets. + + +### Targets Exporters +The following components, grouped by namespace, _export_ Targets. + + + +{{< collapse title="discovery" >}} +- [discovery.azure]({{< relref "../components/discovery.azure.md" >}}) +- [discovery.consul]({{< relref "../components/discovery.consul.md" >}}) +- [discovery.consulagent]({{< relref "../components/discovery.consulagent.md" >}}) +- [discovery.digitalocean]({{< relref "../components/discovery.digitalocean.md" >}}) +- [discovery.dns]({{< relref "../components/discovery.dns.md" >}}) +- [discovery.docker]({{< relref "../components/discovery.docker.md" >}}) +- [discovery.dockerswarm]({{< relref "../components/discovery.dockerswarm.md" >}}) +- [discovery.ec2]({{< relref "../components/discovery.ec2.md" >}}) +- [discovery.eureka]({{< relref "../components/discovery.eureka.md" >}}) +- [discovery.file]({{< relref "../components/discovery.file.md" >}}) +- [discovery.gce]({{< relref "../components/discovery.gce.md" >}}) +- [discovery.hetzner]({{< relref "../components/discovery.hetzner.md" >}}) +- [discovery.http]({{< relref "../components/discovery.http.md" >}}) +- [discovery.ionos]({{< relref "../components/discovery.ionos.md" >}}) +- [discovery.kubelet]({{< relref "../components/discovery.kubelet.md" >}}) +- [discovery.kubernetes]({{< relref "../components/discovery.kubernetes.md" >}}) +- [discovery.kuma]({{< relref "../components/discovery.kuma.md" >}}) +- [discovery.lightsail]({{< relref "../components/discovery.lightsail.md" >}}) +- [discovery.linode]({{< relref "../components/discovery.linode.md" >}}) +- [discovery.marathon]({{< relref "../components/discovery.marathon.md" >}}) +- [discovery.nerve]({{< relref "../components/discovery.nerve.md" >}}) +- [discovery.nomad]({{< relref "../components/discovery.nomad.md" >}}) +- [discovery.openstack]({{< relref "../components/discovery.openstack.md" >}}) +- [discovery.ovhcloud]({{< relref "../components/discovery.ovhcloud.md" >}}) +- [discovery.puppetdb]({{< relref "../components/discovery.puppetdb.md" >}}) +- [discovery.relabel]({{< relref "../components/discovery.relabel.md" >}}) +- [discovery.scaleway]({{< relref "../components/discovery.scaleway.md" >}}) +- [discovery.serverset]({{< relref "../components/discovery.serverset.md" >}}) +- [discovery.triton]({{< relref "../components/discovery.triton.md" >}}) +- [discovery.uyuni]({{< relref "../components/discovery.uyuni.md" >}}) +{{< /collapse >}} + +{{< collapse title="local" >}} +- [local.file_match]({{< relref "../components/local.file_match.md" >}}) +{{< /collapse >}} + +{{< collapse title="prometheus" >}} +- [prometheus.exporter.agent]({{< relref "../components/prometheus.exporter.agent.md" >}}) +- [prometheus.exporter.apache]({{< relref "../components/prometheus.exporter.apache.md" >}}) +- [prometheus.exporter.azure]({{< relref "../components/prometheus.exporter.azure.md" >}}) +- 
[prometheus.exporter.blackbox]({{< relref "../components/prometheus.exporter.blackbox.md" >}}) +- [prometheus.exporter.cadvisor]({{< relref "../components/prometheus.exporter.cadvisor.md" >}}) +- [prometheus.exporter.cloudwatch]({{< relref "../components/prometheus.exporter.cloudwatch.md" >}}) +- [prometheus.exporter.consul]({{< relref "../components/prometheus.exporter.consul.md" >}}) +- [prometheus.exporter.dnsmasq]({{< relref "../components/prometheus.exporter.dnsmasq.md" >}}) +- [prometheus.exporter.elasticsearch]({{< relref "../components/prometheus.exporter.elasticsearch.md" >}}) +- [prometheus.exporter.gcp]({{< relref "../components/prometheus.exporter.gcp.md" >}}) +- [prometheus.exporter.github]({{< relref "../components/prometheus.exporter.github.md" >}}) +- [prometheus.exporter.kafka]({{< relref "../components/prometheus.exporter.kafka.md" >}}) +- [prometheus.exporter.memcached]({{< relref "../components/prometheus.exporter.memcached.md" >}}) +- [prometheus.exporter.mongodb]({{< relref "../components/prometheus.exporter.mongodb.md" >}}) +- [prometheus.exporter.mssql]({{< relref "../components/prometheus.exporter.mssql.md" >}}) +- [prometheus.exporter.mysql]({{< relref "../components/prometheus.exporter.mysql.md" >}}) +- [prometheus.exporter.oracledb]({{< relref "../components/prometheus.exporter.oracledb.md" >}}) +- [prometheus.exporter.postgres]({{< relref "../components/prometheus.exporter.postgres.md" >}}) +- [prometheus.exporter.process]({{< relref "../components/prometheus.exporter.process.md" >}}) +- [prometheus.exporter.redis]({{< relref "../components/prometheus.exporter.redis.md" >}}) +- [prometheus.exporter.snmp]({{< relref "../components/prometheus.exporter.snmp.md" >}}) +- [prometheus.exporter.snowflake]({{< relref "../components/prometheus.exporter.snowflake.md" >}}) +- [prometheus.exporter.squid]({{< relref "../components/prometheus.exporter.squid.md" >}}) +- [prometheus.exporter.statsd]({{< relref "../components/prometheus.exporter.statsd.md" >}}) +- [prometheus.exporter.unix]({{< relref "../components/prometheus.exporter.unix.md" >}}) +- [prometheus.exporter.vsphere]({{< relref "../components/prometheus.exporter.vsphere.md" >}}) +- [prometheus.exporter.windows]({{< relref "../components/prometheus.exporter.windows.md" >}}) +{{< /collapse >}} + + + + + +### Targets Consumers +The following components, grouped by namespace, _consume_ Targets. 
+ + + +{{< collapse title="discovery" >}} +- [discovery.relabel]({{< relref "../components/discovery.relabel.md" >}}) +{{< /collapse >}} + +{{< collapse title="local" >}} +- [local.file_match]({{< relref "../components/local.file_match.md" >}}) +{{< /collapse >}} + +{{< collapse title="loki" >}} +- [loki.source.docker]({{< relref "../components/loki.source.docker.md" >}}) +- [loki.source.file]({{< relref "../components/loki.source.file.md" >}}) +- [loki.source.kubernetes]({{< relref "../components/loki.source.kubernetes.md" >}}) +{{< /collapse >}} + +{{< collapse title="otelcol" >}} +- [otelcol.processor.discovery]({{< relref "../components/otelcol.processor.discovery.md" >}}) +{{< /collapse >}} + +{{< collapse title="prometheus" >}} +- [prometheus.scrape]({{< relref "../components/prometheus.scrape.md" >}}) +{{< /collapse >}} + +{{< collapse title="pyroscope" >}} +- [pyroscope.ebpf]({{< relref "../components/pyroscope.ebpf.md" >}}) +- [pyroscope.scrape]({{< relref "../components/pyroscope.scrape.md" >}}) +{{< /collapse >}} + + + + +## Prometheus `MetricsReceiver` + +The Prometheus metrics are sent between components using `MetricsReceiver`s. +`MetricsReceiver`s are [capsules]({{< relref "../../concepts/config-language/expressions/types_and_values/#capsules" >}}) +that are exported by components that can receive Prometheus metrics. Components that +can consume Prometheus metrics can be passed the `MetricsReceiver` as an argument. Use the +following components to build your Prometheus metrics pipeline: + + +### Prometheus `MetricsReceiver` Exporters +The following components, grouped by namespace, _export_ Prometheus `MetricsReceiver`. + + + +{{< collapse title="otelcol" >}} +- [otelcol.receiver.prometheus]({{< relref "../components/otelcol.receiver.prometheus.md" >}}) +{{< /collapse >}} + +{{< collapse title="prometheus" >}} +- [prometheus.relabel]({{< relref "../components/prometheus.relabel.md" >}}) +- [prometheus.remote_write]({{< relref "../components/prometheus.remote_write.md" >}}) +{{< /collapse >}} + + + + +### Prometheus `MetricsReceiver` Consumers +The following components, grouped by namespace, _consume_ Prometheus `MetricsReceiver`. + + + + +{{< collapse title="otelcol" >}} +- [otelcol.exporter.prometheus]({{< relref "../components/otelcol.exporter.prometheus.md" >}}) +{{< /collapse >}} + +{{< collapse title="prometheus" >}} +- [prometheus.operator.podmonitors]({{< relref "../components/prometheus.operator.podmonitors.md" >}}) +- [prometheus.operator.probes]({{< relref "../components/prometheus.operator.probes.md" >}}) +- [prometheus.operator.servicemonitors]({{< relref "../components/prometheus.operator.servicemonitors.md" >}}) +- [prometheus.receive_http]({{< relref "../components/prometheus.receive_http.md" >}}) +- [prometheus.relabel]({{< relref "../components/prometheus.relabel.md" >}}) +- [prometheus.scrape]({{< relref "../components/prometheus.scrape.md" >}}) +{{< /collapse >}} + + + + + +## Loki `LogsReceiver` + +`LogsReceiver` is a [capsule]({{< relref "../../concepts/config-language/expressions/types_and_values/#capsules" >}}) +that is exported by components that can receive Loki logs. Components that +consume `LogsReceiver` as an argument typically send logs to it. Use the +following components to build your Loki logs pipeline: + + +### Loki `LogsReceiver` Exporters +The following components, grouped by namespace, _export_ Loki `LogsReceiver`. 
+ + + +{{< collapse title="loki" >}} +- [loki.echo]({{< relref "../components/loki.echo.md" >}}) +- [loki.process]({{< relref "../components/loki.process.md" >}}) +- [loki.relabel]({{< relref "../components/loki.relabel.md" >}}) +- [loki.write]({{< relref "../components/loki.write.md" >}}) +{{< /collapse >}} + +{{< collapse title="otelcol" >}} +- [otelcol.receiver.loki]({{< relref "../components/otelcol.receiver.loki.md" >}}) +{{< /collapse >}} + + + + +### Loki `LogsReceiver` Consumers +The following components, grouped by namespace, _consume_ Loki `LogsReceiver`. + + + +{{< collapse title="faro" >}} +- [faro.receiver]({{< relref "../components/faro.receiver.md" >}}) +{{< /collapse >}} + +{{< collapse title="loki" >}} +- [loki.process]({{< relref "../components/loki.process.md" >}}) +- [loki.relabel]({{< relref "../components/loki.relabel.md" >}}) +- [loki.source.api]({{< relref "../components/loki.source.api.md" >}}) +- [loki.source.awsfirehose]({{< relref "../components/loki.source.awsfirehose.md" >}}) +- [loki.source.azure_event_hubs]({{< relref "../components/loki.source.azure_event_hubs.md" >}}) +- [loki.source.cloudflare]({{< relref "../components/loki.source.cloudflare.md" >}}) +- [loki.source.docker]({{< relref "../components/loki.source.docker.md" >}}) +- [loki.source.file]({{< relref "../components/loki.source.file.md" >}}) +- [loki.source.gcplog]({{< relref "../components/loki.source.gcplog.md" >}}) +- [loki.source.gelf]({{< relref "../components/loki.source.gelf.md" >}}) +- [loki.source.heroku]({{< relref "../components/loki.source.heroku.md" >}}) +- [loki.source.journal]({{< relref "../components/loki.source.journal.md" >}}) +- [loki.source.kafka]({{< relref "../components/loki.source.kafka.md" >}}) +- [loki.source.kubernetes]({{< relref "../components/loki.source.kubernetes.md" >}}) +- [loki.source.kubernetes_events]({{< relref "../components/loki.source.kubernetes_events.md" >}}) +- [loki.source.podlogs]({{< relref "../components/loki.source.podlogs.md" >}}) +- [loki.source.syslog]({{< relref "../components/loki.source.syslog.md" >}}) +- [loki.source.windowsevent]({{< relref "../components/loki.source.windowsevent.md" >}}) +{{< /collapse >}} + +{{< collapse title="otelcol" >}} +- [otelcol.exporter.loki]({{< relref "../components/otelcol.exporter.loki.md" >}}) +{{< /collapse >}} + + + + +## OpenTelemetry `otelcol.Consumer` + +The OpenTelemetry data is sent between components using `otelcol.Consumer`s. +`otelcol.Consumer`s are [capsules]({{< relref "../../concepts/config-language/expressions/types_and_values/#capsules" >}}) +that are exported by components that can receive OpenTelemetry data. Components that +can consume OpenTelemetry data can be passed the `otelcol.Consumer` as an argument. Note that some components +that use `otelcol.Consumer` only support a subset of telemetry signals, for example, only traces. Check the component +reference pages for more details on what is supported. Use the following components to build your OpenTelemetry pipeline: + + +### OpenTelemetry `otelcol.Consumer` Exporters +The following components, grouped by namespace, _export_ OpenTelemetry `otelcol.Consumer`. 
+ + + +{{< collapse title="otelcol" >}} +- [otelcol.connector.servicegraph]({{< relref "../components/otelcol.connector.servicegraph.md" >}}) +- [otelcol.connector.spanlogs]({{< relref "../components/otelcol.connector.spanlogs.md" >}}) +- [otelcol.connector.spanmetrics]({{< relref "../components/otelcol.connector.spanmetrics.md" >}}) +- [otelcol.exporter.loadbalancing]({{< relref "../components/otelcol.exporter.loadbalancing.md" >}}) +- [otelcol.exporter.logging]({{< relref "../components/otelcol.exporter.logging.md" >}}) +- [otelcol.exporter.loki]({{< relref "../components/otelcol.exporter.loki.md" >}}) +- [otelcol.exporter.otlp]({{< relref "../components/otelcol.exporter.otlp.md" >}}) +- [otelcol.exporter.otlphttp]({{< relref "../components/otelcol.exporter.otlphttp.md" >}}) +- [otelcol.exporter.prometheus]({{< relref "../components/otelcol.exporter.prometheus.md" >}}) +- [otelcol.processor.attributes]({{< relref "../components/otelcol.processor.attributes.md" >}}) +- [otelcol.processor.batch]({{< relref "../components/otelcol.processor.batch.md" >}}) +- [otelcol.processor.discovery]({{< relref "../components/otelcol.processor.discovery.md" >}}) +- [otelcol.processor.filter]({{< relref "../components/otelcol.processor.filter.md" >}}) +- [otelcol.processor.k8sattributes]({{< relref "../components/otelcol.processor.k8sattributes.md" >}}) +- [otelcol.processor.memory_limiter]({{< relref "../components/otelcol.processor.memory_limiter.md" >}}) +- [otelcol.processor.probabilistic_sampler]({{< relref "../components/otelcol.processor.probabilistic_sampler.md" >}}) +- [otelcol.processor.span]({{< relref "../components/otelcol.processor.span.md" >}}) +- [otelcol.processor.tail_sampling]({{< relref "../components/otelcol.processor.tail_sampling.md" >}}) +- [otelcol.processor.transform]({{< relref "../components/otelcol.processor.transform.md" >}}) +{{< /collapse >}} + + + + +### OpenTelemetry `otelcol.Consumer` Consumers +The following components, grouped by namespace, _consume_ OpenTelemetry `otelcol.Consumer`. 
+ + + +{{< collapse title="faro" >}} +- [faro.receiver]({{< relref "../components/faro.receiver.md" >}}) +{{< /collapse >}} + +{{< collapse title="otelcol" >}} +- [otelcol.connector.servicegraph]({{< relref "../components/otelcol.connector.servicegraph.md" >}}) +- [otelcol.connector.spanlogs]({{< relref "../components/otelcol.connector.spanlogs.md" >}}) +- [otelcol.connector.spanmetrics]({{< relref "../components/otelcol.connector.spanmetrics.md" >}}) +- [otelcol.processor.attributes]({{< relref "../components/otelcol.processor.attributes.md" >}}) +- [otelcol.processor.batch]({{< relref "../components/otelcol.processor.batch.md" >}}) +- [otelcol.processor.discovery]({{< relref "../components/otelcol.processor.discovery.md" >}}) +- [otelcol.processor.filter]({{< relref "../components/otelcol.processor.filter.md" >}}) +- [otelcol.processor.k8sattributes]({{< relref "../components/otelcol.processor.k8sattributes.md" >}}) +- [otelcol.processor.memory_limiter]({{< relref "../components/otelcol.processor.memory_limiter.md" >}}) +- [otelcol.processor.probabilistic_sampler]({{< relref "../components/otelcol.processor.probabilistic_sampler.md" >}}) +- [otelcol.processor.span]({{< relref "../components/otelcol.processor.span.md" >}}) +- [otelcol.processor.tail_sampling]({{< relref "../components/otelcol.processor.tail_sampling.md" >}}) +- [otelcol.processor.transform]({{< relref "../components/otelcol.processor.transform.md" >}}) +- [otelcol.receiver.jaeger]({{< relref "../components/otelcol.receiver.jaeger.md" >}}) +- [otelcol.receiver.kafka]({{< relref "../components/otelcol.receiver.kafka.md" >}}) +- [otelcol.receiver.loki]({{< relref "../components/otelcol.receiver.loki.md" >}}) +- [otelcol.receiver.opencensus]({{< relref "../components/otelcol.receiver.opencensus.md" >}}) +- [otelcol.receiver.otlp]({{< relref "../components/otelcol.receiver.otlp.md" >}}) +- [otelcol.receiver.prometheus]({{< relref "../components/otelcol.receiver.prometheus.md" >}}) +- [otelcol.receiver.vcenter]({{< relref "../components/otelcol.receiver.vcenter.md" >}}) +- [otelcol.receiver.zipkin]({{< relref "../components/otelcol.receiver.zipkin.md" >}}) +{{< /collapse >}} + + + + + +## Pyroscope `ProfilesReceiver` + +The Pyroscope profiles are sent between components using `ProfilesReceiver`s. +`ProfilesReceiver`s are [capsules]({{< relref "../../concepts/config-language/expressions/types_and_values/#capsules" >}}) +that are exported by components that can receive Pyroscope profiles. Components that +can consume Pyroscope profiles can be passed the `ProfilesReceiver` as an argument. Use the +following components to build your Pyroscope profiles pipeline: + + +### Pyroscope `ProfilesReceiver` Exporters +The following components, grouped by namespace, _export_ Pyroscope `ProfilesReceiver`. + + + +{{< collapse title="pyroscope" >}} +- [pyroscope.write]({{< relref "../components/pyroscope.write.md" >}}) +{{< /collapse >}} + + + + +### Pyroscope `ProfilesReceiver` Consumers +The following components, grouped by namespace, _consume_ Pyroscope `ProfilesReceiver`. 
+ + + +{{< collapse title="pyroscope" >}} +- [pyroscope.ebpf]({{< relref "../components/pyroscope.ebpf.md" >}}) +- [pyroscope.scrape]({{< relref "../components/pyroscope.scrape.md" >}}) +{{< /collapse >}} + + + diff --git a/docs/sources/flow/reference/components/_index.md b/docs/sources/flow/reference/components/_index.md index 4acc33d412ec..3eafecb3c1af 100644 --- a/docs/sources/flow/reference/components/_index.md +++ b/docs/sources/flow/reference/components/_index.md @@ -3,16 +3,16 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/ +description: Learn about the components in Grafana Agent Flow title: Components reference -description: Learn about the compenets in Grafana Agent weight: 300 --- # Components reference -This section contains reference documentation for all recognized -[components][]. +This section contains reference documentation for all recognized [components][]. {{< section >}} diff --git a/docs/sources/flow/reference/components/discovery.azure.md b/docs/sources/flow/reference/components/discovery.azure.md index 89e3df0d970e..83eceabdf7a6 100644 --- a/docs/sources/flow/reference/components/discovery.azure.md +++ b/docs/sources/flow/reference/components/discovery.azure.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.azure/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.azure/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.azure/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.azure/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.azure/ -title: discovery.azure description: Learn about discovery.azure +title: discovery.azure --- # discovery.azure @@ -69,7 +70,7 @@ Name | Type | Description | Default | Required ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -148,3 +149,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.azure` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.consul.md b/docs/sources/flow/reference/components/discovery.consul.md index 583b5497b550..c63f94b8017c 100644 --- a/docs/sources/flow/reference/components/discovery.consul.md +++ b/docs/sources/flow/reference/components/discovery.consul.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.consul/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.consul/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.consul/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.consul/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.consul/ -title: discovery.consul description: Learn about discovery.consul +title: discovery.consul --- # discovery.consul @@ -50,7 +51,7 @@ Name | Type | Description | Default | Required At most one of the following can be provided: - [`bearer_token` argument](#arguments). - - [`bearer_token_file` argument](#arguments). + - [`bearer_token_file` argument](#arguments). - [`basic_auth` block][basic_auth]. - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. @@ -69,6 +70,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -81,19 +83,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -166,3 +168,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.consul` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. 
Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.consulagent.md b/docs/sources/flow/reference/components/discovery.consulagent.md index 2fd4209a1979..df923fed4496 100644 --- a/docs/sources/flow/reference/components/discovery.consulagent.md +++ b/docs/sources/flow/reference/components/discovery.consulagent.md @@ -1,7 +1,10 @@ --- +aliases: +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.consulagent/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.consulagent/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.consulagent/ -title: discovery.consulagent description: Learn about discovery.consulagent +title: discovery.consulagent --- # discovery.consulagent @@ -50,7 +53,7 @@ The following blocks are supported inside the definition of ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -126,3 +129,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.consulagent` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.digitalocean.md b/docs/sources/flow/reference/components/discovery.digitalocean.md index b49c570e32d7..2a64ba7f6bec 100644 --- a/docs/sources/flow/reference/components/discovery.digitalocean.md +++ b/docs/sources/flow/reference/components/discovery.digitalocean.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.digitalocean/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.digitalocean/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.digitalocean/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.digitalocean/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.digitalocean/ -title: discovery.digitalocean description: Learn about discovery.digitalocean +title: discovery.digitalocean --- # discovery.digitalocean @@ -45,8 +46,7 @@ Exactly one of the [`bearer_token`](#arguments) and [`bearer_token_file`](#argum [arguments]: #arguments ## Blocks -The `discovery.digitalocean` component does not support any blocks, and is configured -fully through arguments. +The `discovery.digitalocean` component does not support any blocks, and is configured fully through arguments. ## Exported fields @@ -119,3 +119,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. 
- `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.digitalocean` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.dns.md b/docs/sources/flow/reference/components/discovery.dns.md index 43497f6d3970..d2f0217b1d73 100644 --- a/docs/sources/flow/reference/components/discovery.dns.md +++ b/docs/sources/flow/reference/components/discovery.dns.md @@ -1,12 +1,13 @@ --- aliases: -- /docs/agent/latest/flow/reference/components/discovery.dns +- /docs/agent/latest/flow/reference/components/discovery.dns/ - /docs/grafana-cloud/agent/flow/reference/components/discovery.dns/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.dns/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.dns/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.dns/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.dns/ -title: discovery.dns description: Learn about discovery.dns +title: discovery.dns --- # discovery.dns @@ -92,4 +93,21 @@ prometheus.remote_write "demo" { Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - - `PASSWORD`: The password to use for authentication to the remote_write API. \ No newline at end of file + - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.dns` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.docker.md b/docs/sources/flow/reference/components/discovery.docker.md index 0501f64a81c1..4d6ce94d557f 100644 --- a/docs/sources/flow/reference/components/discovery.docker.md +++ b/docs/sources/flow/reference/components/discovery.docker.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.docker/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.docker/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.docker/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.docker/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.docker/ -title: discovery.docker description: Learn about discovery.docker +title: discovery.docker --- # discovery.docker @@ -40,7 +41,7 @@ Name | Type | Description | Default | Required At most one of the following can be provided: - [`bearer_token` argument](#arguments). - - [`bearer_token_file` argument](#arguments). 
+ - [`bearer_token_file` argument](#arguments). - [`basic_auth` block][basic_auth]. - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. @@ -59,6 +60,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -88,19 +90,19 @@ documentation for the list of supported filters and their meaning. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -214,3 +216,20 @@ Replace the following: > **NOTE**: This example requires the "Expose daemon on tcp://localhost:2375 > without TLS" setting to be enabled in the Docker Engine settings. + + + +## Compatible components + +`discovery.docker` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.dockerswarm.md b/docs/sources/flow/reference/components/discovery.dockerswarm.md index fbaa15c18a71..58c065fb06eb 100644 --- a/docs/sources/flow/reference/components/discovery.dockerswarm.md +++ b/docs/sources/flow/reference/components/discovery.dockerswarm.md @@ -1,11 +1,12 @@ --- aliases: - - /docs/grafana-cloud/agent/flow/reference/components/discovery.dockerswarm/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.dockerswarm/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.dockerswarm/ +- /docs/grafana-cloud/agent/flow/reference/components/discovery.dockerswarm/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.dockerswarm/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.dockerswarm/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.dockerswarm/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.dockerswarm/ -title: discovery.dockerswarm description: Learn about discovery.dockerswarm +title: discovery.dockerswarm --- # discovery.dockerswarm @@ -47,6 +48,7 @@ The following blocks are supported inside the definition of | authorization | [authorization][] | Configure generic authorization to the endpoint. | no | | oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no | | oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | +| tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -77,19 +79,19 @@ The following arguments can be used to configure a filter. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -237,3 +239,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
+ + + +## Compatible components + +`discovery.dockerswarm` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.ec2.md b/docs/sources/flow/reference/components/discovery.ec2.md index fd926e8c4a40..7f01ae48c6e0 100644 --- a/docs/sources/flow/reference/components/discovery.ec2.md +++ b/docs/sources/flow/reference/components/discovery.ec2.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.ec2/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.ec2/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.ec2/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.ec2/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.ec2/ -title: discovery.ec2 description: Learn about discovery.ec2 +title: discovery.ec2 --- # discovery.ec2 @@ -35,6 +36,18 @@ Name | Type | Description | Default | Required `role_arn` | `string` | AWS Role Amazon Resource Name (ARN), an alternative to using AWS API keys. | | no `refresh_interval` | `string` | Refresh interval to re-read the instance list. | 60s | no `port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | 80 | no +`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no + + At most one of the following can be provided: + - [`bearer_token` argument](#arguments). + - [`bearer_token_file` argument](#arguments). + - [`basic_auth` block][basic_auth]. + - [`authorization` block][authorization]. + - [`oauth2` block][oauth2]. ## Blocks @@ -43,9 +56,21 @@ The following blocks are supported inside the definition of Hierarchy | Block | Description | Required --------- | ----- | ----------- | -------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no filter | [filter][] | Filters discoverable resources. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. 
| no [filter]: #filter-block +[authorization]: #authorization-block +[oauth2]: #oauth2-block +[tls_config]: #tls_config-block + +### authorization block + +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### filter block @@ -61,6 +86,14 @@ Refer to the [Filter API AWS EC2 documentation][filter api] for the list of supp [filter api]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Filter.html +### oauth2 block + +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} + +### tls_config block + +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} + ## Exported fields The following fields are exported and can be referenced by other components: @@ -133,3 +166,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.ec2` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.eureka.md b/docs/sources/flow/reference/components/discovery.eureka.md index d1971de54442..70ab3f8f666d 100644 --- a/docs/sources/flow/reference/components/discovery.eureka.md +++ b/docs/sources/flow/reference/components/discovery.eureka.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.eureka/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.eureka/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.eureka/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.eureka/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.eureka/ -title: discovery.eureka description: Learn about discovery.eureka +title: discovery.eureka --- # discovery.eureka @@ -31,8 +32,20 @@ Name | Type | Description `server` | `string` | Eureka server URL. | | yes `refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `30s` | no `enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no `follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no + At most one of the following can be provided: + - [`bearer_token` argument](#arguments). + - [`bearer_token_file` argument](#arguments). + - [`basic_auth` block][basic_auth]. + - [`authorization` block][authorization]. + - [`oauth2` block][oauth2]. 
+ +[arguments]: #arguments + ## Blocks The following blocks are supported inside the definition of `discovery.eureka`: @@ -43,6 +56,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -55,19 +69,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -139,3 +153,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.eureka` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.file.md b/docs/sources/flow/reference/components/discovery.file.md index 168e529a468a..c8493e01e62a 100644 --- a/docs/sources/flow/reference/components/discovery.file.md +++ b/docs/sources/flow/reference/components/discovery.file.md @@ -3,14 +3,15 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.file/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.file/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.file/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.file/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.file/ -title: discovery.file description: Learn about discovery.file +title: discovery.file --- # discovery.file -> **NOTE:** In `v0.35.0` of the Grafana Agent, the `discovery.file` component was renamed to [local.file_match][], +> **NOTE:** In {{< param "PRODUCT_ROOT_NAME" >}} `v0.35.0`, the `discovery.file` component was renamed to [local.file_match][], > and `discovery.file` was repurposed to discover scrape targets from one or more files. > >
@@ -171,4 +172,21 @@ prometheus.remote_write "demo" { Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - - `PASSWORD`: The password to use for authentication to the remote_write API. \ No newline at end of file + - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.file` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.gce.md b/docs/sources/flow/reference/components/discovery.gce.md index c15222c4763e..5752a4ce51b1 100644 --- a/docs/sources/flow/reference/components/discovery.gce.md +++ b/docs/sources/flow/reference/components/discovery.gce.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.gce/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.gce/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.gce/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.gce/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.gce/ -title: discovery.gce description: Learn about discovery.gce +title: discovery.gce --- # discovery.gce @@ -111,4 +112,21 @@ prometheus.remote_write "demo" { Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - - `PASSWORD`: The password to use for authentication to the remote_write API. \ No newline at end of file + - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.gce` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.hetzner.md b/docs/sources/flow/reference/components/discovery.hetzner.md index 940589dbe576..c6922e685f66 100644 --- a/docs/sources/flow/reference/components/discovery.hetzner.md +++ b/docs/sources/flow/reference/components/discovery.hetzner.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.hetzner/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.hetzner/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.hetzner/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.hetzner/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.hetzner/ -title: discovery.hetzner description: Learn about discovery.hetzner +title: discovery.hetzner --- # discovery.hetzner @@ -61,6 +62,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -73,19 +75,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -175,3 +177,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.hetzner` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.http.md b/docs/sources/flow/reference/components/discovery.http.md index 6b2ed6a19d28..50ecf42dcc06 100644 --- a/docs/sources/flow/reference/components/discovery.http.md +++ b/docs/sources/flow/reference/components/discovery.http.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.http/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.http/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.http/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.http/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.http/ -title: discovery.http description: Learn about discovery.http +title: discovery.http --- # discovery.http @@ -93,6 +94,20 @@ Name | Type | Description --------------- | ------------------- | ------------------------------------------------------------------------------------------ |---------| -------- `url` | string | URL to scrape | | yes `refresh_interval` | `duration` | How often to refresh targets. | `"60s"` | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no + + At most one of the following can be provided: + - [`bearer_token` argument](#arguments). + - [`bearer_token_file` argument](#arguments). + - [`basic_auth` block][basic_auth]. + - [`authorization` block][authorization]. + - [`oauth2` block][oauth2]. + +[arguments]: #arguments ## Blocks @@ -105,6 +120,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -117,19 +133,19 @@ an `oauth2` block. 
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -167,3 +183,20 @@ discovery.http "dynamic_targets" { refresh_interval = "15s" } ``` + + + +## Compatible components + +`discovery.http` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.ionos.md b/docs/sources/flow/reference/components/discovery.ionos.md index 23f774a02bbb..1c619a1641ac 100644 --- a/docs/sources/flow/reference/components/discovery.ionos.md +++ b/docs/sources/flow/reference/components/discovery.ionos.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.ionos/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.ionos/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.ionos/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.ionos/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.ionos/ -title: discovery.ionos description: Learn about discovery.ionos +title: discovery.ionos --- # discovery.ionos @@ -30,11 +31,22 @@ The following arguments are supported: | ------------------ | ---------- | ------------------------------------------------------------ | ------- | -------- | | `datacenter_id` | `string` | The unique ID of the data center. | | yes | | `refresh_interval` | `duration` | The time after which the servers are refreshed. | `60s` | no | -| `port` | `int` | The port to scrap metrics from. | 80 | no | +| `port` | `int` | The port to scrape metrics from. | 80 | no | +| `bearer_token` | `secret` | Bearer token to authenticate with. | | no | +| `bearer_token_file`| `string` | File containing a bearer token to authenticate with. | | no | | `proxy_url` | `string` | HTTP proxy to proxy requests through. | | no | | `enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no | | `follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no | + At most one of the following can be provided: + - [`bearer_token` argument](#arguments). + - [`bearer_token_file` argument](#arguments). + - [`basic_auth` block][basic_auth]. + - [`authorization` block][authorization]. + - [`oauth2` block][oauth2]. 
+ +[arguments]: #arguments + ## Blocks The following blocks are supported inside the definition of @@ -46,6 +58,7 @@ The following blocks are supported inside the definition of | authorization | [authorization][] | Configure generic authorization to the endpoint. | no | | oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no | | oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | +| tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -58,19 +71,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -139,3 +152,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.ionos` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.kubelet.md b/docs/sources/flow/reference/components/discovery.kubelet.md index 158e71aa7c7e..7ef29244a01e 100644 --- a/docs/sources/flow/reference/components/discovery.kubelet.md +++ b/docs/sources/flow/reference/components/discovery.kubelet.md @@ -3,11 +3,12 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.kubelet/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.kubelet/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.kubelet/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.kubelet/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.kubelet/ +description: Learn about discovery.kubelet labels: stage: beta title: discovery.kubelet -description: Learn about discovery.kubelet --- # discovery.kubelet @@ -34,7 +35,7 @@ The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- -`url` | `string` | URL of the Kubelet server. 
| | no +`url` | `string` | URL of the Kubelet server. | "https://localhost:10250" | no `bearer_token` | `secret` | Bearer token to authenticate with. | | no `bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no `refresh_interval` | `duration` | How often the Kubelet should be polled for scrape targets | `5s` | no @@ -48,6 +49,10 @@ One of the following authentication methods must be provided if kubelet authenti The `namespaces` list limits the namespaces to discover resources in. If omitted, all namespaces are searched. +`discovery.kubelet` appends a `/pods` path to `url` to request the available pods. +You can have additional paths in the `url`. +For example, if `url` is `https://kubernetes.default.svc.cluster.local:443/api/v1/nodes/cluster-node-1/proxy`, then `discovery.kubelet` sends a request on `https://kubernetes.default.svc.cluster.local:443/api/v1/nodes/cluster-node-1/proxy/pods` + ## Blocks The following blocks are supported inside the definition of @@ -63,11 +68,11 @@ tls_config | [tls_config][] | Configure TLS settings for connecting to the endpo ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -192,3 +197,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.kubelet` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.kubernetes.md b/docs/sources/flow/reference/components/discovery.kubernetes.md index da691c9f17b8..1d4b2f9210c5 100644 --- a/docs/sources/flow/reference/components/discovery.kubernetes.md +++ b/docs/sources/flow/reference/components/discovery.kubernetes.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.kubernetes/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.kubernetes/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.kubernetes/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.kubernetes/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.kubernetes/ -title: discovery.kubernetes description: Learn about discovery.kubernetes +title: discovery.kubernetes --- # discovery.kubernetes @@ -15,7 +16,7 @@ resources. It watches cluster state, and ensures targets are continually synced with what is currently running in your cluster. If you supply no connection information, this component defaults to an -in-cluster config. 
A kubeconfig file or manual connection settings can be used +in-cluster configuration. A kubeconfig file or manual connection settings can be used to override the defaults. ## Usage @@ -43,7 +44,7 @@ Name | Type | Description | Default | Required At most one of the following can be provided: - [`bearer_token` argument](#arguments). - - [`bearer_token_file` argument](#arguments). + - [`bearer_token_file` argument](#arguments). - [`basic_auth` block][basic_auth]. - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. @@ -258,6 +259,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -278,7 +280,7 @@ omitted, all namespaces are searched. Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- -`own_namespace` | `bool` | Include the namespace the agent is running in. | | no +`own_namespace` | `bool` | Include the namespace {{< param "PRODUCT_NAME" >}} is running in. | | no `names` | `list(string)` | List of namespaces to search. | | no ### selectors block @@ -321,19 +323,19 @@ Name | Type | Description | Default | Required ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -461,14 +463,20 @@ Replace the following: ### Limit to only pods on the same node -This example limits the search to pods on the same node as this Grafana Agent. This configuration could be useful if you are running the Agent as a DaemonSet: +This example limits the search to pods on the same node as this {{< param "PRODUCT_ROOT_NAME" >}}. +This configuration could be useful if you are running {{< param "PRODUCT_ROOT_NAME" >}} as a DaemonSet. + +{{% admonition type="note" %}} +This example assumes you have used Helm chart to deploy {{< param "PRODUCT_NAME" >}} in Kubernetes and sets `HOSTNAME` to the Kubernetes host name. +If you have a custom Kubernetes deployment, you must adapt this example to your configuration. 
+{{% /admonition %}} ```river discovery.kubernetes "k8s_pods" { role = "pod" selectors { role = "pod" - field = "spec.nodeName=" + constants.hostname + field = "spec.nodeName=" + coalesce(env("HOSTNAME"), constants.hostname) } } @@ -488,7 +496,25 @@ prometheus.remote_write "demo" { } } ``` + Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.kubernetes` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.kuma.md b/docs/sources/flow/reference/components/discovery.kuma.md index 682edaca51f3..c498753f58ab 100644 --- a/docs/sources/flow/reference/components/discovery.kuma.md +++ b/docs/sources/flow/reference/components/discovery.kuma.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.kuma/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.kuma/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.kuma/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.kuma/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.kuma/ -title: discovery.kuma description: Learn about discovery.kuma +title: discovery.kuma --- # discovery.kuma @@ -53,6 +54,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -65,19 +67,19 @@ an `oauth2` block. 
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -134,3 +136,20 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.kuma` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.lightsail.md b/docs/sources/flow/reference/components/discovery.lightsail.md index 3b1c98fa9ce3..81bdb0c706b9 100644 --- a/docs/sources/flow/reference/components/discovery.lightsail.md +++ b/docs/sources/flow/reference/components/discovery.lightsail.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.lightsail/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.lightsail/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.lightsail/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.lightsail/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.lightsail/ -title: discovery.lightsail description: Learn about discovery.lightsail +title: discovery.lightsail --- # discovery.lightsail @@ -98,3 +99,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.lightsail` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.linode.md b/docs/sources/flow/reference/components/discovery.linode.md index 92433a6cccf6..77d01dbdf4e2 100644 --- a/docs/sources/flow/reference/components/discovery.linode.md +++ b/docs/sources/flow/reference/components/discovery.linode.md @@ -1,7 +1,10 @@ --- +aliases: +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.linode/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.linode/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.linode/ -title: discovery.linode description: Learn about discovery.linode +title: discovery.linode --- # discovery.linode @@ -51,6 +54,7 @@ Hierarchy | Block | Description | Required authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -62,15 +66,15 @@ an `oauth2` block. ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -171,4 +175,21 @@ prometheus.remote_write "demo" { } } } -``` \ No newline at end of file +``` + + + +## Compatible components + +`discovery.linode` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.marathon.md b/docs/sources/flow/reference/components/discovery.marathon.md index 03588dbe2d3a..b19ddb321c2c 100644 --- a/docs/sources/flow/reference/components/discovery.marathon.md +++ b/docs/sources/flow/reference/components/discovery.marathon.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.marathon/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.marathon/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.marathon/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.marathon/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.marathon/ -title: discovery.marathon description: Learn about discovery.marathon +title: discovery.marathon --- # discovery.marathon @@ -55,6 +56,7 @@ The following blocks are supported inside the definition of | authorization | [authorization][] | Configure generic authorization to the endpoint. | no | | oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no | | oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | +| tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -67,19 +69,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -144,3 +146,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.marathon` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.nerve.md b/docs/sources/flow/reference/components/discovery.nerve.md index 2f39e6feeac1..1334f6dea8e8 100644 --- a/docs/sources/flow/reference/components/discovery.nerve.md +++ b/docs/sources/flow/reference/components/discovery.nerve.md @@ -1,7 +1,10 @@ --- +aliases: +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.nerve/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.nerve/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.nerve/ -title: discovery.nerve description: Learn about discovery.nerve +title: discovery.nerve --- # discovery.nerve @@ -94,3 +97,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.nerve` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.nomad.md b/docs/sources/flow/reference/components/discovery.nomad.md index 4797465ae9fb..aebd128bb320 100644 --- a/docs/sources/flow/reference/components/discovery.nomad.md +++ b/docs/sources/flow/reference/components/discovery.nomad.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.nomad/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.nomad/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.nomad/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.nomad/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.nomad/ -title: discovery.nomad description: Learn about discovery.nomad +title: discovery.nomad --- # discovery.nomad @@ -57,6 +58,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -69,19 +71,19 @@ an `oauth2` block. 
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -145,3 +147,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.nomad` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.openstack.md b/docs/sources/flow/reference/components/discovery.openstack.md index fea582d7f5cd..83df98d8c41c 100644 --- a/docs/sources/flow/reference/components/discovery.openstack.md +++ b/docs/sources/flow/reference/components/discovery.openstack.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.openstack/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.openstack/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.openstack/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.openstack/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.openstack/ -title: discovery.openstack description: Learn about discovery.openstack +title: discovery.openstack --- # discovery.openstack @@ -72,7 +73,7 @@ tls_config | [tls_config][] | TLS configuration for requests to the OpenStack AP ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -156,3 +157,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
+ + + +## Compatible components + +`discovery.openstack` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.ovhcloud.md b/docs/sources/flow/reference/components/discovery.ovhcloud.md new file mode 100644 index 000000000000..453fcb3c1cfc --- /dev/null +++ b/docs/sources/flow/reference/components/discovery.ovhcloud.md @@ -0,0 +1,165 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/reference/components/discovery.ovhcloud/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.ovhcloud/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.ovhcloud/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.ovhcloud/ +canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.ovhcloud/ +description: Learn about discovery.ovhcloud +title: discovery.ovhcloud +--- + +# discovery.ovhcloud + +`discovery.ovhcloud` discovers scrape targets from OVHcloud's [dedicated servers][] and [VPS][] using their [API][]. +{{< param "PRODUCT_ROOT_NAME" >}} will periodically check the REST endpoint and create a target for every discovered server. +The public IPv4 address will be used by default - if there's none, the IPv6 address will be used. +This may be changed via relabeling with `discovery.relabel`. +For OVHcloud's [public cloud][] instances you can use `discovery.openstack`. + +[API]: https://api.ovh.com/ +[public cloud]: https://www.ovhcloud.com/en/public-cloud/ +[VPS]: https://www.ovhcloud.com/en/vps/ +[Dedicated servers]: https://www.ovhcloud.com/en/bare-metal/ + +## Usage + +```river +discovery.ovhcloud "LABEL" { + application_key = APPLICATION_KEY + application_secret = APPLICATION_SECRET + consumer_key = CONSUMER_KEY + service = SERVICE +} +``` + +## Arguments + +The following arguments are supported: + +Name | Type | Description | Default | Required +------------------ | -------------- | -------------------------------------------------------------- | ------------- | -------- +application_key | `string` | [API][] application key. | | yes +application_secret | `secret` | [API][] application secret. | | yes +consumer_key | `secret` | [API][] consumer key. | | yes +endpoint | `string` | [API][] endpoint. | "ovh-eu" | no +refresh_interval | `duration` | Refresh interval to re-read the resources list. | "60s" | no +service | `string` | Service of the targets to retrieve. | | yes + +`endpoint` must be one of the [supported API endpoints][supported-apis]. + +`service` must be either `vps` or `dedicated_server`. + +[supported-apis]: https://github.com/ovh/go-ovh#supported-apis + +## Exported fields + +The following fields are exported and can be referenced by other components: + +Name | Type | Description +--------- | ------------------- | ----------- +`targets` | `list(map(string))` | The set of targets discovered from the OVHcloud API. + +Multiple meta labels are available on `targets` and can be used by the `discovery.relabel` component. + +[VPS][] meta labels: +* `__meta_ovhcloud_vps_cluster`: the cluster of the server. 
+* `__meta_ovhcloud_vps_datacenter`: the datacenter of the server. +* `__meta_ovhcloud_vps_disk`: the disk of the server. +* `__meta_ovhcloud_vps_display_name`: the display name of the server. +* `__meta_ovhcloud_vps_ipv4`: the IPv4 of the server. +* `__meta_ovhcloud_vps_ipv6`: the IPv6 of the server. +* `__meta_ovhcloud_vps_keymap`: the KVM keyboard layout of the server. +* `__meta_ovhcloud_vps_maximum_additional_ip`: the maximum additional IPs of the server. +* `__meta_ovhcloud_vps_memory_limit`: the memory limit of the server. +* `__meta_ovhcloud_vps_memory`: the memory of the server. +* `__meta_ovhcloud_vps_monitoring_ip_blocks`: the monitoring IP blocks of the server. +* `__meta_ovhcloud_vps_name`: the name of the server. +* `__meta_ovhcloud_vps_netboot_mode`: the netboot mode of the server. +* `__meta_ovhcloud_vps_offer_type`: the offer type of the server. +* `__meta_ovhcloud_vps_offer`: the offer of the server. +* `__meta_ovhcloud_vps_state`: the state of the server. +* `__meta_ovhcloud_vps_vcore`: the number of virtual cores of the server. +* `__meta_ovhcloud_vps_version`: the version of the server. +* `__meta_ovhcloud_vps_zone`: the zone of the server. + +[Dedicated servers][] meta labels: +* `__meta_ovhcloud_dedicated_server_commercial_range`: the commercial range of the server. +* `__meta_ovhcloud_dedicated_server_datacenter`: the datacenter of the server. +* `__meta_ovhcloud_dedicated_server_ipv4`: the IPv4 of the server. +* `__meta_ovhcloud_dedicated_server_ipv6`: the IPv6 of the server. +* `__meta_ovhcloud_dedicated_server_link_speed`: the link speed of the server. +* `__meta_ovhcloud_dedicated_server_name`: the name of the server. +* `__meta_ovhcloud_dedicated_server_os`: the operating system of the server. +* `__meta_ovhcloud_dedicated_server_rack`: the rack of the server. +* `__meta_ovhcloud_dedicated_server_reverse`: the reverse DNS name of the server. +* `__meta_ovhcloud_dedicated_server_server_id`: the ID of the server. +* `__meta_ovhcloud_dedicated_server_state`: the state of the server. +* `__meta_ovhcloud_dedicated_server_support_level`: the support level of the server. + +## Component health + +`discovery.ovhcloud` is only reported as unhealthy when given an invalid +configuration. In those cases, exported fields retain their last healthy +values. + +## Debug information + +`discovery.ovhcloud` does not expose any component-specific debug information. + +## Debug metrics + +`discovery.ovhcloud` does not expose any component-specific debug metrics. + +## Example + +```river +discovery.ovhcloud "example" { + application_key = APPLICATION_KEY + application_secret = APPLICATION_SECRET + consumer_key = CONSUMER_KEY + service = SERVICE +} + +prometheus.scrape "demo" { + targets = discovery.ovhcloud.example.targets + forward_to = [prometheus.remote_write.demo.receiver] +} + +prometheus.remote_write "demo" { + endpoint { + url = PROMETHEUS_REMOTE_WRITE_URL + basic_auth { + username = USERNAME + password = PASSWORD + } + } +} +``` + +Replace the following: + - `APPLICATION_KEY`: The OVHcloud [API][] application key. + - `APPLICATION_SECRET`: The OVHcloud [API][] application secret. + - `CONSUMER_KEY`: The OVHcloud [API][] consumer key. + - `SERVICE`: The OVHcloud service of the targets to retrieve. + - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. + - `USERNAME`: The username to use for authentication to the remote_write API. + - `PASSWORD`: The password to use for authentication to the remote_write API. 
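The component description notes that the public IPv4 address is used as the target address by default, and that this can be changed via relabeling with `discovery.relabel`. Below is a minimal sketch using the `__meta_ovhcloud_dedicated_server_ipv6` meta label listed above; the credentials are placeholders, and any scrape port still has to be handled separately depending on the exporters running on the servers.

```river
discovery.ovhcloud "servers" {
  application_key    = APPLICATION_KEY
  application_secret = APPLICATION_SECRET
  consumer_key       = CONSUMER_KEY
  service            = "dedicated_server"
}

discovery.relabel "prefer_ipv6" {
  targets = discovery.ovhcloud.servers.targets

  // Rewrite __address__ so scrapes use the IPv6 address instead of the
  // default public IPv4 address.
  rule {
    source_labels = ["__meta_ovhcloud_dedicated_server_ipv6"]
    target_label  = "__address__"
  }
}
```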
+ + + + +## Compatible components + +`discovery.ovhcloud` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.puppetdb.md b/docs/sources/flow/reference/components/discovery.puppetdb.md index fd910de40590..a83d8454723c 100644 --- a/docs/sources/flow/reference/components/discovery.puppetdb.md +++ b/docs/sources/flow/reference/components/discovery.puppetdb.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.puppetdb/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.puppetdb/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.puppetdb/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.puppetdb/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.puppetdb/ -title: discovery.puppetdb description: Learn about discovery.puppetdb +title: discovery.puppetdb --- # discovery.puppetdb @@ -63,6 +64,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -75,19 +77,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -155,3 +157,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
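The blocks table for `discovery.puppetdb` now lists `tls_config` at the top level in addition to `oauth2 > tls_config`. A minimal sketch of the top-level block follows; it assumes the component's usual `url` and `query` arguments, and the PuppetDB URL, PQL query, and certificate paths are placeholders.

```river
discovery.puppetdb "example" {
  url   = "https://puppetdb.example.com:8081"
  query = "resources { type = \"Package\" and title = \"httpd\" }"

  // TLS settings for connecting to the PuppetDB endpoint.
  tls_config {
    ca_file   = "/etc/ssl/certs/puppetdb-ca.pem"
    cert_file = "/etc/ssl/certs/agent.pem"
    key_file  = "/etc/ssl/private/agent-key.pem"
  }
}
```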
+ + + +## Compatible components + +`discovery.puppetdb` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.relabel.md b/docs/sources/flow/reference/components/discovery.relabel.md index 7e7b42b5381c..fb0928359273 100644 --- a/docs/sources/flow/reference/components/discovery.relabel.md +++ b/docs/sources/flow/reference/components/discovery.relabel.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.relabel/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.relabel/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.relabel/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.relabel/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.relabel/ -title: discovery.relabel description: Learn about discovery.relabel +title: discovery.relabel --- # discovery.relabel @@ -71,7 +72,7 @@ rule | [rule][] | Relabeling rules to apply to targets. | no ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} ## Exported fields @@ -122,3 +123,23 @@ discovery.relabel "keep_backend_only" { ``` + + +## Compatible components + +`discovery.relabel` can accept arguments from the following components: + +- Components that export [Targets]({{< relref "../compatibility/#targets-exporters" >}}) + +`discovery.relabel` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.scaleway.md b/docs/sources/flow/reference/components/discovery.scaleway.md index 699658c51007..fc3ec8867212 100644 --- a/docs/sources/flow/reference/components/discovery.scaleway.md +++ b/docs/sources/flow/reference/components/discovery.scaleway.md @@ -1,7 +1,10 @@ --- +aliases: +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.scaleway/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.scaleway/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.scaleway/ -title: discovery.scaleway description: Learn about discovery.scaleway +title: discovery.scaleway --- # discovery.scaleway @@ -72,7 +75,7 @@ an `oauth2` block. ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -171,3 +174,20 @@ Replace the following: * `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. 
* `USERNAME`: The username to use for authentication to the remote_write API. * `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`discovery.scaleway` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.serverset.md b/docs/sources/flow/reference/components/discovery.serverset.md index 9b56697fdcf6..7eb43b5ee11d 100644 --- a/docs/sources/flow/reference/components/discovery.serverset.md +++ b/docs/sources/flow/reference/components/discovery.serverset.md @@ -1,7 +1,10 @@ --- +aliases: +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.serverset/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.serverset/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.serverset/ -title: discovery.serverset description: Learn about discovery.serverset +title: discovery.serverset --- # discovery.serverset @@ -92,3 +95,20 @@ prometheus.remote_write "default" { } } ``` + + + +## Compatible components + +`discovery.serverset` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.triton.md b/docs/sources/flow/reference/components/discovery.triton.md index 4a6817968ea0..f48ae7f65b17 100644 --- a/docs/sources/flow/reference/components/discovery.triton.md +++ b/docs/sources/flow/reference/components/discovery.triton.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.triton/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.triton/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.triton/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.triton/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.triton/ -title: discovery.triton description: Learn about discovery.triton +title: discovery.triton --- # discovery.triton @@ -58,7 +59,7 @@ tls_config | [tls_config][] | TLS configuration for requests to the Triton API. ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -128,3 +129,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
+ + + +## Compatible components + +`discovery.triton` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.uyuni.md b/docs/sources/flow/reference/components/discovery.uyuni.md index 6be45ab5f7e3..42b77e8952b6 100644 --- a/docs/sources/flow/reference/components/discovery.uyuni.md +++ b/docs/sources/flow/reference/components/discovery.uyuni.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/discovery.uyuni/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.uyuni/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.uyuni/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.uyuni/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.uyuni/ -title: discovery.uyuni description: Learn about discovery.uyuni +title: discovery.uyuni --- # discovery.uyuni @@ -53,7 +54,7 @@ tls_config | [tls_config][] | TLS configuration for requests to the Uyuni API. | ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -123,3 +124,19 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + +## Compatible components + +`discovery.uyuni` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/faro.receiver.md b/docs/sources/flow/reference/components/faro.receiver.md index 2bc7c8e38ffe..3c15253f126a 100644 --- a/docs/sources/flow/reference/components/faro.receiver.md +++ b/docs/sources/flow/reference/components/faro.receiver.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/faro.receiver/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/faro.receiver/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/faro.receiver/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/faro.receiver/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/faro.receiver/ -title: faro.receiver description: Learn about the faro.receiver +title: faro.receiver --- # faro.receiver @@ -266,3 +267,22 @@ Replace the following: [loki.write]: {{< relref "./loki.write.md" >}} [otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}} + + + +## Compatible components + +`faro.receiver` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/local.file.md b/docs/sources/flow/reference/components/local.file.md index 5494f4104e6a..5e935a0bbbf5 100644 --- a/docs/sources/flow/reference/components/local.file.md +++ b/docs/sources/flow/reference/components/local.file.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/local.file/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/local.file/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/local.file/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/local.file/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/local.file/ -title: local.file description: Learn about local.file +title: local.file --- # local.file @@ -38,9 +39,9 @@ Name | Type | Description | Default | Required `poll_frequency` | `duration` | How often to poll for file changes | `"1m"` | no `is_secret` | `bool` | Marks the file as containing a [secret][] | `false` | no -[secret]: {{< relref "../../config-language/expressions/types_and_values.md#secrets" >}} +[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} -{{< docs/shared lookup="flow/reference/components/local-file-arguments-text.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/local-file-arguments-text.md" source="agent" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/local.file_match.md b/docs/sources/flow/reference/components/local.file_match.md index 0ab8a05e988f..8c3ff3a43062 100644 --- a/docs/sources/flow/reference/components/local.file_match.md +++ b/docs/sources/flow/reference/components/local.file_match.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/local.file_match/ - 
/docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/local.file_match/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/local.file_match/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/local.file_match/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/local.file_match/ -title: local.file_match description: Learn about local.file_match +title: local.file_match --- # local.file_match @@ -144,3 +145,24 @@ Replace the following: - `LOKI_URL`: The URL of the Loki server to send logs to. - `USERNAME`: The username to use for authentication to the Loki API. - `PASSWORD`: The password to use for authentication to the Loki API. + + + +## Compatible components + +`local.file_match` can accept arguments from the following components: + +- Components that export [Targets]({{< relref "../compatibility/#targets-exporters" >}}) + +`local.file_match` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.echo.md b/docs/sources/flow/reference/components/loki.echo.md index 205af6899d7c..756ffa00ee18 100644 --- a/docs/sources/flow/reference/components/loki.echo.md +++ b/docs/sources/flow/reference/components/loki.echo.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.echo/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.echo/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.echo/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.echo/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.echo/ +description: Learn about loki.echo labels: stage: beta title: loki.echo -description: Learn about loki.echo --- # loki.echo -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} `loki.echo` receives log entries from other `loki` components and prints them to the process' standard output (stdout). @@ -66,3 +67,20 @@ loki.source.file "logs" { loki.echo "example" { } ``` + + + +## Compatible components + +`loki.echo` has exports that can be consumed by the following components: + +- Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.process.md b/docs/sources/flow/reference/components/loki.process.md index f21104263026..c2793abbfe2e 100644 --- a/docs/sources/flow/reference/components/loki.process.md +++ b/docs/sources/flow/reference/components/loki.process.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.process/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.process/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.process/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.process/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.process/ -title: loki.process description: Learn about loki.process +title: loki.process --- # loki.process @@ -1537,7 +1538,7 @@ The following arguments are supported: | ---------------- | ------------- | -------------------------------------------------- | ------- | -------- | | `db` | `string` | Path to the Maxmind DB file. | | yes | | `source` | `string` | IP from extracted data to parse. | | yes | -| `db_type` | `string` | Maxmind DB type. Allowed values are "city", "asn". | | no | +| `db_type` | `string` | Maxmind DB type. Allowed values are "city", "asn", "country". | | no | | `custom_lookups` | `map(string)` | Key-value pairs of JMESPath expressions. | | no | @@ -1561,6 +1562,7 @@ loki.process "example" { values = { geoip_city_name = "", geoip_country_name = "", + geoip_country_code = "", geoip_continent_name = "", geoip_continent_code = "", geoip_location_latitude = "", @@ -1581,6 +1583,7 @@ The extracted data from the IP used in this example: - geoip_city_name: Kansas City - geoip_country_name: United States +- geoip_country_code: US - geoip_continent_name: North America - geoip_continent_code: NA - geoip_location_latitude: 39.1027 @@ -1621,6 +1624,42 @@ The extracted data from the IP used in this example: - geoip_autonomous_system_number: 396982 - geoip_autonomous_system_organization: GOOGLE-CLOUD-PLATFORM +#### GeoIP with Country database example: + +``` +{"log":"log message","client_ip":"34.120.177.193"} + +loki.process "example" { + stage.json { + expressions = {ip = "client_ip"} + } + + stage.geoip { + source = "ip" + db = "/path/to/db/GeoLite2-Country.mmdb" + db_type = "country" + } + + stage.labels { + values = { + geoip_country_name = "", + geoip_country_code = "", + geoip_continent_name = "", + geoip_continent_code = "", + } + } +} +``` + +The `json` stage extracts the IP address from the `client_ip` key in the log line. +Then the extracted `ip` value is given as source to geoip stage. The geoip stage performs a lookup on the IP and populates the following fields in the shared map which are added as labels using the `labels` stage. 
+ +The extracted data from the IP used in this example: + +- geoip_country_name: United States +- geoip_country_code: US +- geoip_continent_name: North America +- geoip_continent_code: NA #### GeoIP with custom fields example @@ -1693,3 +1732,23 @@ loki.process "local" { } } ``` + + +## Compatible components + +`loki.process` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + +`loki.process` has exports that can be consumed by the following components: + +- Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.relabel.md b/docs/sources/flow/reference/components/loki.relabel.md index 4ce55943284e..4344af151b22 100644 --- a/docs/sources/flow/reference/components/loki.relabel.md +++ b/docs/sources/flow/reference/components/loki.relabel.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.relabel/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.relabel/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.relabel/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.relabel/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.relabel/ -title: loki.relabel description: Learn about loki.relabel +title: loki.relabel --- # loki.relabel @@ -66,7 +67,7 @@ rule | [rule][] | Relabeling rules to apply to received log entries. | no ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block-logs.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/rule-block-logs.md" source="agent" version="" >}} ## Exported fields @@ -111,3 +112,23 @@ loki.relabel "keep_error_only" { } ``` + + +## Compatible components + +`loki.relabel` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + +`loki.relabel` has exports that can be consumed by the following components: + +- Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.api.md b/docs/sources/flow/reference/components/loki.source.api.md index 8fddd8a3cdb1..afc2f3dad112 100644 --- a/docs/sources/flow/reference/components/loki.source.api.md +++ b/docs/sources/flow/reference/components/loki.source.api.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.api/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.api/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.api/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.api/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.api/ -title: loki.source.api description: Learn about loki.source.api +title: loki.source.api --- # loki.source.api @@ -23,7 +24,7 @@ The HTTP API exposed is compatible with [Loki push API][loki-push-api] and the ` loki.source.api "LABEL" { http { listen_address = "LISTEN_ADDRESS" - listen_port = PORT + listen_port = PORT } forward_to = RECEIVER_LIST } @@ -31,10 +32,10 @@ loki.source.api "LABEL" { The component will start HTTP server on the configured port and address with the following endpoints: -- `/loki/api/v1/push` - accepting `POST` requests compatible with [Loki push API][loki-push-api], for example, from another Grafana Agent's [`loki.write`][loki.write] component. -- `/loki/api/v1/raw` - accepting `POST` requests with newline-delimited log lines in body. This can be used to send NDJSON or plaintext logs. This is compatible with promtail's push API endpoint - see [promtail's documentation][promtail-push-api] for more information. NOTE: when this endpoint is used, the incoming timestamps cannot be used and the `use_incoming_timestamp = true` setting will be ignored. +- `/loki/api/v1/push` - accepting `POST` requests compatible with [Loki push API][loki-push-api], for example, from another {{< param "PRODUCT_ROOT_NAME" >}}'s [`loki.write`][loki.write] component. +- `/loki/api/v1/raw` - accepting `POST` requests with newline-delimited log lines in body. This can be used to send NDJSON or plaintext logs. This is compatible with promtail's push API endpoint - see [promtail's documentation][promtail-push-api] for more information. NOTE: when this endpoint is used, the incoming timestamps cannot be used and the `use_incoming_timestamp = true` setting will be ignored. - `/loki/ready` - accepting `GET` requests - can be used to confirm the server is reachable and healthy. -- `/api/v1/push` - internally reroutes to `/loki/api/v1/push` +- `/api/v1/push` - internally reroutes to `/loki/api/v1/push` - `/api/v1/raw` - internally reroutes to `/loki/api/v1/raw` @@ -44,12 +45,12 @@ The component will start HTTP server on the configured port and address with the `loki.source.api` supports the following arguments: - Name | Type | Description | Default | Required ---------------------------|----------------------|------------------------------------------------------------|---------|---------- - `forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes - `use_incoming_timestamp` | `bool` | Whether or not to use the timestamp received from request. | `false` | no - `labels` | `map(string)` | The labels to associate with each received logs record. | `{}` | no - `relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. 
| `{}` | no +Name | Type | Description | Default | Required +-------------------------|----------------------|------------------------------------------------------------|---------|--------- +`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes +`use_incoming_timestamp` | `bool` | Whether or not to use the timestamp received from request. | `false` | no +`labels` | `map(string)` | The labels to associate with each received logs record. | `{}` | no +`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `{}` | no The `relabel_rules` field can make use of the `rules` export value from a [`loki.relabel`][loki.relabel] component to apply one or more relabeling rules to log entries before they're forwarded to the list of receivers in `forward_to`. @@ -60,15 +61,15 @@ The `relabel_rules` field can make use of the `rules` export value from a The following blocks are supported inside the definition of `loki.source.api`: - Hierarchy | Name | Description | Required ------------|----------|----------------------------------------------------|---------- - `http` | [http][] | Configures the HTTP server that receives requests. | no +Hierarchy | Name | Description | Required +----------|----------|----------------------------------------------------|--------- +`http` | [http][] | Configures the HTTP server that receives requests. | no [http]: #http ### http -{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} ## Exported fields @@ -116,3 +117,20 @@ loki.source.api "loki_push_api" { } ``` + + +## Compatible components + +`loki.source.api` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.awsfirehose.md b/docs/sources/flow/reference/components/loki.source.awsfirehose.md index d0e2a9f175db..86bf634e395a 100644 --- a/docs/sources/flow/reference/components/loki.source.awsfirehose.md +++ b/docs/sources/flow/reference/components/loki.source.awsfirehose.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.awsfirehose/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.awsfirehose/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.awsfirehose/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.awsfirehose/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.awsfirehose/ -title: loki.source.awsfirehose description: Learn about loki.source.awsfirehose +title: loki.source.awsfirehose --- # loki.source.awsfirehose @@ -101,11 +102,11 @@ The following blocks are supported inside the definition of `loki.source.awsfire ### http -{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} ### grpc -{{< docs/shared lookup="flow/reference/components/loki-server-grpc.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/loki-server-grpc.md" source="agent" version="" >}} ## Exported fields @@ -195,3 +196,20 @@ loki.relabel "logging_origin" { forward_to = [] } ``` + + +## Compatible components + +`loki.source.awsfirehose` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md b/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md index 67755dc05a51..fcbe22aa4880 100644 --- a/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md +++ b/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.azure_event_hubs/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.azure_event_hubs/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.azure_event_hubs/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.azure_event_hubs/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.azure_event_hubs/ -title: loki.source.azure_event_hubs description: Learn about loki.source.azure_event_hubs +title: loki.source.azure_event_hubs --- # loki.source.azure_event_hubs @@ -133,4 +134,20 @@ loki.write "example" { url = "loki:3100/api/v1/push" } } -``` \ No newline at end of file +``` + +## Compatible components + +`loki.source.azure_event_hubs` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.cloudflare.md b/docs/sources/flow/reference/components/loki.source.cloudflare.md index a24508a7b236..cee51de6a541 100644 --- a/docs/sources/flow/reference/components/loki.source.cloudflare.md +++ b/docs/sources/flow/reference/components/loki.source.cloudflare.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.cloudflare/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.cloudflare/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.cloudflare/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.cloudflare/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.cloudflare/ -title: loki.source.cloudflare description: Learn about loki.source.cloudflare +title: loki.source.cloudflare --- # loki.source.cloudflare @@ -208,3 +209,20 @@ loki.write "local" { } } ``` + + +## Compatible components + +`loki.source.cloudflare` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.docker.md b/docs/sources/flow/reference/components/loki.source.docker.md index 82ffff474b44..cbf77163d646 100644 --- a/docs/sources/flow/reference/components/loki.source.docker.md +++ b/docs/sources/flow/reference/components/loki.source.docker.md @@ -1,12 +1,13 @@ --- aliases: -- /docs/agent/latest/flow/reference/components/loki.source.docker +- /docs/agent/latest/flow/reference/components/loki.source.docker/ - /docs/grafana-cloud/agent/flow/reference/components/loki.source.docker/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.docker/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.docker/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.docker/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.docker/ -title: loki.source.docker description: Learn about loki.source.docker +title: loki.source.docker --- # loki.source.docker @@ -152,3 +153,22 @@ loki.write "local" { } } ``` + + + +## Compatible components + +`loki.source.docker` can accept arguments from the following components: + +- Components that export [Targets]({{< relref "../compatibility/#targets-exporters" >}}) +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.file.md b/docs/sources/flow/reference/components/loki.source.file.md index 812e5c92c7fc..4583018d90a6 100644 --- a/docs/sources/flow/reference/components/loki.source.file.md +++ b/docs/sources/flow/reference/components/loki.source.file.md @@ -1,11 +1,12 @@ --- aliases: - - /docs/grafana-cloud/agent/flow/reference/components/loki.source.file/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.file/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.file/ +- /docs/grafana-cloud/agent/flow/reference/components/loki.source.file/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.file/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.file/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.file/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.file/ -title: loki.source.file description: Learn about loki.source.file +title: loki.source.file --- # loki.source.file @@ -233,3 +234,22 @@ loki.write "local" { ``` [IANA encoding]: https://www.iana.org/assignments/character-sets/character-sets.xhtml + + + +## Compatible components + +`loki.source.file` can accept arguments from the following components: + +- Components that export [Targets]({{< relref "../compatibility/#targets-exporters" >}}) +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.gcplog.md b/docs/sources/flow/reference/components/loki.source.gcplog.md index c29e1e7c340a..2ce88f73f398 100644 --- a/docs/sources/flow/reference/components/loki.source.gcplog.md +++ b/docs/sources/flow/reference/components/loki.source.gcplog.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.gcplog/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.gcplog/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.gcplog/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.gcplog/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.gcplog/ -title: loki.source.gcplog description: Learn about loki.source.gcplog +title: loki.source.gcplog --- # loki.source.gcplog @@ -117,11 +118,11 @@ The `labels` map is applied to every entry that passes through the component. ### http -{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} ### grpc -{{< docs/shared lookup="flow/reference/components/loki-server-grpc.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/loki-server-grpc.md" source="agent" version="" >}} ## Exported fields @@ -192,3 +193,20 @@ loki.write "local" { } } ``` + + +## Compatible components + +`loki.source.gcplog` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.gelf.md b/docs/sources/flow/reference/components/loki.source.gelf.md index 0ef04bcc084f..ac5796051be5 100644 --- a/docs/sources/flow/reference/components/loki.source.gelf.md +++ b/docs/sources/flow/reference/components/loki.source.gelf.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.gelf/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.gelf/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.gelf/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.gelf/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.gelf/ -title: loki.source.gelf description: Learn about loki.source.gelf +title: loki.source.gelf --- # loki.source.gelf @@ -88,3 +89,20 @@ loki.write "endpoint" { } } ``` + + +## Compatible components + +`loki.source.gelf` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.heroku.md b/docs/sources/flow/reference/components/loki.source.heroku.md index 6fe1065afdaf..8f2c01cea68c 100644 --- a/docs/sources/flow/reference/components/loki.source.heroku.md +++ b/docs/sources/flow/reference/components/loki.source.heroku.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.heroku/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.heroku/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.heroku/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.heroku/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.heroku/ -title: loki.source.heroku description: Learn about loki.source.heroku +title: loki.source.heroku --- # loki.source.heroku @@ -67,11 +68,11 @@ The following blocks are supported inside the definition of `loki.source.heroku` ### http -{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} ### grpc -{{< docs/shared lookup="flow/reference/components/loki-server-grpc.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/loki-server-grpc.md" source="agent" version="" >}} ## Labels @@ -143,3 +144,20 @@ loki.write "local" { } } ``` + + +## Compatible components + +`loki.source.heroku` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.journal.md b/docs/sources/flow/reference/components/loki.source.journal.md index 4b8c6941b130..0448bd572d74 100644 --- a/docs/sources/flow/reference/components/loki.source.journal.md +++ b/docs/sources/flow/reference/components/loki.source.journal.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.journal/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.journal/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.journal/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.journal/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.journal/ -title: loki.source.journal description: Learn about loki.source.journal +title: loki.source.journal --- # loki.source.journal @@ -100,3 +101,20 @@ loki.write "endpoint" { } } ``` + + +## Compatible components + +`loki.source.journal` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.kafka.md b/docs/sources/flow/reference/components/loki.source.kafka.md index a93e870cdabd..eb5e04217298 100644 --- a/docs/sources/flow/reference/components/loki.source.kafka.md +++ b/docs/sources/flow/reference/components/loki.source.kafka.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.kafka/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.kafka/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.kafka/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.kafka/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.kafka/ -title: loki.source.kafka description: Learn about loki.source.kafka +title: loki.source.kafka --- # loki.source.kafka @@ -106,7 +107,7 @@ you must set the `tls_config` block. If `"sasl"` is used, you must set the `sasl ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ### sasl_config block @@ -172,3 +173,21 @@ loki.write "local" { } } ``` + + + +## Compatible components + +`loki.source.kafka` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.kubernetes.md b/docs/sources/flow/reference/components/loki.source.kubernetes.md index 48db37ab6e70..e9d19237aef6 100644 --- a/docs/sources/flow/reference/components/loki.source.kubernetes.md +++ b/docs/sources/flow/reference/components/loki.source.kubernetes.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.kubernetes/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.kubernetes/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.kubernetes/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.kubernetes/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.kubernetes/ +description: Learn about loki.source.kubernetes labels: stage: experimental title: loki.source.kubernetes -description: Learn about loki.source.kubernetes --- # loki.source.kubernetes -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} `loki.source.kubernetes` tails logs from Kubernetes containers using the Kubernetes API. It has the following benefits over `loki.source.file`: @@ -20,7 +21,7 @@ Kubernetes API. It has the following benefits over `loki.source.file`: * It works without a privileged container. * It works without a root user. * It works without needing access to the filesystem of the Kubernetes node. 
-* It doesn't require a DaemonSet to collect logs, so one agent could collect +* It doesn't require a DaemonSet to collect logs, so one {{< param "PRODUCT_ROOT_NAME" >}} could collect logs for the whole cluster. > **NOTE**: Because `loki.source.kubernetes` uses the Kubernetes API to tail @@ -82,7 +83,7 @@ client > authorization | [authorization][] | Configure generic authorization to client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -clustering | [clustering][] | Configure the component for when the Agent is running in clustered mode. | no +clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_NAME" >}} is running in clustered mode. | no The `>` symbol indicates deeper levels of nesting. For example, `client > basic_auth` refers to a `basic_auth` block defined @@ -99,7 +100,7 @@ inside a `client` block. The `client` block configures the Kubernetes client used to tail logs from containers. If the `client` block isn't provided, the default in-cluster -configuration with the service account of the running Grafana Agent pod is +configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is used. The following arguments are supported: @@ -123,19 +124,19 @@ Name | Type | Description | Default | Required ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ### clustering (beta) @@ -143,11 +144,11 @@ Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- `enabled` | `bool` | Distribute log collection with other cluster nodes. | | yes -When the agent is [using clustering][], and `enabled` is set to true, then this +When {{< param "PRODUCT_ROOT_NAME" >}} is [using clustering][], and `enabled` is set to true, then this `loki.source.kubernetes` component instance opts-in to participating in the cluster to distribute the load of log collection between all cluster nodes. -If the agent is _not_ running in clustered mode, then the block is a no-op and +If {{< param "PRODUCT_ROOT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op and `loki.source.kubernetes` collects logs from every target it receives in its arguments. 
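As a minimal sketch of the opt-in described above, the `clustering` block is placed inside `loki.source.kubernetes`; the `discovery.kubernetes` component named `pods` and the `loki.write` component named `local` are assumed to be defined elsewhere in the same configuration.

```river
loki.source.kubernetes "pods" {
  targets    = discovery.kubernetes.pods.targets
  forward_to = [loki.write.local.receiver]

  // Opt in to distributing log collection across cluster nodes.
  clustering {
    enabled = true
  }
}
```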
@@ -198,3 +199,22 @@ loki.write "local" { } } ``` + + + +## Compatible components + +`loki.source.kubernetes` can accept arguments from the following components: + +- Components that export [Targets]({{< relref "../compatibility/#targets-exporters" >}}) +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.kubernetes_events.md b/docs/sources/flow/reference/components/loki.source.kubernetes_events.md index 68168544d994..4447a915cfae 100644 --- a/docs/sources/flow/reference/components/loki.source.kubernetes_events.md +++ b/docs/sources/flow/reference/components/loki.source.kubernetes_events.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.kubernetes_events/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.kubernetes_events/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.kubernetes_events/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.kubernetes_events/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.kubernetes_events/ -title: loki.source.kubernetes_events description: Learn about loki.source.kubernetes_events +title: loki.source.kubernetes_events --- # loki.source.kubernetes_events @@ -46,9 +47,9 @@ By default, the generated log lines will be in the `logfmt` format. Use the `log_format` argument to change it to `json`. These formats are also names of LogQL parsers, which can be used for processing the logs. -> **NOTE**: When watching all namespaces, Grafana Agent must have permissions +> **NOTE**: When watching all namespaces, {{< param "PRODUCT_NAME" >}} must have permissions > to watch events at the cluster scope (such as using a ClusterRoleBinding). If -> an explicit list of namespaces is provided, Grafana Agent only needs +> an explicit list of namespaces is provided, {{< param "PRODUCT_NAME" >}} only needs > permissions to watch events for those namespaces. Log lines generated by `loki.source.kubernetes_events` have the following @@ -95,7 +96,7 @@ inside a `client` block. The `client` block configures the Kubernetes client used to tail logs from containers. If the `client` block isn't provided, the default in-cluster -configuration with the service account of the running Grafana Agent pod is +configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is used. 
The following arguments are supported: @@ -119,19 +120,19 @@ Name | Type | Description | Default | Required ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -170,3 +171,20 @@ loki.write "local" { } } ``` + + +## Compatible components + +`loki.source.kubernetes_events` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.podlogs.md b/docs/sources/flow/reference/components/loki.source.podlogs.md index f5f1b00fbd73..5e957c6ead09 100644 --- a/docs/sources/flow/reference/components/loki.source.podlogs.md +++ b/docs/sources/flow/reference/components/loki.source.podlogs.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.podlogs/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.podlogs/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.podlogs/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.podlogs/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.podlogs/ +description: Learn about loki.source.podlogs labels: stage: experimental title: loki.source.podlogs -description: Learn about loki.source.podlogs --- # loki.source.podlogs -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} `loki.source.podlogs` discovers `PodLogs` resources on Kubernetes and, using the Kubernetes API, tails logs from Kubernetes containers of Pods specified by @@ -22,8 +23,8 @@ the discovered them. resources rather than being fed targets from another Flow component. > **NOTE**: Unlike `loki.source.kubernetes`, it is not possible to distribute -> responsibility of collecting logs across multiple agents. To avoid collecting -> duplicate logs, only one agent should be running a `loki.source.podlogs` +> responsibility of collecting logs across multiple {{< param "PRODUCT_ROOT_NAME" >}}s. To avoid collecting +> duplicate logs, only one {{< param "PRODUCT_ROOT_NAME" >}} should be running a `loki.source.podlogs` > component. 
> **NOTE**: Because `loki.source.podlogs` uses the Kubernetes API to tail logs, @@ -61,7 +62,7 @@ The `PodLogs` resource describes a set of Pods to collect logs from. > **NOTE**: `loki.source.podlogs` looks for `PodLogs` of > `monitoring.grafana.com/v1alpha2`, and is not compatible with `PodLogs` from -> the Grafana Agent Operator, which are version `v1alpha1`. +> the {{< param "PRODUCT_ROOT_NAME" >}} Operator, which are version `v1alpha1`. Field | Type | Description ----- | ---- | ----------- @@ -143,7 +144,7 @@ selector | [selector][] | Label selector for which `PodLogs` to discover. | no selector > match_expression | [match_expression][] | Label selector expression for which `PodLogs` to discover. | no namespace_selector | [selector][] | Label selector for which namespaces to discover `PodLogs` in. | no namespace_selector > match_expression | [match_expression][] | Label selector expression for which namespaces to discover `PodLogs` in. | no -clustering | [clustering][] | Configure the component for when the Agent is running in clustered mode. | no +clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_ROOT_NAME" >}} is running in clustered mode. | no The `>` symbol indicates deeper levels of nesting. For example, `client > basic_auth` refers to a `basic_auth` block defined @@ -162,7 +163,7 @@ inside a `client` block. The `client` block configures the Kubernetes client used to tail logs from containers. If the `client` block isn't provided, the default in-cluster -configuration with the service account of the running Grafana Agent pod is +configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is used. The following arguments are supported: @@ -185,19 +186,19 @@ Name | Type | Description | Default | Required ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ### selector block @@ -241,11 +242,11 @@ Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- `enabled` | `bool` | Distribute log collection with other cluster nodes. | | yes -When the agent is [using clustering][], and `enabled` is set to true, then this +When {{< param "PRODUCT_NAME" >}} is [using clustering][], and `enabled` is set to true, then this `loki.source.podlogs` component instance opts-in to participating in the cluster to distribute the load of log collection between all cluster nodes. -If the agent is _not_ running in clustered mode, then the block is a no-op and +If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op and `loki.source.podlogs` collects logs based on every PodLogs resource discovered. 
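As an illustration of the `selector` and `clustering` blocks documented above, here is a hedged sketch of a `loki.source.podlogs` component; the label value and the `loki.write.local` receiver are placeholders.

```river
// Discover PodLogs resources labelled instance=primary and, when clustering
// is enabled, share the resulting log collection across cluster nodes.
loki.source.podlogs "default" {
  forward_to = [loki.write.local.receiver]

  selector {
    match_labels = { instance = "primary" }
  }

  clustering {
    enabled = true
  }
}
```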
[using clustering]: {{< relref "../../concepts/clustering.md" >}} @@ -289,3 +290,20 @@ loki.write "local" { } } ``` + + +## Compatible components + +`loki.source.podlogs` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.syslog.md b/docs/sources/flow/reference/components/loki.source.syslog.md index ebb70010744b..017cc43ee0c5 100644 --- a/docs/sources/flow/reference/components/loki.source.syslog.md +++ b/docs/sources/flow/reference/components/loki.source.syslog.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.syslog/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.syslog/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.syslog/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.syslog/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.syslog/ -title: loki.source.syslog description: Learn about loki.source.syslog +title: loki.source.syslog --- # loki.source.syslog @@ -101,7 +102,7 @@ translated to internal labels in the form of ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -152,3 +153,20 @@ loki.write "local" { } ``` + + +## Compatible components + +`loki.source.syslog` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.source.windowsevent.md b/docs/sources/flow/reference/components/loki.source.windowsevent.md index bf2e0e2c1621..bb41a62cc3eb 100644 --- a/docs/sources/flow/reference/components/loki.source.windowsevent.md +++ b/docs/sources/flow/reference/components/loki.source.windowsevent.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.source.windowsevent/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.windowsevent/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.windowsevent/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.windowsevent/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.windowsevent/ -title: loki.source.windowsevent description: Learn about loki.windowsevent +title: loki.source.windowsevent --- # loki.source.windowsevent @@ -74,3 +75,20 @@ loki.write "endpoint" { } } ``` + + +## Compatible components + +`loki.source.windowsevent` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/loki.write.md b/docs/sources/flow/reference/components/loki.write.md index 7246a66f6897..75aad04f3f2a 100644 --- a/docs/sources/flow/reference/components/loki.write.md +++ b/docs/sources/flow/reference/components/loki.write.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/loki.write/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.write/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.write/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.write/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.write/ -title: loki.write description: Learn about loki.write +title: loki.write --- # loki.write @@ -116,19 +117,19 @@ enabled, the retry mechanism will be governed by the backoff configuration speci ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ### queue_config block (experimental) @@ -154,17 +155,18 @@ following two mechanisms: `min_read_frequency` and `max_read_frequency`. 
The WAL is located inside a component-specific directory relative to the -storage path Grafana Agent is configured to use. See the +storage path {{< param "PRODUCT_NAME" >}} is configured to use. See the [`agent run` documentation][run] for how to change the storage path. The following arguments are supported: Name | Type | Description | Default | Required --------------------- |------------|--------------------------------------------------------------------------------------------------------------------|-----------| -------- -`enabled` | `bool` | Whether to enable the WAL. | false | no +`enabled` | `bool` | Whether to enable the WAL. | false | no `max_segment_age` | `duration` | Maximum time a WAL segment should be allowed to live. Segments older than this setting will be eventually deleted. | `"1h"` | no `min_read_frequency` | `duration` | Minimum backoff time in the backup read mechanism. | `"250ms"` | no `max_read_frequency` | `duration` | Maximum backoff time in the backup read mechanism. | `"1s"` | no +`drain_timeout` | `duration` | Maximum time the WAL drain procedure can take, before being forcefully stopped. | `"30s"` | no [run]: {{< relref "../cli/run.md" >}} @@ -232,3 +234,20 @@ loki.write "default" { `loki.write` uses [snappy](https://en.wikipedia.org/wiki/Snappy_(compression)) for compression. Any labels that start with `__` will be removed before sending to the endpoint. + + + +## Compatible components + +`loki.write` has exports that can be consumed by the following components: + +- Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/mimir.rules.kubernetes.md b/docs/sources/flow/reference/components/mimir.rules.kubernetes.md index b87a8dc2e589..d5ba0e340255 100644 --- a/docs/sources/flow/reference/components/mimir.rules.kubernetes.md +++ b/docs/sources/flow/reference/components/mimir.rules.kubernetes.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/mimir.rules.kubernetes/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/mimir.rules.kubernetes/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/mimir.rules.kubernetes/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/mimir.rules.kubernetes/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/mimir.rules.kubernetes/ +description: Learn about mimir.rules.kubernetes labels: stage: beta title: mimir.rules.kubernetes -description: Learn about mimir.rules.kubernetes --- # mimir.rules.kubernetes -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} `mimir.rules.kubernetes` discovers `PrometheusRule` Kubernetes resources and loads them into a Mimir instance. @@ -46,18 +47,18 @@ mimir.rules.kubernetes "LABEL" { `mimir.rules.kubernetes` supports the following arguments: -Name | Type | Description | Default | Required --------------------------|------------|----------------------------------------------------------|---------|--------- -`address` | `string` | URL of the Mimir ruler. | | yes -`tenant_id` | `string` | Mimir tenant ID. 
| | no -`use_legacy_routes` | `bool` | Whether to use deprecated ruler API endpoints. | false | no -`sync_interval` | `duration` | Amount of time between reconciliations with Mimir. | "30s" | no -`mimir_namespace_prefix` | `string` | Prefix used to differentiate multiple agent deployments. | "agent" | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +Name | Type | Description | Default | Required +-------------------------|------------|---------------------------------------------------------------------------------|---------|--------- +`address` | `string` | URL of the Mimir ruler. | | yes +`tenant_id` | `string` | Mimir tenant ID. | | no +`use_legacy_routes` | `bool` | Whether to use deprecated ruler API endpoints. | false | no +`sync_interval` | `duration` | Amount of time between reconciliations with Mimir. | "30s" | no +`mimir_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_NAME" >}} deployments. | "agent" | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no At most one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -77,7 +78,7 @@ differently. Updates are processed as events from the Kubernetes API server according to the informer pattern. The `mimir_namespace_prefix` argument can be used to separate the rules managed -by multiple agent deployments across your infrastructure. It should be set to a +by multiple {{< param "PRODUCT_NAME" >}} deployments across your infrastructure. It should be set to a unique value for each deployment. 
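To tie the arguments above together, a minimal `mimir.rules.kubernetes` sketch follows; the ruler URL, tenant ID, and prefix are placeholder values assumed for illustration.

```river
// Load PrometheusRule resources into a Mimir ruler, keeping the rules managed
// by this deployment separate from other deployments via the namespace prefix.
mimir.rules.kubernetes "default" {
  address                = "http://mimir-ruler.mimir.svc:8080"
  tenant_id              = "team-observability"
  mimir_namespace_prefix = "agent-prod"
}
```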
## Blocks @@ -143,19 +144,19 @@ The `values` argument must not be provided when `operator` is set to `"Exists"` ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/module.file.md b/docs/sources/flow/reference/components/module.file.md index 41ea97532d1d..0e4b8b19d249 100644 --- a/docs/sources/flow/reference/components/module.file.md +++ b/docs/sources/flow/reference/components/module.file.md @@ -3,18 +3,19 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/module.file/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/module.file/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/module.file/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/module.file/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/module.file/ +description: Learn about module.file labels: stage: beta title: module.file -description: Learn about module.file --- # module.file -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} -`module.file` is a *module loader* component. A module loader is a Grafana Agent Flow +`module.file` is a *module loader* component. A module loader is a {{< param "PRODUCT_NAME" >}} component which retrieves a [module][] and runs the components defined inside of it. 
`module.file` simplifies the configurations for modules loaded from a file by embedding @@ -50,9 +51,9 @@ Name | Type | Description | Default | Required `poll_frequency` | `duration` | How often to poll for file changes | `"1m"` | no `is_secret` | `bool` | Marks the file as containing a [secret][] | `false` | no -[secret]: {{< relref "../../config-language/expressions/types_and_values.md#secrets" >}} +[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} -{{< docs/shared lookup="flow/reference/components/local-file-arguments-text.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/local-file-arguments-text.md" source="agent" version="" >}} ## Blocks diff --git a/docs/sources/flow/reference/components/module.git.md b/docs/sources/flow/reference/components/module.git.md index 6f0867f5fdcc..44bdee36a034 100644 --- a/docs/sources/flow/reference/components/module.git.md +++ b/docs/sources/flow/reference/components/module.git.md @@ -3,18 +3,19 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/module.git/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/module.git/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/module.git/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/module.git/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/module.git/ +description: Learn about module.git labels: stage: beta title: module.git -description: Learn about module.git --- # module.git -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} -`module.git` is a *module loader* component. A module loader is a Grafana Agent Flow +`module.git` is a *module loader* component. A module loader is a {{< param "PRODUCT_NAME" >}} component which retrieves a [module][] and runs the components defined inside of it. `module.git` retrieves a module source from a file in a Git repository. @@ -77,7 +78,7 @@ arguments | [arguments][] | Arguments to pass to the module. | no ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### ssh_key block diff --git a/docs/sources/flow/reference/components/module.http.md b/docs/sources/flow/reference/components/module.http.md index a2a198a2a321..5b1692fdc4d4 100644 --- a/docs/sources/flow/reference/components/module.http.md +++ b/docs/sources/flow/reference/components/module.http.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/module.http/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/module.http/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/module.http/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/module.http/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/module.http/ +description: Learn about module.http labels: stage: beta title: module.http -description: Learn about module.http --- # module.http -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} `module.http` is a [module loader][] component. 
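Because the module loader pattern recurs for `module.file`, `module.git`, and `module.http` above, here is a hedged `module.file` sketch using the arguments listed earlier; the file path and the module's `forward_to` argument are illustrative assumptions about what the loaded module declares.

```river
// Load a module from disk, re-reading it every minute, and pass it the
// arguments it declares. The argument names depend entirely on the module.
module.file "log_pipeline" {
  filename       = "/etc/agent/modules/logs.river"
  poll_frequency = "1m"

  arguments {
    forward_to = [loki.write.local.receiver]
  }
}
```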
@@ -51,7 +52,7 @@ Name | Type | Description | Default | Required `poll_timeout` | `duration` | Timeout when polling the URL. | `"10s"` | no `is_secret` | `bool` | Whether the response body should be treated as a secret. | false | no -[secret]: {{< relref "../../config-language/expressions/types_and_values.md#secrets" >}} +[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} ## Blocks diff --git a/docs/sources/flow/reference/components/module.string.md b/docs/sources/flow/reference/components/module.string.md index 7b9284e2f4c1..ef8c5e0b886e 100644 --- a/docs/sources/flow/reference/components/module.string.md +++ b/docs/sources/flow/reference/components/module.string.md @@ -3,18 +3,19 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/module.string/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/module.string/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/module.string/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/module.string/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/module.string/ +description: Learn about module.string labels: stage: beta title: module.string -description: Learn about module.string --- # module.string -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} -`module.string` is a *module loader* component. A module loader is a Grafana Agent Flow +`module.string` is a *module loader* component. A module loader is a {{< param "PRODUCT_NAME" >}} component which retrieves a [module][] and runs the components defined inside of it. [module]: {{< relref "../../concepts/modules.md" >}} diff --git a/docs/sources/flow/reference/components/otelcol.auth.basic.md b/docs/sources/flow/reference/components/otelcol.auth.basic.md index 94906d4f87dc..885eb53f09fa 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.basic.md +++ b/docs/sources/flow/reference/components/otelcol.auth.basic.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.basic/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.basic/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.basic/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.basic/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.basic/ -title: otelcol.auth.basic description: Learn about otelcol.auth.basic +title: otelcol.auth.basic --- # otelcol.auth.basic diff --git a/docs/sources/flow/reference/components/otelcol.auth.bearer.md b/docs/sources/flow/reference/components/otelcol.auth.bearer.md index 9f3ba25c0c6a..718789603b49 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.bearer.md +++ b/docs/sources/flow/reference/components/otelcol.auth.bearer.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.bearer/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.bearer/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.bearer/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.bearer/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.bearer/ -title: 
otelcol.auth.bearer description: Learn about otelcol.auth.bearer +title: otelcol.auth.bearer --- # otelcol.auth.bearer diff --git a/docs/sources/flow/reference/components/otelcol.auth.headers.md b/docs/sources/flow/reference/components/otelcol.auth.headers.md index b07b9a79373e..bd93a9045e31 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.headers.md +++ b/docs/sources/flow/reference/components/otelcol.auth.headers.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.headers/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.headers/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.headers/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.headers/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.headers/ -title: otelcol.auth.headers description: Learn about otelcol.auth.headers +title: otelcol.auth.headers --- # otelcol.auth.headers @@ -73,13 +74,6 @@ The `value` attribute sets the value of the header directly. Alternatively, `from_context` can be used to dynamically retrieve the header value from request metadata. -> **NOTE**: It is not possible to use `from_context` to get the header value if -> [the `otelcol.processor.batch` component][otelcol.processor.batch] is used to -> batch before data is sent to the component referencing -> `otelcol.auth.headers`. - -[otelcol.processor.batch]: {{< relref "./otelcol.processor.batch.md" >}} - ## Exported fields The following fields are exported and can be referenced by other components: diff --git a/docs/sources/flow/reference/components/otelcol.auth.oauth2.md b/docs/sources/flow/reference/components/otelcol.auth.oauth2.md index b4f0cdd686e9..4584f47eb78d 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.oauth2.md +++ b/docs/sources/flow/reference/components/otelcol.auth.oauth2.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.oauth2/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.oauth2/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.oauth2/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.oauth2/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.oauth2/ -title: otelcol.auth.oauth2 description: Learn about otelcol.auth.oauth2 +title: otelcol.auth.oauth2 --- # otelcol.auth.oauth2 @@ -63,7 +64,7 @@ tls | [tls][] | TLS settings for the token client. | no The `tls` block configures TLS settings used for connecting to the token client. If the `tls` block isn't provided, TLS won't be used for communication. 
-{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.auth.sigv4.md b/docs/sources/flow/reference/components/otelcol.auth.sigv4.md index 1c21d0c5320f..e4fc91df2832 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.sigv4.md +++ b/docs/sources/flow/reference/components/otelcol.auth.sigv4.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.sigv4/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.sigv4/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.sigv4/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.sigv4/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.sigv4/ -title: otelcol.auth.sigv4 description: Learn about otelcol.auth.sigv4 +title: otelcol.auth.sigv4 --- # otelcol.auth.sigv4 diff --git a/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md b/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md index 7c87977142aa..ab3e55b5521f 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md +++ b/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md @@ -1,14 +1,17 @@ --- +aliases: +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.connector.servicegraph/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.connector.servicegraph/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.connector.servicegraph/ +description: Learn about otelcol.connector.servicegraph labels: stage: experimental title: otelcol.connector.servicegraph -description: Learn about otelcol.connector.servicegraph --- # otelcol.connector.servicegraph -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} `otelcol.connector.servicegraph` accepts span data from other `otelcol` components and outputs metrics representing the relationship between various services in a system. @@ -132,11 +135,11 @@ The `store` block configures the in-memory store for spans. Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- `max_items` | `number` | Maximum number of items to keep in the store. | `1000` | no -`ttl` | `duration` | The time to live for spans in the store. | `"2ms"` | no +`ttl` | `duration` | The time to live for spans in the store. 
| `"2s"` | no ### output block -{{< docs/shared lookup="flow/reference/components/output-block-metrics.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block-metrics.md" source="agent" version="" >}} ## Exported fields @@ -216,4 +219,23 @@ Some of the metrics in Mimir may look like this: ``` traces_service_graph_request_total{client="shop-backend",failed="false",server="article-service",client_http_method="DELETE",server_http_method="DELETE"} traces_service_graph_request_failed_total{client="shop-backend",client_http_method="POST",failed="false",server="auth-service",server_http_method="POST"} -``` \ No newline at end of file +``` + +## Compatible components + +`otelcol.connector.servicegraph` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.connector.servicegraph` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md b/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md index 91e5332b0a98..5811b64b7733 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md +++ b/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.connector.spanlogs/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.connector.spanlogs/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.connector.spanlogs/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.connector.spanlogs/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.connector.spanlogs/ -title: otelcol.connector.spanlogs description: Learn about otelcol.connector.spanlogs +title: otelcol.connector.spanlogs --- # otelcol.connector.spanlogs @@ -78,7 +79,7 @@ The following attributes are supported: ### output block -{{< docs/shared lookup="flow/reference/components/output-block-logs.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block-logs.md" source="agent" version="" >}} ## Exported fields @@ -278,3 +279,23 @@ For an input trace like this... ] } ``` + + +## Compatible components + +`otelcol.connector.spanlogs` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.connector.spanlogs` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md b/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md index 9f210c14734e..c1e887b78c9f 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md +++ b/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.connector.spanmetrics/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.connector.spanmetrics/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.connector.spanmetrics/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.connector.spanmetrics/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.connector.spanmetrics/ +description: Learn about otelcol.connector.spanmetrics labels: stage: experimental title: otelcol.connector.spanmetrics -description: Learn about otelcol.connector.spanmetrics --- # otelcol.connector.spanmetrics -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} `otelcol.connector.spanmetrics` accepts span data from other `otelcol` components and aggregates Request, Error and Duration (R.E.D) OpenTelemetry metrics from the spans: @@ -171,7 +172,7 @@ The following attributes are supported: ### output block -{{< docs/shared lookup="flow/reference/components/output-block-metrics.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block-metrics.md" source="agent" version="" >}} ## Exported fields @@ -294,3 +295,23 @@ prometheus.remote_write "mimir" { } } ``` + + +## Compatible components + +`otelcol.connector.spanmetrics` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.connector.spanmetrics` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md b/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md index 9b9073dc6501..60480de6677e 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.loadbalancing/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.loadbalancing/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.loadbalancing/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.loadbalancing/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.loadbalancing/ +description: Learn about otelcol.exporter.loadbalancing labels: stage: beta title: otelcol.exporter.loadbalancing -description: Learn about otelcol.exporter.loadbalancing --- # otelcol.exporter.loadbalancing -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} `otelcol.exporter.loadbalancing` accepts logs and traces from other `otelcol` components and writes them over the network using the OpenTelemetry Protocol (OTLP) protocol. @@ -80,6 +81,7 @@ Hierarchy | Block | Description | Required resolver | [resolver][] | Configures discovering the endpoints to export to. | yes resolver > static | [static][] | Static list of endpoints to export to. | no resolver > dns | [dns][] | DNS-sourced list of endpoints to export to. | no +resolver > kubernetes | [kubernetes][] | Kubernetes-sourced list of endpoints to export to. | no protocol | [protocol][] | Protocol settings. Only OTLP is supported at the moment. | no protocol > otlp | [otlp][] | Configures an OTLP exporter. | no protocol > otlp > client | [client][] | Configures the exporter gRPC client. | no @@ -95,6 +97,7 @@ refers to a `static` block defined inside a `resolver` block. [resolver]: #resolver-block [static]: #static-block [dns]: #dns-block +[kubernetes]: #kubernetes-block [protocol]: #protocol-block [otlp]: #otlp-block [client]: #client-block @@ -136,6 +139,26 @@ Name | Type | Description | Default | Required `timeout` | `duration` | Resolver timeout. | `"1s"` | no `port` | `string` | Port to be used with the IP addresses resolved from the DNS hostname. | `"4317"` | no +### kubernetes block + +You can use the `kubernetes` block to load balance across the pods of a Kubernetes service. The Agent will be notified +by the Kubernetes API whenever a new pod is added or removed from the service. + +The following arguments are supported: + +Name | Type | Description | Default | Required +---- | ---- | ----------- | ------- | -------- +`service` | `string` | Kubernetes service to resolve. | | yes +`ports` | `list(number)` | Ports to use with the IP addresses resolved from `service`. | `[4317]` | no + +If no namespace is specified inside `service`, an attempt will be made to infer the namespace for this Agent. +If this fails, the `default` namespace will be used. + +Each of the ports listed in `ports` will be used with each of the IPs resolved from `service`. 
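A minimal sketch showing the new `kubernetes` resolver in context may help; the service name, namespace, and port are placeholders, and the RBAC requirement described next still applies.

```river
// Load balance OTLP traffic across the pods backing a Kubernetes service.
otelcol.exporter.loadbalancing "default" {
  resolver {
    kubernetes {
      service = "tempo-distributor.tempo"
      ports   = [4317]
    }
  }

  protocol {
    otlp {
      client {}
    }
  }
}
```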
+ +The "get", "list", and "watch" [roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-example) +must be granted in Kubernetes for the resolver to work. + ### protocol block The `protocol` block configures protocol-related settings for exporting. @@ -163,11 +186,11 @@ Name | Type | Description | Default | Required `authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no `auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no -{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}} You can configure an HTTP proxy with the following environment variables: @@ -196,7 +219,7 @@ able to handle and proxy HTTP/2 traffic. The `tls` block configures TLS settings used for the connection to the gRPC server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} ### keepalive block @@ -216,18 +239,18 @@ Name | Type | Description | Default | Required The `queue` block configures an in-memory buffer of batches before data is sent to the gRPC server. -{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}} ### retry block The `retry` block configures how failed requests to the gRPC server are retried. -{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}} ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} ## Exported fields @@ -278,3 +301,19 @@ otelcol.exporter.loadbalancing "default" { } } ``` + + +## Compatible components + +`otelcol.exporter.loadbalancing` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.exporter.logging.md b/docs/sources/flow/reference/components/otelcol.exporter.logging.md index e99efa07eede..c1e4c8413948 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.logging.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.logging.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.logging/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.logging/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.logging/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.logging/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.logging/ -title: otelcol.exporter.logging description: Learn about otelcol.exporter.logging +title: otelcol.exporter.logging --- # otelcol.exporter.logging @@ -59,7 +60,7 @@ refers to a `tls` block defined inside a `client` block. ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} ## Exported fields @@ -106,3 +107,19 @@ otelcol.exporter.logging "default" { sampling_thereafter = 1 } ``` + + +## Compatible components + +`otelcol.exporter.logging` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.exporter.loki.md b/docs/sources/flow/reference/components/otelcol.exporter.loki.md index d9526fba561f..9a314c3b5aae 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.loki.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.loki.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.loki/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.loki/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.loki/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.loki/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.loki/ -title: otelcol.exporter.loki description: Learn about otelcol.exporter.loki +title: otelcol.exporter.loki --- # otelcol.exporter.loki @@ -156,4 +157,25 @@ loki.write "local" { } ``` -[Prometheus format](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels) \ No newline at end of file +[Prometheus format](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels) + + + +## Compatible components + +`otelcol.exporter.loki` can accept arguments from the following components: + +- Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) + +`otelcol.exporter.loki` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlp.md b/docs/sources/flow/reference/components/otelcol.exporter.otlp.md index 348b73ee5fb1..6236f784d705 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.otlp.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.otlp.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.otlp/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.otlp/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.otlp/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.otlp/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.otlp/ -title: otelcol.exporter.otlp description: Learn about otelcol.exporter.otlp +title: otelcol.exporter.otlp --- # otelcol.exporter.otlp @@ -80,11 +81,11 @@ Name | Type | Description | Default | Required `authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no `auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. 
| | no -{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}} An HTTP proxy can be configured through the following environment variables: @@ -113,7 +114,7 @@ able to handle and proxy HTTP/2 traffic. The `tls` block configures TLS settings used for the connection to the gRPC server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} > **NOTE**: `otelcol.exporter.otlp` uses gRPC, which does not allow you to send sensitive credentials (like `auth`) over insecure channels. > Sending sensitive credentials over insecure non-TLS connections is supported by non-gRPC exporters such as [otelcol.exporter.otlphttp][]. @@ -138,18 +139,18 @@ Name | Type | Description | Default | Required The `sending_queue` block configures an in-memory buffer of batches before data is sent to the gRPC server. -{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}} ### retry_on_failure block The `retry_on_failure` block configures how failed requests to the gRPC server are retried. -{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}} ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} ## Exported fields @@ -213,3 +214,19 @@ otelcol.auth.basic "grafana_cloud_tempo" { password = env("GRAFANA_CLOUD_API_KEY") } ``` + + +## Compatible components + +`otelcol.exporter.otlp` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md b/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md index 8782d6b30bea..14d0c5112fad 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.otlphttp/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.otlphttp/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.otlphttp/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.otlphttp/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.otlphttp/ -title: otelcol.exporter.otlphttp description: Learn about otelcol.exporter.otlphttp +title: otelcol.exporter.otlphttp --- # otelcol.exporter.otlphttp @@ -90,32 +91,32 @@ Name | Type | Description | Default | Required Setting `disable_keep_alives` to `true` will result in significant overhead establishing a new HTTP(s) connection for every request. Before enabling this option, consider whether changes to idle connection settings can achieve your goal. -{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} ### tls block The `tls` block configures TLS settings used for the connection to the HTTP server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} ### sending_queue block The `sending_queue` block configures an in-memory buffer of batches before data is sent to the HTTP server. -{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}} ### retry_on_failure block The `retry_on_failure` block configures how failed requests to the HTTP server are retried. -{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}} ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} ## Exported fields @@ -154,3 +155,19 @@ otelcol.exporter.otlphttp "tempo" { } } ``` + + +## Compatible components + +`otelcol.exporter.otlphttp` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md b/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md index e3b3c08416a8..3008f22f4353 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.prometheus/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.prometheus/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.prometheus/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.prometheus/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.prometheus/ -title: otelcol.exporter.prometheus description: Learn about otelcol.exporter.prometheus +title: otelcol.exporter.prometheus --- # otelcol.exporter.prometheus @@ -44,7 +45,8 @@ Name | Type | Description | Defaul `include_scope_labels` | `boolean` | Whether to include additional OTLP labels in all metrics. | `true` | no `add_metric_suffixes` | `boolean` | Whether to add type and unit suffixes to metrics names. | `true` | no `gc_frequency` | `duration` | How often to clean up stale metrics from memory. | `"5m"` | no -`forward_to` | `list(receiver)` | Where to forward converted Prometheus metrics. | | yes +`forward_to` | `list(MetricsReceiver)` | Where to forward converted Prometheus metrics. | | yes +`resource_to_telemetry_conversion` | `boolean` | Whether to convert OTel resource attributes to Prometheus labels. | `false` | no By default, OpenTelemetry resources are converted into `target_info` metrics. OpenTelemetry instrumentation scopes are converted into `otel_scope_info` @@ -72,7 +74,6 @@ are forwarded to the `forward_to` argument. The following are dropped during the conversion process: * Metrics that use the delta aggregation temporality -* ExponentialHistogram data points ## Component health @@ -108,3 +109,23 @@ prometheus.remote_write "mimir" { } } ``` + + +## Compatible components + +`otelcol.exporter.prometheus` can accept arguments from the following components: + +- Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) + +`otelcol.exporter.prometheus` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md b/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md index 9ef641fee047..893d38b5911e 100644 --- a/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md +++ b/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ +description: Learn about otelcol.extension.jaeger_remote_sampling label: stage: experimental title: otelcol.extension.jaeger_remote_sampling -description: Learn about otelcol.extension.jaeger_remote_sampling --- # otelcol.extension.jaeger_remote_sampling -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} `otelcol.extension.jaeger_remote_sampling` serves a specified Jaeger remote sampling document. @@ -211,11 +212,11 @@ Name | Type | Description | Default | Required `authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no `auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no -{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}} An HTTP proxy can be configured through the following environment variables: @@ -244,7 +245,7 @@ able to handle and proxy HTTP/2 traffic. The `tls` block configures TLS settings used for the connection to the gRPC server. 
-{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} ### keepalive client block diff --git a/docs/sources/flow/reference/components/otelcol.processor.attributes.md b/docs/sources/flow/reference/components/otelcol.processor.attributes.md index 39af8475dd3f..ae1b1eafe555 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.attributes.md +++ b/docs/sources/flow/reference/components/otelcol.processor.attributes.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.attributes/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.attributes/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.attributes/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.attributes/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.attributes/ -title: otelcol.processor.attributes description: Learn about otelcol.processor.attributes +title: otelcol.processor.attributes --- # otelcol.processor.attributes @@ -148,7 +149,7 @@ The supported values for `action` are: The `include` block provides an option to include data being fed into the [action] blocks based on the properties of a span, log, or metric records. -{{< docs/shared lookup="flow/reference/components/match-properties-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/match-properties-block.md" source="agent" version="" >}} One of the following is also required: * For spans, one of `services`, `span_names`, `span_kinds`, [attribute][], [resource][], or [library][] must be specified @@ -171,7 +172,7 @@ If you would like to not propagate certain signals to downstream components, consider a processor such as [otelcol.processor.tail_sampling]({{< relref "./otelcol.processor.tail_sampling.md" >}}). 
{{% /admonition %}} -{{< docs/shared lookup="flow/reference/components/match-properties-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/match-properties-block.md" source="agent" version="" >}} One of the following is also required: * For spans, one of `services`, `span_names`, `span_kinds`, [attribute][], [resource][], or [library][] must be specified @@ -186,27 +187,27 @@ For example, adding a `span_names` filter could cause the component to error if ### regexp block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-regexp-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-filter-regexp-block.md" source="agent" version="" >}} ### attribute block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-attribute-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-filter-attribute-block.md" source="agent" version="" >}} ### resource block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-resource-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-filter-resource-block.md" source="agent" version="" >}} ### library block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-library-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-filter-library-block.md" source="agent" version="" >}} ### log_severity block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-log-severity-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-filter-log-severity-block.md" source="agent" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -633,3 +634,23 @@ otelcol.processor.attributes "default" { } } ``` + + +## Compatible components + +`otelcol.processor.attributes` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.processor.attributes` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.batch.md b/docs/sources/flow/reference/components/otelcol.processor.batch.md index 5216d88259fb..7a8eff522ff5 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.batch.md +++ b/docs/sources/flow/reference/components/otelcol.processor.batch.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.batch/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.batch/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.batch/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.batch/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.batch/ -title: otelcol.processor.batch description: Learn about otelcol.processor.batch +title: otelcol.processor.batch --- # otelcol.processor.batch @@ -58,18 +59,30 @@ following events happens: * The duration specified by `timeout` elapses since the time the last batch was sent. -* The number of spans, log lines, or metric samples processed exceeds the - number specified by `send_batch_size`. +* The number of spans, log lines, or metric samples processed is greater than + or equal to the number specified by `send_batch_size`. -Use `send_batch_max_size` to limit the amount of data contained in a single -batch. When set to `0`, batches can be any size. +Logs, traces, and metrics are processed independently. +For example, if `send_batch_size` is set to `1000`: +* The processor may, at the same time, buffer 1,000 spans, + 1,000 log lines, and 1,000 metric samples before flushing them. +* If there are enough spans for a batch of spans (1,000 or more), but not enough for a + batch of metric samples (less than 1,000) then only the spans will be flushed. + +Use `send_batch_max_size` to limit the amount of data contained in a single batch: +* When set to `0`, batches can be any size. +* When set to a non-zero value, `send_batch_max_size` must be greater than or equal to `send_batch_size`. + Every batch will contain up to the `send_batch_max_size` number of spans, log lines, or metric samples. + The excess spans, log lines, or metric samples will not be lost - instead, they will be added to + the next batch. For example, assume `send_batch_size` is set to the default `8192` and there -are currently 8000 batched spans. If the batch processor receives 8000 more -spans at once, the total batch size would be 16,192 which would then be flushed -as a single batch. `send_batch_max_size` constrains how big a batch can get. -When set to a non-zero value, `send_batch_max_size` must be greater or equal to -`send_batch_size`. +are currently 8,000 batched spans. If the batch processor receives 8,000 more +spans at once, its behavior depends on how `send_batch_max_size` is configured: +* If `send_batch_max_size` is set to `0`, the total batch size would be 16,000 + which would then be flushed as a single batch. +* If `send_batch_max_size` is set to `10000`, then the total batch size will be + 10,000 and the remaining 6,000 spans will be flushed in a subsequent batch. `metadata_cardinality_limit` applies for the lifetime of the process. @@ -97,7 +110,7 @@ output | [output][] | Configures where to send received telemetry data. 
| yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -150,6 +163,30 @@ otelcol.exporter.otlp "production" { } ``` +### Batching with a timeout + +This example will buffer up to 10,000 spans, metric data points, or log records for up to 10 seconds. +Because `send_batch_max_size` is not set, the batch size may exceed 10,000. + +```river +otelcol.processor.batch "default" { + timeout = "10s" + send_batch_size = 10000 + + output { + metrics = [otelcol.exporter.otlp.production.input] + logs = [otelcol.exporter.otlp.production.input] + traces = [otelcol.exporter.otlp.production.input] + } +} + +otelcol.exporter.otlp "production" { + client { + endpoint = env("OTLP_SERVER_ENDPOINT") + } +} +``` + ### Batching based on metadata Batching by metadata enables support for multi-tenant OpenTelemetry pipelines @@ -190,3 +227,23 @@ otelcol.exporter.otlp "production" { ``` [otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}} + + +## Compatible components + +`otelcol.processor.batch` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.processor.batch` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.discovery.md b/docs/sources/flow/reference/components/otelcol.processor.discovery.md index 14d36d9e13b3..9d9b7c05e3a3 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.discovery.md +++ b/docs/sources/flow/reference/components/otelcol.processor.discovery.md @@ -3,17 +3,18 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.discovery/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.discovery/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.discovery/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.discovery/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.discovery/ -title: otelcol.processor.discovery description: Learn about otelcol.processor.discovery +title: otelcol.processor.discovery --- # otelcol.processor.discovery `otelcol.processor.discovery` accepts traces telemetry data from other `otelcol` -components. It can be paired with `discovery.*` components, which supply a list +components. It can be paired with `discovery.*` components, which supply a list of labels for each discovered target. -`otelcol.processor.discovery` adds resource attributes to spans which have a hostname +`otelcol.processor.discovery` adds resource attributes to spans which have a hostname matching the one in the `__address__` label provided by the `discovery.*` component. 
{{% admonition type="note" %}} @@ -25,22 +26,22 @@ Multiple `otelcol.processor.discovery` components can be specified by giving the different labels. {{% admonition type="note" %}} -It can be difficult to follow [OpenTelemetry semantic conventions][OTEL sem conv] when +It can be difficult to follow [OpenTelemetry semantic conventions][OTEL sem conv] when adding resource attributes via `otelcol.processor.discovery`: -* `discovery.relabel` and most `discovery.*` processes such as `discovery.kubernetes` +* `discovery.relabel` and most `discovery.*` processes such as `discovery.kubernetes` can only emit [Prometheus-compatible labels][Prometheus data model]. -* Prometheus labels use underscores (`_`) in labels names, whereas +* Prometheus labels use underscores (`_`) in label names, whereas [OpenTelemetry semantic conventions][OTEL sem conv] use dots (`.`). * Although `otelcol.processor.discovery` is able to work with non-Prometheus labels - such as ones containing dots, the fact that `discovery.*` components are generally - only compatible with Prometheus naming conventions makes it hard to follow OpenTelemetry + such as ones containing dots, the fact that `discovery.*` components are generally + only compatible with Prometheus naming conventions makes it hard to follow OpenTelemetry semantic conventions in `otelcol.processor.discovery`. -If your use case is to add resource attributes which contain Kubernetes metadata, +If your use case is to add resource attributes which contain Kubernetes metadata, consider using `otelcol.processor.k8sattributes` instead. ------ -The main use case for `otelcol.processor.discovery` is for users who migrate to Grafana Agent Flow mode +The main use case for `otelcol.processor.discovery` is for users who migrate to {{< param "PRODUCT_NAME" >}} from Static mode's `prom_sd_operation_type`/`prom_sd_pod_associations` [configuration options][Traces]. [Prometheus data model]: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels @@ -105,7 +106,7 @@ output | [output][] | Configures where to send received telemetry data. | yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block-traces.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block-traces.md" source="agent" version="" >}} ## Exported fields @@ -190,3 +191,25 @@ otelcol.processor.discovery "default" { } } ``` + + + +## Compatible components + +`otelcol.processor.discovery` can accept arguments from the following components: + +- Components that export [Targets]({{< relref "../compatibility/#targets-exporters" >}}) +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.processor.discovery` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details.
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/otelcol.processor.filter.md b/docs/sources/flow/reference/components/otelcol.processor.filter.md index d941665b2d38..49a11028a80c 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.filter.md +++ b/docs/sources/flow/reference/components/otelcol.processor.filter.md @@ -3,11 +3,12 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.filter/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.filter/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.filter/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.filter/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.filter/ +description: Learn about otelcol.processor.filter labels: stage: experimental title: otelcol.processor.filter -description: Learn about otelcol.processor.filter --- # otelcol.processor.filter @@ -109,7 +110,7 @@ output | [output][] | Configures where to send received telemetry data. | ye ### traces block -The `traces` block specifies statements that filter trace telemetry signals. +The `traces` block specifies statements that filter trace telemetry signals. Only one `traces` block can be specified. Name | Type | Description | Default | Required @@ -117,8 +118,7 @@ Name | Type | Description `span` | `list(string)` | List of OTTL statements filtering OTLP spans. | | no `spanevent` | `list(string)` | List of OTTL statements filtering OTLP span events. | | no -The syntax of OTTL statements depends on the OTTL context. See the OpenTelemetry -documentation for more information: +The syntax of OTTL statements depends on the OTTL context. See the OpenTelemetry documentation for more information: * [OTTL span context][] * [OTTL spanevent context][] @@ -173,7 +173,7 @@ Only one of the statements inside the list of statements has to be satisfied. 
### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -212,7 +212,7 @@ otelcol.processor.filter "default" { traces { span = [ - "attributes[\"container.name\"] == \"app_container_1\", + "attributes[\"container.name\"] == \"app_container_1\"", ] } @@ -237,7 +237,6 @@ otelcol.processor.filter "default" { error_mode = "ignore" metrics { - context = "resource" metric = [ "name == \"my.metric\" and resource.attributes[\"my_label\"] == \"abc123\"" "type == METRIC_DATA_TYPE_HISTOGRAM" @@ -291,12 +290,37 @@ Some values in the River strings are [escaped][river-strings]: * `\` is escaped with `\\` * `"` is escaped with `\"` -[river-strings]: {{< relref "../../config-language/expressions/types_and_values.md/#strings" >}} +[river-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#strings" >}} [OTTL]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.85.0/pkg/ottl/README.md +[OTTL span context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottlspan/README.md +[OTTL spanevent context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottlspanevent/README.md +[OTTL metric context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottlmetric/README.md +[OTTL datapoint context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottldatapoint/README.md +[OTTL log context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottllog/README.md [OTTL Converter functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#converters [HasAttrKeyOnDataPoint]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/filterprocessor/README.md#hasattrkeyondatapoint [HasAttrOnDataPoint]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/filterprocessor/README.md#hasattrondatapoint [OTTL booleans]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.85.0/pkg/ottl#booleans [OTTL math expressions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.85.0/pkg/ottl#math-expressions + + +## Compatible components + +`otelcol.processor.filter` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.processor.filter` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md b/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md index dd06860a4efc..6e16dcebcd48 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md +++ b/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.k8sattributes/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.k8sattributes/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.k8sattributes/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.k8sattributes/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.k8sattributes/ -title: otelcol.processor.k8sattributes description: Learn about otelcol.processor.k8sattributes +title: otelcol.processor.k8sattributes --- # otelcol.processor.k8sattributes @@ -15,7 +16,7 @@ components and adds Kubernetes metadata to the resource attributes of spans, log {{% admonition type="note" %}} `otelcol.processor.k8sattributes` is a wrapper over the upstream OpenTelemetry -Collector `k8sattributes` processor. If necessary, bug reports or feature requests +Collector `k8sattributes` processor. If necessary, bug reports or feature requests will be redirected to the upstream repository. {{% /admonition %}} @@ -53,12 +54,12 @@ Setting `passthrough` to `true` enables the "passthrough mode" of `otelcol.proce * Only a `k8s.pod.ip` resource attribute will be added. * No other metadata will be added. * The Kubernetes API will not be accessed. -* To correctly detect the pod IPs, the Agent must receive spans directly from services. +* To correctly detect the pod IPs, {{< param "PRODUCT_ROOT_NAME" >}} must receive spans directly from services. * The `passthrough` setting is useful when configuring the Agent as a Kubernetes Deployment. -An Agent running as a Deployment cannot detect the IP addresses of pods generating telemetry -data without any of the well-known IP attributes. If the Deployment Agent receives telemetry from -Agents deployed as DaemonSet, then some of those attributes might be missing. As a workaround, -you can configure the DaemonSet Agents with `passthrough` set to `true`. +A {{< param "PRODUCT_ROOT_NAME" >}} running as a Deployment cannot detect the IP addresses of pods generating telemetry +data without any of the well-known IP attributes. If the Deployment {{< param "PRODUCT_ROOT_NAME" >}} receives telemetry from +{{< param "PRODUCT_ROOT_NAME" >}}s deployed as DaemonSet, then some of those attributes might be missing. As a workaround, +you can configure the DaemonSet {{< param "PRODUCT_ROOT_NAME" >}}s with `passthrough` set to `true`. ## Blocks @@ -142,13 +143,13 @@ By default, if `metadata` is not specified, the following fields are extracted a The `annotation` block configures how to extract Kubernetes annotations. -{{< docs/shared lookup="flow/reference/components/extract-field-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/extract-field-block.md" source="agent" version="" >}} ### label block {#extract-label-block} The `label` block configures how to extract Kubernetes labels. 
-{{< docs/shared lookup="flow/reference/components/extract-field-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/extract-field-block.md" source="agent" version="" >}} ### filter block @@ -167,13 +168,13 @@ If `node` is specified, then any pods not running on the specified node will be The `field` block allows you to filter pods by generic Kubernetes fields. -{{< docs/shared lookup="flow/reference/components/field-filter-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/field-filter-block.md" source="agent" version="" >}} ### label block {#filter-label-block} The `label` block allows you to filter pods by generic Kubernetes labels. -{{< docs/shared lookup="flow/reference/components/field-filter-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/field-filter-block.md" source="agent" version="" >}} ### pod_association block @@ -236,7 +237,7 @@ Name | Type | Description | Default | Required ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -409,3 +410,23 @@ prometheus.remote_write "mimir" { } } ``` + + +## Compatible components + +`otelcol.processor.k8sattributes` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.processor.k8sattributes` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md b/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md index 08ab82396c32..9d1528adf70d 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md +++ b/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.memory_limiter/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.memory_limiter/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.memory_limiter/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.memory_limiter/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.memory_limiter/ -title: otelcol.processor.memory_limiter description: Learn about otelcol.processor.memory_limiter +title: otelcol.processor.memory_limiter --- # otelcol.processor.memory_limiter @@ -86,7 +87,7 @@ output | [output][] | Configures where to send received telemetry data. 
| yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -108,3 +109,23 @@ configuration. `otelcol.processor.memory_limiter` does not expose any component-specific debug information. + + +## Compatible components + +`otelcol.processor.memory_limiter` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.processor.memory_limiter` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md b/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md index 32bbd8bd15a9..a76c85b2a21b 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md +++ b/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md @@ -1,14 +1,17 @@ --- +aliases: +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.probabilistic_sampler/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.probabilistic_sampler/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.probabilistic_sampler/ +description: Learn about otelcol.processor.probabilistic_sampler labels: stage: experimental title: otelcol.processor.probabilistic_sampler -description: Learn about telcol.processor.probabilistic_sampler --- # otelcol.processor.probabilistic_sampler -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} `otelcol.processor.probabilistic_sampler` accepts logs and traces data from other otelcol components and applies probabilistic sampling based on configuration options. @@ -142,3 +145,23 @@ otelcol.processor.probabilistic_sampler "default" { } } ``` + + +## Compatible components + +`otelcol.processor.probabilistic_sampler` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.processor.probabilistic_sampler` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details.
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.span.md b/docs/sources/flow/reference/components/otelcol.processor.span.md index 81ecdacf513e..fe6985881007 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.span.md +++ b/docs/sources/flow/reference/components/otelcol.processor.span.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.span/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.span/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.span/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.span/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.span/ +description: Learn about otelcol.processor.span labels: stage: experimental title: otelcol.processor.span -description: Learn about otelcol.processor.span --- # otelcol.processor.span -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} `otelcol.processor.span` accepts traces telemetry data from other `otelcol` components and modifies the names and attributes of the spans. @@ -190,23 +191,23 @@ with a non-empty value for a valid configuration. ### regexp block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-regexp-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-filter-regexp-block.md" source="agent" version="" >}} ### attribute block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-attribute-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-filter-attribute-block.md" source="agent" version="" >}} ### resource block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-resource-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-filter-resource-block.md" source="agent" version="" >}} ### library block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-library-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-filter-library-block.md" source="agent" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block-traces.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block-traces.md" source="agent" version="" >}} ## Exported fields @@ -387,3 +388,23 @@ otelcol.processor.span "default" { } } ``` + + +## Compatible components + +`otelcol.processor.span` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.processor.span` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md b/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md index 97e6072b9731..b6c6ccfdc0f7 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md +++ b/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.tail_sampling/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.tail_sampling/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.tail_sampling/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.tail_sampling/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.tail_sampling/ +description: Learn about otelcol.processor.tail_sampling labels: stage: beta title: otelcol.processor.tail_sampling -description: Learn about otelcol.processor.tail_sampling --- # otelcol.processor.tail_sampling -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} `otelcol.processor.tail_sampling` samples traces based on a set of defined policies. All spans for a given trace *must* be received by the same collector @@ -307,7 +308,7 @@ Name | Type | Description | Default | Required ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -332,7 +333,7 @@ information. ## Example -This example batches trace data from Grafana Agent before sending it to +This example batches trace data from {{< param "PRODUCT_NAME" >}} before sending it to [otelcol.exporter.otlp][] for further processing. This example shows an impractical number of policies for the purpose of demonstrating how to set up each type. ```river @@ -552,3 +553,23 @@ otelcol.exporter.otlp "production" { } } ``` + + +## Compatible components + +`otelcol.processor.tail_sampling` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.processor.tail_sampling` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.transform.md b/docs/sources/flow/reference/components/otelcol.processor.transform.md index 04be52e99eaf..81967bb11c24 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.transform.md +++ b/docs/sources/flow/reference/components/otelcol.processor.transform.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.transform/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.transform/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.transform/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.transform/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.transform/ +description: Learn about otelcol.processor.transform labels: stage: experimental title: otelcol.processor.transform -description: Learn about otelcol.processor.transform --- # otelcol.processor.transform -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} `otelcol.processor.transform` accepts telemetry data from other `otelcol` components and modifies it using the [OpenTelemetry Transformation Language (OTTL)][OTTL]. @@ -54,8 +55,8 @@ For example, the OTTL statement `set(description, "Sum") where type == "Sum"` ca Raw strings are generally more convenient for writing OTTL statements. -[river-strings]: {{< relref "../../config-language/expressions/types_and_values.md/#strings" >}} -[river-raw-strings]: {{< relref "../../config-language/expressions/types_and_values.md/#raw-strings" >}} +[river-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#strings" >}} +[river-raw-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#raw-strings" >}} {{% /admonition %}} {{% admonition type="note" %}} @@ -250,7 +251,7 @@ span using the `span` context, it is more efficient to use the `resource` contex ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -565,8 +566,8 @@ Each statement is enclosed in backticks instead of quotation marks. This constitutes a [raw string][river-raw-strings], and lets us avoid the need to escape each `"` with a `\"`, and each `\` with a `\\` inside a [normal][river-strings] River string. 
-[river-strings]: {{< relref "../../config-language/expressions/types_and_values.md/#strings" >}} -[river-raw-strings]: {{< relref "../../config-language/expressions/types_and_values.md/#raw-strings" >}} +[river-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#strings" >}} +[river-raw-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#raw-strings" >}} [traces protobuf]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.0.0/opentelemetry/proto/trace/v1/trace.proto [metrics protobuf]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.0.0/opentelemetry/proto/metrics/v1/metrics.proto @@ -589,3 +590,23 @@ each `"` with a `\"`, and each `\` with a `\\` inside a [normal][river-strings] [OTTL metric context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottlmetric/README.md [OTTL datapoint context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottldatapoint/README.md [OTTL log context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottllog/README.md + + +## Compatible components + +`otelcol.processor.transform` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.processor.transform` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md b/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md index 6dd59b2a5d93..c19bb03dba77 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.jaeger/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.jaeger/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.jaeger/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.jaeger/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.jaeger/ -title: otelcol.receiver.jaeger description: Learn about otelcol.receiver.jaeger +title: otelcol.receiver.jaeger --- # otelcol.receiver.jaeger @@ -114,7 +115,7 @@ Name | Type | Description | Default | Required The `tls` block configures TLS settings used for a server. If the `tls` block isn't provided, TLS won't be used for connections to the server. 
-{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} ### keepalive block @@ -222,11 +223,11 @@ Name | Type | Description | Default | Required ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -277,3 +278,20 @@ otelcol.exporter.otlp "default" { ## Technical details `otelcol.receiver.jaeger` supports [gzip](https://en.wikipedia.org/wiki/Gzip) for compression. + + +## Compatible components + +`otelcol.receiver.jaeger` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.kafka.md b/docs/sources/flow/reference/components/otelcol.receiver.kafka.md index 4c332641a990..28588420609d 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.kafka.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.kafka.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.kafka/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.kafka/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.kafka/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.kafka/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.kafka/ -title: otelcol.receiver.kafka description: Learn about otelcol.receiver.kafka +title: otelcol.receiver.kafka --- # otelcol.receiver.kafka @@ -168,7 +169,7 @@ The `tls` block configures TLS settings used for connecting to the Kafka brokers. If the `tls` block isn't provided, TLS won't be used for communication. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} ### kerberos block @@ -278,11 +279,11 @@ Regular expressions are not allowed in the `headers` argument. 
Only exact matchi ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -329,3 +330,20 @@ otelcol.exporter.otlp "default" { } } ``` + + +## Compatible components + +`otelcol.receiver.kafka` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.loki.md b/docs/sources/flow/reference/components/otelcol.receiver.loki.md index 1a3a1b234eb6..31d9877da882 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.loki.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.loki.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.loki/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.loki/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.loki/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.loki/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.loki/ +description: Learn about otelcol.receiver.loki labels: stage: beta title: otelcol.receiver.loki -description: Learn about otelcol.receiver.loki --- # otelcol.receiver.loki -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} `otelcol.receiver.loki` receives Loki log entries, converts them to the OpenTelemetry logs format, and forwards them to other `otelcol.*` components. @@ -48,7 +49,7 @@ output | [output][] | Configures where to send converted telemetry data. | yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -98,3 +99,24 @@ otelcol.exporter.otlp "default" { } } ``` + + + +## Compatible components + +`otelcol.receiver.loki` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.receiver.loki` has exports that can be consumed by the following components: + +- Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md b/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md index 6da48c06fc53..a6d7a5bb3ae3 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.opencensus/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.opencensus/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.opencensus/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.opencensus/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.opencensus/ -title: otelcol.receiver.opencensus description: Learn about otelcol.receiver.opencensus +title: otelcol.receiver.opencensus --- # otelcol.receiver.opencensus @@ -90,7 +91,7 @@ refers to a `tls` block defined inside a `grpc` block. The `tls` block configures TLS settings used for a server. If the `tls` block isn't provided, TLS won't be used for connections to the server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} ### keepalive block @@ -130,11 +131,11 @@ Name | Type | Description | Default | Required ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -209,3 +210,20 @@ otelcol.exporter.otlp "default" { } } ``` + + +## Compatible components + +`otelcol.receiver.opencensus` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.otlp.md b/docs/sources/flow/reference/components/otelcol.receiver.otlp.md index 60f1f1748f9c..134098ed2de4 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.otlp.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.otlp.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.otlp/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.otlp/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.otlp/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.otlp/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.otlp/ -title: otelcol.receiver.otlp description: Learn about otelcol.receiver.otlp +title: otelcol.receiver.otlp --- # otelcol.receiver.otlp @@ -186,11 +187,11 @@ If `allowed_headers` includes `"*"`, all headers are permitted. ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -247,3 +248,20 @@ otelcol.exporter.otlp "default" { ## Technical details `otelcol.receiver.otlp` supports [gzip](https://en.wikipedia.org/wiki/Gzip) for compression. + + +## Compatible components + +`otelcol.receiver.otlp` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md b/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md index 61c16282551b..d0723aad80c4 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.prometheus/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.prometheus/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.prometheus/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.prometheus/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.prometheus/ +description: Learn about otelcol.receiver.prometheus labels: stage: beta title: otelcol.receiver.prometheus -description: Learn about otelcol.receiver.prometheus --- # otelcol.receiver.prometheus -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} `otelcol.receiver.prometheus` receives Prometheus metrics, converts them to the OpenTelemetry metrics format, and forwards them to other `otelcol.*` @@ -49,7 +50,7 @@ output | [output][] | Configures where to send received telemetry data. | yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -57,7 +58,7 @@ The following fields are exported and can be referenced by other components: Name | Type | Description ---- | ---- | ----------- -`receiver` | `receiver` | A value that other components can use to send Prometheus metrics to. +`receiver` | `MetricsReceiver` | A value that other components can use to send Prometheus metrics to. ## Component health @@ -80,7 +81,7 @@ endpoint: ```river prometheus.scrape "default" { - // Collect metrics from Grafana Agent's default HTTP listen address. + // Collect metrics from the default HTTP listen address. targets = [{"__address__" = "127.0.0.1:12345"}] forward_to = [otelcol.receiver.prometheus.default.receiver] @@ -98,3 +99,23 @@ otelcol.exporter.otlp "default" { } } ``` + + +## Compatible components + +`otelcol.receiver.prometheus` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.receiver.prometheus` has exports that can be consumed by the following components: + +- Components that consume [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md b/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md new file mode 100644 index 000000000000..11e6a0485e09 --- /dev/null +++ b/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md @@ -0,0 +1,240 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.vcenter/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.vcenter/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.vcenter/ +canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.vcenter/ +title: otelcol.receiver.vcenter +description: Learn about otelcol.receiver.vcenter +labels: + stage: experimental +--- + +# otelcol.receiver.vcenter + +{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} + +`otelcol.receiver.vcenter` accepts metrics from a +vCenter or ESXi host running VMware vSphere APIs and +forwards it to other `otelcol.*` components. + +> **NOTE**: `otelcol.receiver.vcenter` is a wrapper over the upstream +> OpenTelemetry Collector `vcenter` receiver from the `otelcol-contrib` +> distribution. Bug reports or feature requests will be redirected to the +> upstream repository, if necessary. + +Multiple `otelcol.receiver.vcenter` components can be specified by giving them +different labels. + +The full list of metrics that can be collected can be found in [vcenter receiver documentation][vcenter metrics]. + +[vcenter metrics]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/receiver/vcenterreceiver/documentation.md + +## Prerequisites + +This receiver has been built to support ESXi and vCenter versions: + +- 7.5 +- 7.0 +- 6.7 + +A “Read Only” user assigned to a vSphere with permissions to the vCenter server, cluster and all subsequent resources being monitored must be specified in order for the receiver to retrieve information about them. + +## Usage + +```river +otelcol.receiver.vcenter "LABEL" { + endpoint = "VCENTER_ENDPOINT" + username = "VCENTER_USERNAME" + password = "VCENTER_PASSWORD" + + output { + metrics = [...] + } +} +``` + +## Arguments + +`otelcol.receiver.vcenter` supports the following arguments: + + +Name | Type | Description | Default | Required +---- | ---- | ----------- | ------- | -------- +`endpoint` | `string` | Endpoint to a vCenter Server or ESXi host which has the SDK path enabled. | | yes +`username` | `string` | Username to use for authentication. | | yes +`password` | `string` | Password to use for authentication. | | yes +`collection_interval` | `duration` | Defines how often to collect metrics. | `"1m"` | no +`initial_delay` | `duration` | Defines how long this receiver waits before starting. | `"1s"` | no +`timeout` | `duration` | Defines the timeout for the underlying HTTP client. | `"0s"` | no + +`endpoint` has the format `://`. For example, `https://vcsa.hostname.localnet`. + +## Blocks + +The following blocks are supported inside the definition of +`otelcol.receiver.vcenter`: + +Hierarchy | Block | Description | Required +--------- | ----- | ----------- | -------- +tls | [tls][] | Configures TLS for the HTTP client. | no +metrics | [metrics][] | Configures which metrics will be sent to downstream components. 
| no +resource_attributes | [resource_attributes][] | Configures resource attributes for metrics sent to downstream components. | no +debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. | no +output | [output][] | Configures where to send received telemetry data. | yes + +[tls]: #tls-block +[debug_metrics]: #debug_metrics-block +[metrics]: #metrics-block +[resource_attributes]: #resource_attributes-block +[output]: #output-block + +### tls block + +The `tls` block configures TLS settings used for a server. If the `tls` block +isn't provided, TLS won't be used for connections to the server. + +{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} + +### metrics block + +Name | Type | Description | Default | Required +---- | ---- | ----------- | ------- | -------- +`vcenter.cluster.cpu.effective` | [metric][] | Enables the `vcenter.cluster.cpu.effective` metric. | `true` | no +`vcenter.cluster.cpu.usage` | [metric][] | Enables the `vcenter.cluster.cpu.usage` metric. | `true` | no +`vcenter.cluster.host.count` | [metric][] | Enables the `vcenter.cluster.host.count` metric. | `true` | no +`vcenter.cluster.memory.effective` | [metric][] | Enables the `vcenter.cluster.memory.effective` metric. | `true` | no +`vcenter.cluster.memory.limit` | [metric][] | Enables the `vcenter.cluster.memory.limit` metric. | `true` | no +`vcenter.cluster.memory.used` | [metric][] | Enables the `vcenter.cluster.memory.used` metric. | `true` | no +`vcenter.cluster.vm.count` | [metric][] | Enables the `vcenter.cluster.vm.count` metric. | `true` | no +`vcenter.datastore.disk.usage` | [metric][] | Enables the `vcenter.datastore.disk.usage` metric. | `true` | no +`vcenter.datastore.disk.utilization` | [metric][] | Enables the `vcenter.datastore.disk.utilization` metric. | `true` | no +`vcenter.host.cpu.usage` | [metric][] | Enables the `vcenter.host.cpu.usage` metric. | `true` | no +`vcenter.host.cpu.utilization` | [metric][] | Enables the `vcenter.host.cpu.utilization` metric. | `true` | no +`vcenter.host.disk.latency.avg` | [metric][] | Enables the `vcenter.host.disk.latency.avg` metric. | `true` | no +`vcenter.host.disk.latency.max` | [metric][] | Enables the `vcenter.host.disk.latency.max` metric. | `true` | no +`vcenter.host.disk.throughput` | [metric][] | Enables the `vcenter.host.disk.throughput` metric. | `true` | no +`vcenter.host.memory.usage` | [metric][] | Enables the `vcenter.host.memory.usage` metric. | `true` | no +`vcenter.host.memory.utilization` | [metric][] | Enables the `vcenter.host.memory.utilization` metric. | `true` | no +`vcenter.host.network.packet.count` | [metric][] | Enables the `vcenter.host.network.packet.count` metric. | `true` | no +`vcenter.host.network.packet.errors` | [metric][] | Enables the `vcenter.host.network.packet.errors` metric. | `true` | no +`vcenter.host.network.throughput` | [metric][] | Enables the `vcenter.host.network.throughput` metric. | `true` | no +`vcenter.host.network.usage` | [metric][] | Enables the `vcenter.host.network.usage` metric. | `true` | no +`vcenter.resource_pool.cpu.shares` | [metric][] | Enables the `vcenter.resource_pool.cpu.shares` metric. | `true` | no +`vcenter.resource_pool.cpu.usage` | [metric][] | Enables the `vcenter.resource_pool.cpu.usage` metric. | `true` | no +`vcenter.resource_pool.memory.shares` | [metric][] | Enables the `vcenter.resource_pool.memory.shares` metric. 
| `true` | no +`vcenter.resource_pool.memory.usage` | [metric][] | Enables the `vcenter.resource_pool.memory.usage` metric. | `true` | no +`vcenter.vm.cpu.usage` | [metric][] | Enables the `vcenter.vm.cpu.usage` metric. | `true` | no +`vcenter.vm.cpu.utilization` | [metric][] | Enables the `vcenter.vm.cpu.utilization` metric. | `true` | no +`vcenter.vm.disk.latency.avg` | [metric][] | Enables the `vcenter.vm.disk.latency.avg` metric. | `true` | no +`vcenter.vm.disk.latency.max` | [metric][] | Enables the `vcenter.vm.disk.latency.max` metric. | `true` | no +`vcenter.vm.disk.throughput` | [metric][] | Enables the `vcenter.vm.disk.throughput` metric. | `true` | no +`vcenter.vm.disk.usage` | [metric][] | Enables the `vcenter.vm.disk.usage` metric. | `true` | no +`vcenter.vm.disk.utilization` | [metric][] | Enables the `vcenter.vm.disk.utilization` metric. | `true` | no +`vcenter.vm.memory.ballooned` | [metric][] | Enables the `vcenter.vm.memory.ballooned` metric. | `true` | no +`vcenter.vm.memory.swapped` | [metric][] | Enables the `vcenter.vm.memory.swapped` metric. | `true` | no +`vcenter.vm.memory.swapped_ssd` | [metric][] | Enables the `vcenter.vm.memory.swapped_ssd` metric. | `true` | no +`vcenter.vm.memory.usage` | [metric][] | Enables the `vcenter.vm.memory.usage` metric. | `true` | no +`vcenter.vm.memory.utilization` | [metric][] | Enables the `vcenter.vm.memory.utilization` metric. | `false` | no +`vcenter.vm.network.packet.count` | [metric][] | Enables the `vcenter.vm.network.packet.count` metric. | `true` | no +`vcenter.vm.network.throughput` | [metric][] | Enables the `vcenter.vm.network.throughput` metric. | `true` | no +`vcenter.vm.network.usage` | [metric][] | Enables the `vcenter.vm.network.usage` metric. | `true` | no + +[metric]: #metric-block + +#### metric block + +Name | Type | Description | Default | Required +---- | ---- | ----------- | ------- | -------- +`enabled` | `boolean` | Whether to enable the metric. | `true` | no + + +### resource_attributes block + +Name | Type | Description | Default | Required +---- | ---- | ----------- | ------- | -------- +`vcenter.cluster.name` | [resource_attribute][] | Enables the `vcenter.cluster.name` resource attribute. | `true` | no +`vcenter.datastore.name` | [resource_attribute][] | Enables the `vcenter.cluster.resource_pool` resource attribute. | `true` | no +`vcenter.host.name` | [resource_attribute][] | Enables the `vcenter.host.name` resource attribute. | `true` | no +`vcenter.resource_pool.inventory_path` | [resource_attribute][] | Enables the `vcenter.resource_pool.inventory_path` resource attribute. | `true` | no +`vcenter.resource_pool.name` | [resource_attribute][] | Enables the `vcenter.resource_pool.name` resource attribute. | `true` | no +`vcenter.vm.id` | [resource_attribute][] | Enables the `vcenter.vm.id` resource attribute. | `true` | no +`vcenter.vm.name` | [resource_attribute][] | Enables the `vcenter.vm.name` resource attribute. | `true` | no + +[resource_attribute]: #resource_attribute-block + +#### resource_attribute block + +Name | Type | Description | Default | Required +---- | ---- | ----------- | ------- | -------- +`enabled` | `boolean` | Whether to enable the resource attribute. 
| `true` | no + + +### debug_metrics block + +{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} + +### output block + +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} + +## Exported fields + +`otelcol.receiver.vcenter` does not export any fields. + +## Component health + +`otelcol.receiver.vcenter` is only reported as unhealthy if given an invalid +configuration. + +## Debug information + +`otelcol.receiver.vcenter` does not expose any component-specific debug +information. + +## Example + +This example forwards received telemetry data through a batch processor before +finally sending it to an OTLP-capable endpoint: + +```river +otelcol.receiver.vcenter "default" { + endpoint = "http://localhost:15672" + username = "otelu" + password = "password" + + output { + metrics = [otelcol.processor.batch.default.input] + } +} + +otelcol.processor.batch "default" { + output { + metrics = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = env("OTLP_ENDPOINT") + } +} +``` + + + +## Compatible components + +`otelcol.receiver.vcenter` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md b/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md index aa361f7a5233..2dd3d8a9ccfb 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.zipkin/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.zipkin/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.zipkin/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.zipkin/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.zipkin/ -title: otelcol.receiver.zipkin description: Learn about otelcol.receiver.zipkin +title: otelcol.receiver.zipkin --- # otelcol.receiver.zipkin @@ -70,7 +71,7 @@ refers to a `tls` block defined inside a `grpc` block. The `tls` block configures TLS settings used for a server. If the `tls` block isn't provided, TLS won't be used for connections to the server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} ### cors block @@ -96,11 +97,11 @@ If `allowed_headers` includes `"*"`, all headers are permitted. 
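As a minimal sketch of how these CORS settings fit together (assuming the `cors` block nests directly under `otelcol.receiver.zipkin` and that spans are forwarded through the `output` block; the origin and exporter names below are placeholders, not part of the original documentation):

```river
otelcol.receiver.zipkin "example" {
  // Only allow cross-origin requests from this placeholder origin,
  // and only permit the Content-Type header on those requests.
  cors {
    allowed_origins = ["https://ui.example.com"]
    allowed_headers = ["Content-Type"]
  }

  output {
    traces = [otelcol.exporter.otlp.default.input]
  }
}
```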
### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} ## Exported fields @@ -142,3 +143,20 @@ otelcol.exporter.otlp "default" { } } ``` + + +## Compatible components + +`otelcol.receiver.zipkin` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.exporter.agent.md b/docs/sources/flow/reference/components/prometheus.exporter.agent.md index 92ac0b31a860..cb2dd5cda361 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.agent.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.agent.md @@ -1,7 +1,10 @@ --- +aliases: +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.agent/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.agent/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.agent/ -title: prometheus.exporter.agent description: Learn about prometheus.exporter.agen +title: prometheus.exporter.agent --- # prometheus.exporter.agent @@ -19,7 +22,7 @@ prometheus.exporter.agent "agent" { ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -42,7 +45,7 @@ This example uses a [`prometheus.scrape` component][scrape] to collect metrics from `prometheus.exporter.agent`: ```river -prometheus.exporter.agent "agent" {} +prometheus.exporter.agent "example" {} // Configure a prometheus.scrape component to collect agent metrics. prometheus.scrape "demo" { @@ -66,4 +69,22 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} \ No newline at end of file +[scrape]: {{< relref "./prometheus.scrape.md" >}} + + + + +## Compatible components + +`prometheus.exporter.agent` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.apache.md b/docs/sources/flow/reference/components/prometheus.exporter.apache.md index f38021fab6be..08f19fa2d1d9 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.apache.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.apache.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.apache/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.apache/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.apache/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.apache/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.apache/ -title: prometheus.exporter.apache description: Learn about prometheus.exporter.apache +title: prometheus.exporter.apache --- # prometheus.exporter.apache @@ -33,7 +34,7 @@ All arguments are optional. Omitted fields take their default values. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -86,3 +87,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.apache` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.azure.md b/docs/sources/flow/reference/components/prometheus.exporter.azure.md index 448575287351..ea8fa08cd912 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.azure.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.azure.md @@ -1,28 +1,40 @@ --- aliases: - - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.azure/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.azure/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.azure/ +- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.azure/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.azure/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.azure/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.azure/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.azure/ -title: prometheus.exporter.azure description: Learn about prometheus.exporter.azure +title: prometheus.exporter.azure --- # prometheus.exporter.azure -The `prometheus.exporter.azure` component embeds [`azure-metrics-exporter`](https://github.com/webdevops/azure-metrics-exporter) to collect metrics from [Azure Monitor](https://azure.microsoft.com/en-us/products/monitor). The exporter uses [Azure Resource Graph](https://azure.microsoft.com/en-us/get-started/azure-portal/resource-graph/#overview) queries to identify resources for gathering metrics. +The `prometheus.exporter.azure` component embeds [`azure-metrics-exporter`](https://github.com/webdevops/azure-metrics-exporter) to collect metrics from [Azure Monitor](https://azure.microsoft.com/en-us/products/monitor). The exporter supports all metrics defined by Azure Monitor. You can find the complete list of available metrics in the [Azure Monitor documentation](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported). Metrics for this integration are exposed with the template `azure_{type}_{metric}_{aggregation}_{unit}` by default. As an example, the Egress metric for BlobService would be exported as `azure_microsoft_storage_storageaccounts_blobservices_egress_total_bytes`. +The exporter offers the following two options for gathering metrics. + +1. (Default) Use an [Azure Resource Graph](https://azure.microsoft.com/en-us/get-started/azure-portal/resource-graph/#overview) query to identify resources for gathering metrics. + 1. This query will make one API call per resource identified. + 1. Subscriptions with a reasonable amount of resources can hit the [12000 requests per hour rate limit](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling#subscription-and-tenant-limits) Azure enforces. +1. Set the regions to gather metrics from and get metrics for all resources across those regions. + 1. This option will make one API call per subscription, dramatically reducing the number of API calls. + 1. This approach does not work with all resource types, and Azure does not document which resource types do or do not work. + 1. A resource type that is not supported produces errors that look like `Resource type: microsoft.containerservice/managedclusters not enabled for Cross Resource metrics`. 
+ 1. If you encounter one of these errors you must use the default Azure Resource Graph based option to gather metrics. + ## Authentication -Grafana agent must be running in an environment with access to Azure. The exporter uses the Azure SDK for go and supports [authentication](https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication?tabs=bash#2-authenticate-with-azure). +{{< param "PRODUCT_NAME" >}} must be running in an environment with access to Azure. The exporter uses the Azure SDK for go and supports [authentication](https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication?tabs=bash#2-authenticate-with-azure). -The account used by Grafana Agent needs: +The account used by {{< param "PRODUCT_NAME" >}} needs: -- [Read access to the resources that will be queried by Resource Graph](https://learn.microsoft.com/en-us/azure/governance/resource-graph/overview#permissions-in-azure-resource-graph) +- When using an Azure Resource Graph query, [read access to the resources that will be queried by Resource Graph](https://learn.microsoft.com/en-us/azure/governance/resource-graph/overview#permissions-in-azure-resource-graph) - Permissions to call the [Microsoft.Insights Metrics API](https://learn.microsoft.com/en-us/rest/api/monitor/metrics/list) which should be the `Microsoft.Insights/Metrics/Read` permission ## Usage @@ -50,23 +62,27 @@ prometheus.exporter.azure LABEL { You can use the following arguments to configure the exporter's behavior. Omitted fields take their default values. -| Name | Type | Description | Default | Required | -| ----------------------------- | -------------- | -------------------------------------------------------------------- | ----------------------------------------------------------------------------- | -------- | -| `subscriptions` | `list(string)` | List of subscriptions to scrap metrics from. | | yes | -| `resource_type` | `string` | The Azure Resource Type to scrape metrics for. | | yes | -| `metrics` | `list(string)` | The metrics to scrape from resources. | | yes | -| `resource_graph_query_filter` | `string` | The [Kusto query][] filter to apply when searching for resources. | | no | -| `metric_aggregations` | `list(string)` | Aggregations to apply for the metrics produced. | | no | -| `timespan` | `string` | [ISO8601 Duration][] over which the metrics are being queried. | `"PT1M"` (1 minute) | no | -| `included_dimensions` | `list(string)` | List of dimensions to include on the final metrics. | | no | -| `included_resource_tags` | `list(string)` | List of resource tags to include on the final metrics. | `["owner"]` | no | -| `metric_namespace` | `string` | Namespace for `resource_type` which have multiple levels of metrics. | | no | -| `azure_cloud_environment` | `string` | Name of the cloud environment to connect to. | `"azurecloud"` | no | -| `metric_name_template` | `string` | Metric template used to expose the metrics. | `"azure_{type}_{metric}_{aggregation}_{unit}"` | no | -| `metric_help_template` | `string` | Description of the metric. 
| `"Azure metric {metric} for {type} with aggregation {aggregation} as {unit}"` | no | +| Name | Type | Description | Default | Required | +|-------------------------------|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------|----------| +| `subscriptions` | `list(string)` | List of subscriptions to scrape metrics from. | | yes | +| `resource_type` | `string` | The Azure Resource Type to scrape metrics for. | | yes | +| `metrics` | `list(string)` | The metrics to scrape from resources. | | yes | +| `resource_graph_query_filter` | `string` | The [Kusto query][] filter to apply when searching for resources. Can't be used if `regions` is set. | | no | +| `regions` | `list(string)` | The list of regions for gathering metrics and enables gathering metrics for all resources in the subscription. Can't be used if `resource_graph_query_filter` is set. | | no | +| `metric_aggregations` | `list(string)` | Aggregations to apply for the metrics produced. | | no | +| `timespan` | `string` | [ISO8601 Duration][] over which the metrics are being queried. | `"PT1M"` (1 minute) | no | +| `included_dimensions` | `list(string)` | List of dimensions to include on the final metrics. | | no | +| `included_resource_tags` | `list(string)` | List of resource tags to include on the final metrics. | `["owner"]` | no | +| `metric_namespace` | `string` | Namespace for `resource_type` which have multiple levels of metrics. | | no | +| `azure_cloud_environment` | `string` | Name of the cloud environment to connect to. | `"azurecloud"` | no | +| `metric_name_template` | `string` | Metric template used to expose the metrics. | `"azure_{type}_{metric}_{aggregation}_{unit}"` | no | +| `metric_help_template` | `string` | Description of the metric. | `"Azure metric {metric} for {type} with aggregation {aggregation} as {unit}"` | no | +| `validate_dimensions` | `bool` | Enable dimension validation in the azure sdk | `false` | no | The list of available `resource_type` values and their corresponding `metrics` can be found in [Azure Monitor essentials][]. +The list of available `regions` to your subscription can be found by running the azure CLI command `az account list-locations --query '[].name'`. + The `resource_graph_query_filter` can be embedded into a template query of the form `Resources | where type =~ "" | project id, tags`. Valid values for `metric_aggregations` are `minimum`, `maximum`, `average`, `total`, and `count`. If no aggregation is specified, the value is retrieved from the metric. For example, the aggregation value of the metric `Availability` in [Microsoft.ClassicStorage/storageAccounts](https://learn.microsoft.com/en-us/azure/azure-monitor/reference/supported-metrics/microsoft-classicstorage-storageaccounts-metrics) is `average`. @@ -77,13 +93,15 @@ Tags in `included_resource_tags` will be added as labels with the name `tag_}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -106,6 +124,9 @@ debug metrics. 
prometheus.exporter.azure "example" { subscriptions = SUBSCRIPTIONS resource_type = "Microsoft.Storage/storageAccounts" + regions = [ + "westeurope", + ] metric_namespace = "Microsoft.Storage/storageAccounts/blobServices" metrics = [ "Availability", @@ -119,8 +140,11 @@ prometheus.exporter.azure "example" { "SuccessServerLatency", "Transactions", ] + included_dimensions = [ + "ApiName", + "TransactionType", + ] timespan = "PT1H" - resource_graph_query_filter = "where location == 'westeurope'" } // Configure a prometheus.scrape component to send metrics to. @@ -147,3 +171,20 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + + + +## Compatible components + +`prometheus.exporter.azure` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md b/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md index 4cd6e5effcc3..24fe248d5e23 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.blackbox/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.blackbox/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.blackbox/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.blackbox/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.blackbox/ -title: prometheus.exporter.blackbox description: Learn about prometheus.exporter.blackbox +title: prometheus.exporter.blackbox --- # prometheus.exporter.blackbox @@ -17,6 +18,10 @@ The `prometheus.exporter.blackbox` component embeds ```river prometheus.exporter.blackbox "LABEL" { + target { + name = "example" + address = "EXAMPLE_ADDRESS" + } } ``` @@ -31,6 +36,7 @@ Omitted fields take their default values. | `config` | `string` or `secret` | blackbox_exporter configuration as inline string. | | no | | `probe_timeout_offset` | `duration` | Offset in seconds to subtract from timeout when probing targets. | `"0.5s"` | no | +Either `config_file` or `config` must be specified. The `config_file` argument points to a YAML file defining which blackbox_exporter modules to use. The `config` argument must be a YAML document as string defining which blackbox_exporter modules to use. `config` is typically loaded by using the exports of another component. For example, @@ -55,10 +61,11 @@ The following blocks are supported inside the definition of ### target block The `target` block defines an individual blackbox target. -The `target` block may be specified multiple times to define multiple targets. The label of the block is required and will be used in the target's `job` label. 
+The `target` block may be specified multiple times to define multiple targets. `name` attribute is required and will be used in the target's `job` label. | Name | Type | Description | Default | Required | | --------- | ---------------- | ----------------------------------- | ------- | -------- | +| `name` | `string` | The name of the target to probe. | | yes | | `address` | `string` | The address of the target to probe. | | yes | | `module` | `string` | Blackbox module to use to probe. | `""` | no | | `labels` | `map(string)` | Labels to add to the target. | | no | @@ -67,7 +74,7 @@ Labels specified in the `labels` argument will not override labels set by `black ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -90,18 +97,22 @@ debug metrics. ### Collect metrics using a blackbox exporter config file This example uses a [`prometheus.scrape` component][scrape] to collect metrics -from `prometheus.exporter.blackbox`. It adds an extra label, `env="dev"`, to the metrics emitted by the `grafana` target. The `example` target does not have any added labels. +from `prometheus.exporter.blackbox`. It adds an extra label, `env="dev"`, to the metrics emitted by the `grafana` target. The `example` target does not have any added labels. + +The `config_file` argument is used to define which `blackbox_exporter` modules to use. You can use the [blackbox example config file](https://github.com/prometheus/blackbox_exporter/blob/master/example.yml). ```river prometheus.exporter.blackbox "example" { config_file = "blackbox_modules.yml" - target "example" { + target { + name = "example" address = "http://example.com" module = "http_2xx" } - target "grafana" { + target { + name = "grafana" address = "http://grafana.com" module = "http_2xx" labels = { @@ -142,12 +153,14 @@ This example is the same above with using an embedded configuration: prometheus.exporter.blackbox "example" { config = "{ modules: { http_2xx: { prober: http, timeout: 5s } } }" - target "example" { + target { + name = "example" address = "http://example.com" module = "http_2xx" } - target "grafana" { + target { + name = "grafana" address = "http://grafana.com" module = "http_2xx" labels = { @@ -182,3 +195,20 @@ Replace the following: [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.blackbox` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md b/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md index 1f4f960d56b0..02c923ebe898 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.cadvisor/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.cadvisor/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.cadvisor/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.cadvisor/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.cadvisor/ -title: prometheus.exporter.cadvisor description: Learn about the prometheus.exporter.cadvisor +title: prometheus.exporter.cadvisor --- # prometheus.exporter.cadvisor @@ -42,6 +43,7 @@ Name | Type | Description | Default | Required `docker_tls_key` | `string` | Path to private key for TLS connection to docker. | `key.pem` | no `docker_tls_ca` | `string` | Path to a trusted CA for TLS connection to docker. | `ca.pem` | no `docker_only` | `bool` | Only report docker containers in addition to root stats. | `false` | no +`disable_root_cgroup_stats` | `bool` | Disable collecting root Cgroup stats. | `false` | no For `allowlisted_container_labels` to take effect, `store_container_labels` must be set to `false`. @@ -69,7 +71,7 @@ fully through arguments. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -124,3 +126,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.cadvisor` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md b/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md index 2748ee521adc..2c1682a5fccc 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.cloudwatch/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.cloudwatch/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.cloudwatch/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.cloudwatch/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.cloudwatch/ -title: prometheus.exporter.cloudwatch description: Learn about prometheus.exporter.cloudwatch +title: prometheus.exporter.cloudwatch --- # prometheus.exporter.cloudwatch @@ -23,7 +24,7 @@ two kinds of jobs: [discovery][] and [static][]. ## Authentication -The agent must be running in an environment with access to AWS. The exporter uses +{{< param "PRODUCT_NAME" >}} must be running in an environment with access to AWS. The exporter uses the [AWS SDK for Go](https://aws.github.io/aws-sdk-go-v2/docs/getting-started/) and provides authentication via [AWS's default credential chain](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/#specifying-credentials). @@ -136,19 +137,18 @@ Omitted fields take their default values. You can use the following blocks in`prometheus.exporter.cloudwatch` to configure collector-specific options: -| Hierarchy | Name | Description | Required | -| ------------------ | ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | -------- | -| discovery | [discovery][] | Configures a discovery job. Multiple jobs can be configured. | no\* | -| discovery > role | [role][] | Configures the IAM roles the job should assume to scrape metrics. Defaults to the role configured in the environment the agent runs on. | no | -| discovery > metric | [metric][] | Configures the list of metrics the job should scrape. Multiple metrics can be defined inside one job. | yes | -| static | [static][] | Configures a static job. Multiple jobs can be configured. | no\* | -| static > role | [role][] | Configures the IAM roles the job should assume to scrape metrics. Defaults to the role configured in the environment the agent runs on. | no | -| static > metric | [metric][] | Configures the list of metrics the job should scrape. Multiple metrics can be defined inside one job. | yes | -| decoupled_scraping | [decoupled_scraping][] | Configures the decoupled scraping feature to retrieve metrics on a schedule and return the cached metrics. | no | +| Hierarchy | Name | Description | Required | +|--------------------|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| discovery | [discovery][] | Configures a discovery job. Multiple jobs can be configured. | no\* | +| discovery > role | [role][] | Configures the IAM roles the job should assume to scrape metrics. 
Defaults to the role configured in the environment {{< param "PRODUCT_NAME" >}} runs on. | no | +| discovery > metric | [metric][] | Configures the list of metrics the job should scrape. Multiple metrics can be defined inside one job. | yes | +| static | [static][] | Configures a static job. Multiple jobs can be configured. | no\* | +| static > role | [role][] | Configures the IAM roles the job should assume to scrape metrics. Defaults to the role configured in the environment {{< param "PRODUCT_NAME" >}} runs on. | no | +| static > metric | [metric][] | Configures the list of metrics the job should scrape. Multiple metrics can be defined inside one job. | yes | +| decoupled_scraping | [decoupled_scraping][] | Configures the decoupled scraping feature to retrieve metrics on a schedule and return the cached metrics. | no | {{% admonition type="note" %}} -The `static` and `discovery` blocks are marked as not required, but you must configure at least one static or discovery -job. +The `static` and `discovery` blocks are marked as not required, but you must configure at least one static or discovery job. {{% /admonition %}} [discovery]: #discovery-block @@ -161,10 +161,8 @@ job. The `discovery` block allows the component to scrape CloudWatch metrics with only the AWS service and a list of metrics under that service/namespace. -The agent will find AWS resources in the specified service for which to scrape these metrics, label them appropriately, -and -export them to Prometheus. For example, if we wanted to scrape CPU utilization and network traffic metrics from all AWS -EC2 instances: +{{< param "PRODUCT_NAME" >}} will find AWS resources in the specified service for which to scrape these metrics, label them appropriately, +and export them to Prometheus. For example, if we wanted to scrape CPU utilization and network traffic metrics from all AWS EC2 instances: ```river prometheus.exporter.cloudwatch "discover_instances" { @@ -280,11 +278,9 @@ on how to explore metrics, to easily pick the ones you need. #### period and length -`period` controls primarily the width of the time bucket used for aggregating metrics collected from -CloudWatch. `length` -controls how far back in time CloudWatch metrics are considered during each agent scrape. If both settings are -configured, -the time parameters when calling CloudWatch APIs works as follows: +`period` controls primarily the width of the time bucket used for aggregating metrics collected from CloudWatch. `length` +controls how far back in time CloudWatch metrics are considered during each {{< param "PRODUCT_ROOT_NAME" >}} scrape. +If both settings are configured, the time parameters when calling CloudWatch APIs works as follows: ![](https://grafana.com/media/docs/agent/cloudwatch-period-and-length-time-model-2.png) @@ -317,7 +313,7 @@ that corresponds to the credentials configured in the environment will be used. Multiple roles can be useful when scraping metrics from different AWS accounts with a single pair of credentials. In this case, a different role -is configured for the agent to assume before calling AWS APIs. Therefore, the credentials configured in the system need +is configured for {{< param "PRODUCT_ROOT_NAME" >}} to assume before calling AWS APIs. Therefore, the credentials configured in the system need permission to assume the target role. 
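As an illustration only (the account ID and role name are placeholders, and the exact `role` block attributes should be checked against the reference above), a discovery job that assumes a role in another AWS account might look like this sketch:

```river
prometheus.exporter.cloudwatch "cross_account" {
  sts_region = "us-east-2"

  discovery {
    type    = "AWS/EC2"
    regions = ["us-east-2"]

    // Assume this role before calling the CloudWatch APIs.
    // The credentials configured in the environment must be allowed
    // to perform sts:AssumeRole on this ARN.
    role {
      role_arn = "arn:aws:iam::123456789012:role/cloudwatch-readonly"
    }

    metric {
      name       = "CPUUtilization"
      statistics = ["Average"]
      period     = "5m"
    }
  }
}
```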
See [Granting a user permissions to switch roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_permissions-to-switch.html) in the AWS IAM documentation for more information about how to configure this. @@ -345,7 +341,7 @@ This feature also prevents component scrape timeouts when you gather high volume ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -433,6 +429,7 @@ discovery job, the `type` field of each `discovery_job` must match either the de - Namespace: `AWS/PrivateLinkEndpoints` or Alias: `vpc-endpoint` - Namespace: `AWS/PrivateLinkServices` or Alias: `vpc-endpoint-service` - Namespace: `AWS/Prometheus` or Alias: `amp` +- Namespace: `AWS/QLDB` or Alias: `qldb` - Namespace: `AWS/RDS` or Alias: `rds` - Namespace: `AWS/Redshift` or Alias: `redshift` - Namespace: `AWS/Route53Resolver` or Alias: `route53-resolver` @@ -446,6 +443,7 @@ discovery job, the `type` field of each `discovery_job` must match either the de - Namespace: `AWS/TransitGateway` or Alias: `tgw` - Namespace: `AWS/TrustedAdvisor` or Alias: `trustedadvisor` - Namespace: `AWS/VPN` or Alias: `vpn` +- Namespace: `AWS/ClientVPN` or Alias: `clientvpn` - Namespace: `AWS/WAFV2` or Alias: `wafv2` - Namespace: `AWS/WorkSpaces` or Alias: `workspaces` - Namespace: `AWS/AOSS` or Alias: `aoss` @@ -456,3 +454,20 @@ discovery job, the `type` field of each `discovery_job` must match either the de - Namespace: `/aws/sagemaker/TransformJobs` or Alias: `sagemaker-transform` - Namespace: `/aws/sagemaker/InferenceRecommendationsJobs` or Alias: `sagemaker-inf-rec` - Namespace: `AWS/Sagemaker/ModelBuildingPipeline` or Alias: `sagemaker-model-building-pipeline` + + + +## Compatible components + +`prometheus.exporter.cloudwatch` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.consul.md b/docs/sources/flow/reference/components/prometheus.exporter.consul.md index d69385db0439..81185047459e 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.consul.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.consul.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.consul/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.consul/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.consul/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.consul/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.consul/ -title: prometheus.exporter.consul description: Learn about prometheus.exporter.consul +title: prometheus.exporter.consul --- # prometheus.exporter.consul @@ -27,7 +28,7 @@ All arguments are optional. Omitted fields take their default values. 
| Name | Type | Description | Default | Required | | -------------------------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | -------- | -| `server` | `string` | Address (host and port) of the Consul instance we should connect to. This could be a local agent (localhost:8500, for instance), or the address of a Consul server. | `http://localhost:8500` | no | +| `server` | `string` | Address (host and port) of the Consul instance we should connect to. This could be a local {{< param "PRODUCT_ROOT_NAME" >}} (localhost:8500, for instance), or the address of a Consul server. | `http://localhost:8500` | no | | `ca_file` | `string` | File path to a PEM-encoded certificate authority used to validate the authenticity of a server certificate. | | no | | `cert_file` | `string` | File path to a PEM-encoded certificate used with the private key to verify the exporter's authenticity. | | no | | `key_file` | `string` | File path to a PEM-encoded private key used with the certificate to verify the exporter's authenticity. | | no | @@ -43,7 +44,7 @@ All arguments are optional. Omitted fields take their default values. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -96,3 +97,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.consul` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md b/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md index 60df052a07bb..2f22e0048807 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.dnsmasq/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.dnsmasq/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.dnsmasq/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.dnsmasq/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.dnsmasq/ -title: prometheus.exporter.dnsmasq description: Learn about prometheus.exporter.dnsmasq +title: prometheus.exporter.dnsmasq --- # prometheus.exporter.dnsmasq @@ -33,7 +34,7 @@ All arguments are optional. Omitted fields take their default values. 
## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -86,3 +87,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.dnsmasq` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md b/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md index 792d57995928..6feb9c683eeb 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.elasticsearch/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.elasticsearch/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.elasticsearch/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.elasticsearch/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.elasticsearch/ -title: prometheus.exporter.elasticsearch description: Learn about prometheus.exporter.elasticsearch +title: prometheus.exporter.elasticsearch --- # prometheus.exporter.elasticsearch @@ -55,9 +56,24 @@ Omitted fields take their default values. | `data_streams` | `bool` | Export stats for Data Streams. | | no | | `slm` | `bool` | Export stats for SLM (Snapshot Lifecycle Management). | | no | +## Blocks + +The following blocks are supported inside the definition of +`prometheus.exporter.elasticsearch`: + +| Hierarchy | Block | Description | Required | +| ------------------- | ----------------- | -------------------------------------------------------- | -------- | +| basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no | + +[basic_auth]: #basic_auth-block + +### basic_auth block + +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} + ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -83,6 +99,10 @@ from `prometheus.exporter.elasticsearch`: ```river prometheus.exporter.elasticsearch "example" { address = "http://localhost:9200" + basic_auth { + username = USERNAME + password = PASSWORD + } } // Configure a prometheus.scrape component to collect Elasticsearch metrics. @@ -110,3 +130,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. 
[scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.elasticsearch` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.gcp.md b/docs/sources/flow/reference/components/prometheus.exporter.gcp.md index 9140ae58919c..e9a3d7ab2786 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.gcp.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.gcp.md @@ -3,15 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.gcp/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.gcp/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.gcp/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.gcp/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.gcp/ -title: prometheus.exporter.gcp description: Learn about prometheus.exporter.gcp +title: prometheus.exporter.gcp --- # prometheus.exporter.gcp The `prometheus.exporter.gcp` component embeds [`stackdriver_exporter`](https://github.com/prometheus-community/stackdriver_exporter). -It lets you collect [GCP Cloud Monitoring (formerly stackdriver)](https://cloud.google.com/monitoring/docs), translate them to prometheus-compatible format and remote write. The component supports all metrics available via [GCP's monitoring API](https://cloud.google.com/monitoring/api/metrics_gcp). +It lets you collect [GCP Cloud Monitoring (formerly stackdriver)](https://cloud.google.com/monitoring/docs), translate them to prometheus-compatible format and remote write. +The component supports all metrics available via [GCP's monitoring API](https://cloud.google.com/monitoring/api/metrics_gcp). Metric names follow the template `stackdriver___`. @@ -29,10 +31,10 @@ These attributes result in a final metric name of: ## Authentication -Grafana Agent must be running in an environment with access to the GCP project it is scraping. The exporter +{{< param "PRODUCT_ROOT_NAME" >}} must be running in an environment with access to the GCP project it is scraping. The exporter uses the Google Golang Client Library, which offers a variety of ways to [provide credentials](https://developers.google.com/identity/protocols/application-default-credentials). Choose the option that works best for you. -After deciding how Agent will obtain credentials, ensure the account is set up with the IAM role `roles/monitoring.viewer`. +After deciding how {{< param "PRODUCT_ROOT_NAME" >}} will obtain credentials, ensure the account is set up with the IAM role `roles/monitoring.viewer`. Since the exporter gathers all of its data from [GCP monitoring APIs](https://cloud.google.com/monitoring/api/v3), this is the only permission needed. 
## Usage @@ -80,7 +82,7 @@ For `ingest_delay`, you can see the values for this in documented metrics as `Af ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -171,3 +173,20 @@ prometheus.exporter.gcp "lb_subset_with_filter" { ] } ``` + + + +## Compatible components + +`prometheus.exporter.gcp` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.github.md b/docs/sources/flow/reference/components/prometheus.exporter.github.md index aab5f2ceb7dd..753458562ab5 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.github.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.github.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.github/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.github/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.github/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.github/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.github/ -title: prometheus.exporter.github description: Learn about prometheus.exporter.github +title: prometheus.exporter.github --- # prometheus.exporter.github @@ -40,7 +41,7 @@ When provided, `api_token_file` takes precedence over `api_token`. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -94,3 +95,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.github` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.kafka.md b/docs/sources/flow/reference/components/prometheus.exporter.kafka.md index 901d0f9c2636..59400eea67fe 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.kafka.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.kafka.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.kafka/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.kafka/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.kafka/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.kafka/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.kafka/ -title: prometheus.exporter.kafka description: Learn about prometheus.exporter.kafka +title: prometheus.exporter.kafka --- # prometheus.exporter.kafka @@ -53,7 +54,7 @@ Omitted fields take their default values. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -106,3 +107,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.kafka` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.memcached.md b/docs/sources/flow/reference/components/prometheus.exporter.memcached.md index fceb216a4d6a..bd158d76a996 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.memcached.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.memcached.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.memcached/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.memcached/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.memcached/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.memcached/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.memcached/ -title: prometheus.exporter.memcached description: Learn about prometheus.exporter.memcached +title: prometheus.exporter.memcached --- # prometheus.exporter.memcached @@ -41,11 +42,11 @@ The following blocks are supported inside the definition of `prometheus.exporter ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -98,3 +99,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.memcached` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md b/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md index babc512a2ca2..1aa855542c06 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.mongodb/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.mongodb/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.mongodb/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.mongodb/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.mongodb/ -title: prometheus.exporter.mongodb description: Learn about prometheus.exporter.mongodb +title: prometheus.exporter.mongodb --- # prometheus.exporter.mongodb @@ -13,11 +14,10 @@ description: Learn about prometheus.exporter.mongodb The `prometheus.exporter.mongodb` component embeds percona's [`mongodb_exporter`](https://github.com/percona/mongodb_exporter). {{% admonition type="note" %}} -For this integration to work properly, you must have connect each node of your MongoDB cluster to an agent instance. -That's because this exporter does not collect metrics from multiple nodes. +This exporter doesn't collect metrics from multiple nodes. For this integration to work properly, you must have connect each node of your MongoDB cluster to a {{< param "PRODUCT_NAME" >}} instance. {{% /admonition %}} -We strongly recommend configuring a separate user for the Grafana Agent, giving it only the strictly mandatory security privileges necessary for monitoring your node. +We strongly recommend configuring a separate user for {{< param "PRODUCT_NAME" >}}, giving it only the strictly mandatory security privileges necessary for monitoring your node. Refer to the [Percona documentation](https://github.com/percona/mongodb_exporter#permissions) for more information. ## Usage @@ -46,7 +46,7 @@ For `tls_basic_auth_config_path`, check [`tls_config`](https://prometheus.io/doc ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -88,3 +88,20 @@ prometheus.remote_write "default" { ``` [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.mongodb` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mssql.md b/docs/sources/flow/reference/components/prometheus.exporter.mssql.md index 66384a1aace1..e2bcad76830e 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mssql.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.mssql.md @@ -3,15 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.mssql/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.mssql/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.mssql/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.mssql/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.mssql/ -title: prometheus.exporter.mssql description: Learn about prometheus.exporter.mssql +title: prometheus.exporter.mssql --- # prometheus.exporter.mssql The `prometheus.exporter.mssql` component embeds -[sql_exporter](https://github.com/burningalchemist/sql_exporter) for collecting stats from a Microsoft SQL Server. +[sql_exporter](https://github.com/burningalchemist/sql_exporter) for collecting stats from a Microsoft SQL Server and exposing them as +Prometheus metrics. ## Usage @@ -26,12 +28,13 @@ prometheus.exporter.mssql "LABEL" { The following arguments can be used to configure the exporter's behavior. Omitted fields take their default values. -| Name | Type | Description | Default | Required | -| ---------------------- | ---------- | ----------------------------------------------------------------- | ------- | -------- | -| `connection_string` | `secret` | The connection string used to connect to an Microsoft SQL Server. | | yes | -| `max_idle_connections` | `int` | Maximum number of idle connections to any one target. | `3` | no | -| `max_open_connections` | `int` | Maximum number of open connections to any one target. | `3` | no | -| `timeout` | `duration` | The query timeout in seconds. | `"10s"` | no | +| Name | Type | Description | Default | Required | +| ---------------------- | ---------- | ------------------------------------------------------------------- | ------- | -------- | +| `connection_string` | `secret` | The connection string used to connect to an Microsoft SQL Server. | | yes | +| `max_idle_connections` | `int` | Maximum number of idle connections to any one target. | `3` | no | +| `max_open_connections` | `int` | Maximum number of open connections to any one target. | `3` | no | +| `timeout` | `duration` | The query timeout in seconds. | `"10s"` | no | +| `query_config` | `string` | MSSQL query to Prometheus metric configuration as an inline string. | | no | [The sql_exporter examples](https://github.com/burningalchemist/sql_exporter/blob/master/examples/azure-sql-mi/sql_exporter.yml#L21) show the format of the `connection_string` argument: @@ -39,6 +42,15 @@ Omitted fields take their default values. sqlserver://USERNAME_HERE:PASSWORD_HERE@SQLMI_HERE_ENDPOINT.database.windows.net:1433?encrypt=true&hostNameInCertificate=%2A.SQL_MI_DOMAIN_HERE.database.windows.net&trustservercertificate=true ``` +If specified, the `query_config` argument must be a YAML document as string defining which MSSQL queries map to custom Prometheus metrics. +`query_config` is typically loaded by using the exports of another component. 
For example, + +- `local.file.LABEL.content` +- `remote.http.LABEL.content` +- `remote.s3.LABEL.content` + +See [sql_exporter](https://github.com/burningalchemist/sql_exporter#collectors) for details on how to create a configuration. + ## Blocks The `prometheus.exporter.mssql` component does not support any blocks, and is configured @@ -46,7 +58,7 @@ fully through arguments. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -99,3 +111,239 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + +## Custom metrics +You can use the optional `query_config` parameter to retrieve custom Prometheus metrics for a MSSQL instance. + +If this is defined, the new configuration will be used to query your MSSQL instance and create whatever Prometheus metrics are defined. +If you want additional metrics on top of the default metrics, the default configuration must be used as a base. + +The default configuration used by this integration is as follows: +``` +collector_name: mssql_standard + +metrics: + - metric_name: mssql_local_time_seconds + type: gauge + help: 'Local time in seconds since epoch (Unix time).' + values: [unix_time] + query: | + SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time + - metric_name: mssql_connections + type: gauge + help: 'Number of active connections.' + key_labels: + - db + values: [count] + query: | + SELECT DB_NAME(sp.dbid) AS db, COUNT(sp.spid) AS count + FROM sys.sysprocesses sp + GROUP BY DB_NAME(sp.dbid) + # + # Collected from sys.dm_os_performance_counters + # + - metric_name: mssql_deadlocks_total + type: counter + help: 'Number of lock requests that resulted in a deadlock.' + values: [cntr_value] + query: | + SELECT cntr_value + FROM sys.dm_os_performance_counters WITH (NOLOCK) + WHERE counter_name = 'Number of Deadlocks/sec' AND instance_name = '_Total' + - metric_name: mssql_user_errors_total + type: counter + help: 'Number of user errors.' + values: [cntr_value] + query: | + SELECT cntr_value + FROM sys.dm_os_performance_counters WITH (NOLOCK) + WHERE counter_name = 'Errors/sec' AND instance_name = 'User Errors' + - metric_name: mssql_kill_connection_errors_total + type: counter + help: 'Number of severe errors that caused SQL Server to kill the connection.' + values: [cntr_value] + query: | + SELECT cntr_value + FROM sys.dm_os_performance_counters WITH (NOLOCK) + WHERE counter_name = 'Errors/sec' AND instance_name = 'Kill Connection Errors' + - metric_name: mssql_page_life_expectancy_seconds + type: gauge + help: 'The minimum number of seconds a page will stay in the buffer pool on this node without references.' + values: [cntr_value] + query: | + SELECT top(1) cntr_value + FROM sys.dm_os_performance_counters WITH (NOLOCK) + WHERE counter_name = 'Page life expectancy' + - metric_name: mssql_batch_requests_total + type: counter + help: 'Number of command batches received.' + values: [cntr_value] + query: | + SELECT cntr_value + FROM sys.dm_os_performance_counters WITH (NOLOCK) + WHERE counter_name = 'Batch Requests/sec' + - metric_name: mssql_log_growths_total + type: counter + help: 'Number of times the transaction log has been expanded, per database.' 
+ key_labels: + - db + values: [cntr_value] + query: | + SELECT rtrim(instance_name) AS db, cntr_value + FROM sys.dm_os_performance_counters WITH (NOLOCK) + WHERE counter_name = 'Log Growths' AND instance_name <> '_Total' + - metric_name: mssql_buffer_cache_hit_ratio + type: gauge + help: 'Ratio of requests that hit the buffer cache' + values: [BufferCacheHitRatio] + query: | + SELECT (a.cntr_value * 1.0 / b.cntr_value) * 100.0 as BufferCacheHitRatio + FROM sys.dm_os_performance_counters a + JOIN (SELECT cntr_value, OBJECT_NAME + FROM sys.dm_os_performance_counters + WHERE counter_name = 'Buffer cache hit ratio base' + AND OBJECT_NAME = 'SQLServer:Buffer Manager') b ON a.OBJECT_NAME = b.OBJECT_NAME + WHERE a.counter_name = 'Buffer cache hit ratio' + AND a.OBJECT_NAME = 'SQLServer:Buffer Manager' + + - metric_name: mssql_checkpoint_pages_sec + type: gauge + help: 'Checkpoint Pages Per Second' + values: [cntr_value] + query: | + SELECT cntr_value + FROM sys.dm_os_performance_counters + WHERE [counter_name] = 'Checkpoint pages/sec' + # + # Collected from sys.dm_io_virtual_file_stats + # + - metric_name: mssql_io_stall_seconds_total + type: counter + help: 'Stall time in seconds per database and I/O operation.' + key_labels: + - db + value_label: operation + values: + - read + - write + query_ref: mssql_io_stall + + # + # Collected from sys.dm_os_process_memory + # + - metric_name: mssql_resident_memory_bytes + type: gauge + help: 'SQL Server resident memory size (AKA working set).' + values: [resident_memory_bytes] + query_ref: mssql_process_memory + + - metric_name: mssql_virtual_memory_bytes + type: gauge + help: 'SQL Server committed virtual memory size.' + values: [virtual_memory_bytes] + query_ref: mssql_process_memory + + - metric_name: mssql_available_commit_memory_bytes + type: gauge + help: 'SQL Server available to be committed memory size.' + values: [available_commit_limit_bytes] + query_ref: mssql_process_memory + + - metric_name: mssql_memory_utilization_percentage + type: gauge + help: 'The percentage of committed memory that is in the working set.' + values: [memory_utilization_percentage] + query_ref: mssql_process_memory + + - metric_name: mssql_page_fault_count_total + type: counter + help: 'The number of page faults that were incurred by the SQL Server process.' + values: [page_fault_count] + query_ref: mssql_process_memory + + # + # Collected from sys.dm_os_sys_info + # + - metric_name: mssql_server_total_memory_bytes + type: gauge + help: 'SQL Server committed memory in the memory manager.' + values: [committed_memory_bytes] + query_ref: mssql_os_sys_info + + - metric_name: mssql_server_target_memory_bytes + type: gauge + help: 'SQL Server target committed memory set for the memory manager.' + values: [committed_memory_target_bytes] + query_ref: mssql_os_sys_info + + # + # Collected from sys.dm_os_sys_memory + # + - metric_name: mssql_os_memory + type: gauge + help: 'OS physical memory, used and available.' + value_label: 'state' + values: [used, available] + query: | + SELECT + (total_physical_memory_kb - available_physical_memory_kb) * 1024 AS used, + available_physical_memory_kb * 1024 AS available + FROM sys.dm_os_sys_memory + - metric_name: mssql_os_page_file + type: gauge + help: 'OS page file, used and available.' 
+ value_label: 'state' + values: [used, available] + query: | + SELECT + (total_page_file_kb - available_page_file_kb) * 1024 AS used, + available_page_file_kb * 1024 AS available + FROM sys.dm_os_sys_memory +queries: + # Populates `mssql_io_stall` and `mssql_io_stall_total` + - query_name: mssql_io_stall + query: | + SELECT + cast(DB_Name(a.database_id) as varchar) AS [db], + sum(io_stall_read_ms) / 1000.0 AS [read], + sum(io_stall_write_ms) / 1000.0 AS [write] + FROM + sys.dm_io_virtual_file_stats(null, null) a + INNER JOIN sys.master_files b ON a.database_id = b.database_id AND a.file_id = b.file_id + GROUP BY a.database_id + # Populates `mssql_resident_memory_bytes`, `mssql_virtual_memory_bytes`, mssql_available_commit_memory_bytes, + # and `mssql_memory_utilization_percentage`, and `mssql_page_fault_count_total` + - query_name: mssql_process_memory + query: | + SELECT + physical_memory_in_use_kb * 1024 AS resident_memory_bytes, + virtual_address_space_committed_kb * 1024 AS virtual_memory_bytes, + available_commit_limit_kb * 1024 AS available_commit_limit_bytes, + memory_utilization_percentage, + page_fault_count + FROM sys.dm_os_process_memory + # Populates `mssql_server_total_memory_bytes` and `mssql_server_target_memory_bytes`. + - query_name: mssql_os_sys_info + query: | + SELECT + committed_kb * 1024 AS committed_memory_bytes, + committed_target_kb * 1024 AS committed_memory_target_bytes + FROM sys.dm_os_sys_info +``` + + + +## Compatible components + +`prometheus.exporter.mssql` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mysql.md b/docs/sources/flow/reference/components/prometheus.exporter.mysql.md index a3b2569879a8..7c0cb90ae69f 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mysql.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.mysql.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.mysql/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.mysql/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.mysql/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.mysql/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.mysql/ -title: prometheus.exporter.mysql description: Learn about prometheus.exporter.mysql +title: prometheus.exporter.mysql --- # prometheus.exporter.mysql @@ -157,7 +158,7 @@ The full list of supported collectors is: ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -211,3 +212,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. 
[scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.mysql` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md b/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md index 06437927d7a3..10712ba290d5 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.oracledb/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.oracledb/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.oracledb/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.oracledb/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.oracledb/ -title: prometheus.exporter.oracledb description: Learn about prometheus.exporter.oracledb +title: prometheus.exporter.oracledb --- # prometheus.exporter.oracledb @@ -46,7 +47,7 @@ fully through arguments. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -99,3 +100,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.oracledb` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.postgres.md b/docs/sources/flow/reference/components/prometheus.exporter.postgres.md index c219c2a62d62..39cfd8770108 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.postgres.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.postgres.md @@ -3,11 +3,12 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.postgres/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.postgres/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.postgres/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.postgres/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.postgres/ +description: Learn about prometheus.exporter.postgres labels: stage: beta title: prometheus.exporter.postgres -description: Learn about prometheus.exporter.postgres --- # prometheus.exporter.postgres @@ -71,7 +72,7 @@ If `autodiscovery` is disabled, neither `database_allowlist` nor `database_denyl ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -212,3 +213,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.postgres` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.process.md b/docs/sources/flow/reference/components/prometheus.exporter.process.md index 730071fc471d..ddd315f28797 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.process.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.process.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.process/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.process/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.process/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.process/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.process/ -title: prometheus.exporter.process description: Learn about prometheus.exporter.process +title: prometheus.exporter.process --- # prometheus.exporter.process @@ -75,7 +76,7 @@ Each regex in `cmdline` must match the corresponding argv for the process to be ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -132,3 +133,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.process` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.redis.md b/docs/sources/flow/reference/components/prometheus.exporter.redis.md index 7c310c801e6d..cebbbdd02906 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.redis.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.redis.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.redis/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.redis/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.redis/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.redis/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.redis/ -title: prometheus.exporter.redis description: Learn about prometheus.exporter.redis +title: prometheus.exporter.redis --- # prometheus.exporter.redis @@ -77,7 +78,7 @@ Note that setting `export_client_port` increases the cardinality of all Redis me ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -130,3 +131,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.redis` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.snmp.md b/docs/sources/flow/reference/components/prometheus.exporter.snmp.md index 448f51ff2aa3..1e69da7fb941 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.snmp.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.snmp.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.snmp/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.snmp/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.snmp/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.snmp/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.snmp/ -title: prometheus.exporter.snmp description: Learn about prometheus.exporter.snmp +title: prometheus.exporter.snmp --- # prometheus.exporter.snmp @@ -87,7 +88,7 @@ The `walk_param` block may be specified multiple times to define multiple SNMP c ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -197,3 +198,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. 
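Because `walk_param` blocks are declared once and then referenced by name from individual targets, it can help to see that wiring in one place. The following is an illustrative sketch only; the `config_file`, `target`, and `walk_param` names and attributes shown here (`address`, `module`, `walk_params`, `retries`) are assumptions based on the embedded `snmp_exporter`, so check the block reference above for the exact syntax:

```river
prometheus.exporter.snmp "example" {
  // Assumed path to the SNMP module configuration used by the embedded exporter.
  config_file = "snmp_modules.yml"

  // Each target names a device to probe and points at a walk_param profile.
  target "network_switch_1" {
    address     = "192.168.1.2"
    module      = "if_mib"
    walk_params = "public"
  }

  // A reusable SNMP connection profile, referenced by name from the target above.
  walk_param "public" {
    retries = 2
  }
}
```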
[scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.snmp` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md b/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md index 5e06636dc43d..f384fd1a6805 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.snowflake/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.snowflake/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.snowflake/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.snowflake/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.snowflake/ -title: prometheus.exporter.snowflake description: Learn about prometheus.exporter.snowflake +title: prometheus.exporter.snowflake --- # prometheus.exporter.snowflake @@ -44,7 +45,7 @@ fully through arguments. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -100,3 +101,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.snowflake` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.squid.md b/docs/sources/flow/reference/components/prometheus.exporter.squid.md index 606b824dc8b7..49a8639c129d 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.squid.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.squid.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.squid/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.squid/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.squid/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.squid/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.squid/ -title: prometheus.exporter.squid description: Learn about prometheus.exporter.squid +title: prometheus.exporter.squid --- # prometheus.exporter.squid @@ -39,7 +40,7 @@ fully through arguments. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -92,3 +93,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.squid` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.statsd.md b/docs/sources/flow/reference/components/prometheus.exporter.statsd.md index f9258522de96..2e00b8db35b0 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.statsd.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.statsd.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.statsd/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.statsd/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.statsd/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.statsd/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.statsd/ -title: prometheus.exporter.statsd description: Learn about prometheus.exporter.statsd +title: prometheus.exporter.statsd --- # prometheus.exporter.statsd @@ -58,7 +59,7 @@ fully through arguments. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -125,3 +126,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. 
[scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.statsd` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.unix.md b/docs/sources/flow/reference/components/prometheus.exporter.unix.md index 95f3ce6f9993..ab2d88c8175e 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.unix.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.unix.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.unix/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.unix/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.unix/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.unix/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.unix/ -title: prometheus.exporter.unix description: Learn about prometheus.exporter.unix +title: prometheus.exporter.unix --- # prometheus.exporter.unix @@ -261,7 +262,7 @@ An explicit value in the block takes precedence over the environment variable. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -408,3 +409,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.unix` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md b/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md index 9939defbcd57..61c951e9c71d 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md @@ -3,6 +3,7 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.vsphere/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.vsphere/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.vsphere/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.vsphere/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.vsphere/ title: prometheus.exporter.vsphere description: Learn about prometheus.exporter.vsphere @@ -12,6 +13,11 @@ description: Learn about prometheus.exporter.vsphere The `prometheus.exporter.vsphere` component embeds [`vmware_exporter`](https://github.com/grafana/vmware_exporter) to collect vSphere metrics +> **NOTE**: We recommend to use [otelcol.receiver.vcenter][] instead. + +[otelcol.receiver.vcenter]: {{< relref "./otelcol.receiver.vcenter.md" >}} + + ## Usage ```river @@ -39,7 +45,7 @@ Omitted fields take their default values. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -83,3 +89,20 @@ prometheus.remote_write "default" { ``` [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.vsphere` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.exporter.windows.md b/docs/sources/flow/reference/components/prometheus.exporter.windows.md index 98bd096a3329..4ad33effdd4a 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.windows.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.windows.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.windows/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.windows/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.windows/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.windows/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.windows/ -title: prometheus.exporter.windows description: Learn about prometheus.exporter.windows +title: prometheus.exporter.windows --- # prometheus.exporter.windows @@ -46,11 +47,11 @@ The following blocks are supported inside the definition of `prometheus.exporter.windows` to configure collector-specific options: Hierarchy | Name | Description | Required ----------------|--------------------|------------------------------------------|---------- -dfsr | [dfsr][] | Configures the dfsr collector. | no +---------------|--------------------|------------------------------------------|--------- +dfsr | [dfsr][] | Configures the dfsr collector. | no exchange | [exchange][] | Configures the exchange collector. | no iis | [iis][] | Configures the iis collector. | no -logical_disk | [logical_disk][] | Configures the logical_disk collector. | no +logical_disk | [logical_disk][] | Configures the logical_disk collector. | no msmq | [msmq][] | Configures the msmq collector. | no mssql | [mssql][] | Configures the mssql collector. | no network | [network][] | Configures the network collector. | no @@ -187,7 +188,7 @@ When `text_file_directory` is set, only files with the extension `.prom` inside ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} ## Component health @@ -270,6 +271,11 @@ Name | Description | Enabled by default See the linked documentation on each collector for more information on reported metrics, configuration settings and usage examples. +{{% admonition type="caution" %}} +Certain collectors will cause {{< param "PRODUCT_ROOT_NAME" >}} to crash if those collectors are used and the required infrastructure is not installed. +These include but are not limited to mscluster_*, vmware, nps, dns, msmq, teradici_pcoip, ad, hyperv, and scheduled_task. +{{% /admonition %}} + ## Example This example uses a [`prometheus.scrape` component][scrape] to collect metrics @@ -301,3 +307,20 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + + + +## Compatible components + +`prometheus.exporter.windows` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. 
Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md b/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md index 89a1fcb81df5..fa324640d0ee 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md +++ b/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.operator.podmonitors/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.operator.podmonitors/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.operator.podmonitors/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.podmonitors/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.operator.podmonitors/ +description: Learn about prometheus.operator.podmonitors labels: stage: beta title: prometheus.operator.podmonitors -description: Learn about prometheus.operator.podmonitors --- # prometheus.operator.podmonitors -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} `prometheus.operator.podmonitors` discovers [PodMonitor](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor) resources in your kubernetes cluster and scrapes the targets they reference. This component performs three main functions: @@ -20,7 +21,7 @@ description: Learn about prometheus.operator.podmonitors 2. Discover Pods in your cluster that match those PodMonitors. 3. Scrape metrics from those Pods, and forward them to a receiver. -The default configuration assumes the agent is running inside a Kubernetes cluster, and uses the in-cluster config to access the Kubernetes API. It can be run from outside the cluster by supplying connection info in the `client` block, but network level access to pods is required to scrape metrics from them. +The default configuration assumes {{< param "PRODUCT_NAME" >}} is running inside a Kubernetes cluster, and uses the in-cluster configuration to access the Kubernetes API. It can be run from outside the cluster by supplying connection info in the `client` block, but network level access to pods is required to scrape metrics from them. PodMonitors may reference secrets for authenticating to targets to scrape them. In these cases, the secrets are loaded and refreshed only when the PodMonitor is updated or when this component refreshes its' internal state, which happens on a 5-minute refresh cycle. @@ -57,7 +58,7 @@ rule | [rule][] | Relabeling rules to apply to discovered targets. | no scrape | [scrape][] | Default scrape configuration to apply to discovered targets. | no selector | [selector][] | Label selector for which PodMonitors to discover. | no selector > match_expression | [match_expression][] | Label selector expression for which PodMonitors to discover. | no -clustering | [clustering][] | Configure the component for when the Agent is running in clustered mode. | no +clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_ROOT_NAME" >}} is running in clustered mode. | no The `>` symbol indicates deeper levels of nesting. For example, `client > basic_auth` refers to a `basic_auth` block defined @@ -77,7 +78,7 @@ inside a `client` block. 
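For example, the `client > basic_auth` nesting described above corresponds to configuration like the following minimal sketch. The API server address and credentials are placeholders, and `forward_to` points at a hypothetical `prometheus.remote_write.default` component defined elsewhere:

```river
prometheus.operator.podmonitors "example" {
  forward_to = [prometheus.remote_write.default.receiver]

  client {
    // Placeholder API server address, used when running outside the cluster.
    api_server = "https://kubernetes.example.com:6443"

    // `client > basic_auth`: a basic_auth block nested inside the client block.
    basic_auth {
      username = "monitoring"
      password = env("KUBERNETES_PASSWORD")
    }
  }
}
```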
### client block The `client` block configures the Kubernetes client used to discover PodMonitors. If the `client` block isn't provided, the default in-cluster -configuration with the service account of the running Grafana Agent pod is +configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is used. The following arguments are supported: @@ -100,27 +101,27 @@ Name | Type | Description | Default | Required ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} ### scrape block -{{< docs/shared lookup="flow/reference/components/prom-operator-scrape.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/prom-operator-scrape.md" source="agent" version="" >}} ### selector block @@ -162,7 +163,7 @@ Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- `enabled` | `bool` | Enables sharing targets with other cluster nodes. | `false` | yes -When the agent is [using clustering][], and `enabled` is set to true, +When {{< param "PRODUCT_ROOT_NAME" >}} is [using clustering][], and `enabled` is set to true, then this component instance opts-in to participating in the cluster to distribute scrape load between all cluster nodes. @@ -181,7 +182,7 @@ sharding where _all_ nodes have to be re-distributed, as only 1/N of the target's ownership is transferred, but is eventually consistent (rather than fully consistent like hashmod sharding is). -If the agent is _not_ running in clustered mode, then the block is a no-op, and +If {{< param "PRODUCT_ROOT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op, and `prometheus.operator.podmonitors` scrapes every target it receives in its arguments. [using clustering]: {{< relref "../../concepts/clustering.md" >}} @@ -243,7 +244,7 @@ prometheus.operator.podmonitors "pods" { } ``` -This example will apply additional relabel rules to discovered targets to filter by hostname. This may be useful if running the agent as a DaemonSet. +This example will apply additional relabel rules to discovered targets to filter by hostname. This may be useful if running {{< param "PRODUCT_ROOT_NAME" >}} as a DaemonSet. 
```river prometheus.operator.podmonitors "pods" { @@ -255,3 +256,20 @@ prometheus.operator.podmonitors "pods" { } } ``` + + +## Compatible components + +`prometheus.operator.podmonitors` can accept arguments from the following components: + +- Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.operator.probes.md b/docs/sources/flow/reference/components/prometheus.operator.probes.md index d27e43f49f11..256634a88438 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.probes.md +++ b/docs/sources/flow/reference/components/prometheus.operator.probes.md @@ -3,26 +3,30 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.operator.probes/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.operator.probes/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.operator.probes/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.probes/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.operator.probes/ +description: Learn about prometheus.operator.probes labels: stage: beta title: prometheus.operator.probes -description: Learn about prometheus.operator.probes --- # prometheus.operator.probes -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} -`prometheus.operator.probes` discovers [Probe](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.Probe) resources in your Kubernetes cluster and scrapes the targets they reference. This component performs three main functions: +`prometheus.operator.probes` discovers [Probe](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.Probe) resources in your Kubernetes cluster and scrapes the targets they reference. + This component performs three main functions: 1. Discover Probe resources from your Kubernetes cluster. -2. Discover targets or ingresses that match those Probes. -3. Scrape metrics from those endpoints, and forward them to a receiver. +1. Discover targets or ingresses that match those Probes. +1. Scrape metrics from those endpoints, and forward them to a receiver. -The default configuration assumes the agent is running inside a Kubernetes cluster, and uses the in-cluster config to access the Kubernetes API. It can be run from outside the cluster by supplying connection info in the `client` block, but network level access to pods is required to scrape metrics from them. +The default configuration assumes {{< param "PRODUCT_NAME" >}} is running inside a Kubernetes cluster, and uses the in-cluster config to access the Kubernetes API. +It can be run from outside the cluster by supplying connection info in the `client` block, but network level access to pods is required to scrape metrics from them. -Probes may reference secrets for authenticating to targets to scrape them. 
In these cases, the secrets are loaded and refreshed only when the Probe is updated or when this component refreshes its' internal state, which happens on a 5-minute refresh cycle. +Probes may reference secrets for authenticating to targets to scrape them. +In these cases, the secrets are loaded and refreshed only when the Probe is updated or when this component refreshes its internal state, which happens on a 5-minute refresh cycle. ## Usage @@ -77,8 +81,7 @@ inside a `client` block. ### client block The `client` block configures the Kubernetes client used to discover Probes. If the `client` block isn't provided, the default in-cluster -configuration with the service account of the running Grafana Agent pod is -used. +configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is used. The following arguments are supported: @@ -100,27 +103,27 @@ Name | Type | Description | Default | Required ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} ### scrape block -{{< docs/shared lookup="flow/reference/components/prom-operator-scrape.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/prom-operator-scrape.md" source="agent" version="" >}} ### selector block @@ -162,7 +165,7 @@ Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- `enabled` | `bool` | Enables sharing targets with other cluster nodes. | `false` | yes -When the agent is running in [clustered mode][], and `enabled` is set to true, +When {{< param "PRODUCT_NAME" >}} is running in [clustered mode][], and `enabled` is set to true, then this component instance opts-in to participating in the cluster to distribute scrape load between all cluster nodes. @@ -181,14 +184,14 @@ sharding where _all_ nodes have to be re-distributed, as only 1/N of the target's ownership is transferred, but is eventually consistent (rather than fully consistent like hashmod sharding is). -If the agent is _not_ running in clustered mode, then the block is a no-op, and +If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op, and `prometheus.operator.probes` scrapes every target it receives in its arguments. [clustered mode]: {{< relref "../cli/run.md#clustering-beta" >}} ## Exported fields -`prometheus.operator.probes` does not export any fields. It forwards all metrics it scrapes to the receiver configures with the `forward_to` argument.
+`prometheus.operator.probes` does not export any fields. It forwards all metrics it scrapes to the receivers configured with the `forward_to` argument. ## Component health @@ -243,7 +246,7 @@ prometheus.operator.probes "pods" { } ``` -This example will apply additional relabel rules to discovered targets to filter by hostname. This may be useful if running the agent as a DaemonSet. +This example will apply additional relabel rules to discovered targets to filter by hostname. This may be useful if running {{< param "PRODUCT_NAME" >}} as a DaemonSet. ```river prometheus.operator.probes "probes" { @@ -255,3 +258,20 @@ prometheus.operator.probes "probes" { } } ``` + + +## Compatible components + +`prometheus.operator.probes` can accept arguments from the following components: + +- Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md b/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md index 2870418b0c54..8b2e0ce29cdf 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md +++ b/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md @@ -3,26 +3,29 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.operator.servicemonitors/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.operator.servicemonitors/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.operator.servicemonitors/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.servicemonitors/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.operator.servicemonitors/ +description: Learn about prometheus.operator.servicemonitors labels: stage: beta title: prometheus.operator.servicemonitors -description: Learn about prometheus.operator.servicemonitors --- # prometheus.operator.servicemonitors -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} `prometheus.operator.servicemonitors` discovers [ServiceMonitor](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.ServiceMonitor) resources in your kubernetes cluster and scrapes the targets they reference. This component performs three main functions: 1. Discover ServiceMonitor resources from your Kubernetes cluster. -2. Discover Services and Endpoints in your cluster that match those ServiceMonitors. -3. Scrape metrics from those Endpoints, and forward them to a receiver. +1. Discover Services and Endpoints in your cluster that match those ServiceMonitors. +1. Scrape metrics from those Endpoints, and forward them to a receiver. -The default configuration assumes the agent is running inside a Kubernetes cluster, and uses the in-cluster config to access the Kubernetes API. It can be run from outside the cluster by supplying connection info in the `client` block, but network level access to discovered endpoints is required to scrape metrics from them. 
+The default configuration assumes {{< param "PRODUCT_NAME" >}} is running inside a Kubernetes cluster, and uses the in-cluster configuration to access the Kubernetes API. +It can be run from outside the cluster by supplying connection info in the `client` block, but network level access to discovered endpoints is required to scrape metrics from them. -ServiceMonitors may reference secrets for authenticating to targets to scrape them. In these cases, the secrets are loaded and refreshed only when the ServiceMonitor is updated or when this component refreshes its' internal state, which happens on a 5-minute refresh cycle. +ServiceMonitors may reference secrets for authenticating to targets to scrape them. +In these cases, the secrets are loaded and refreshed only when the ServiceMonitor is updated or when this component refreshes its internal state, which happens on a 5-minute refresh cycle. ## Usage @@ -57,7 +60,7 @@ rule | [rule][] | Relabeling rules to apply to discovered targets. | no scrape | [scrape][] | Default scrape configuration to apply to discovered targets. | no selector | [selector][] | Label selector for which ServiceMonitors to discover. | no selector > match_expression | [match_expression][] | Label selector expression for which ServiceMonitors to discover. | no -clustering | [clustering][] | Configure the component for when the Agent is running in clustered mode. | no +clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_NAME" >}} is running in clustered mode. | no The `>` symbol indicates deeper levels of nesting. For example, `client > basic_auth` refers to a `basic_auth` block defined @@ -76,9 +79,8 @@ inside a `client` block. ### client block -The `client` block configures the Kubernetes client used to discover ServiceMonitors. If the `client` block isn't provided, the default in-cluster -configuration with the service account of the running Grafana Agent pod is -used. +The `client` block configures the Kubernetes client used to discover ServiceMonitors. +If the `client` block isn't provided, the default in-cluster configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is used.
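For illustration, here is a minimal sketch of a `client` block that overrides that default to reach a cluster from the outside. The `api_server` and `bearer_token_file` argument names are assumed from the client configuration, and the values are placeholders.

```river
prometheus.operator.servicemonitors "external" {
  forward_to = [prometheus.remote_write.default.receiver]

  client {
    // Placeholder values; point these at your own API server and credentials.
    api_server        = "https://kubernetes.example.com:6443"
    bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
  }
}
```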
The following arguments are supported: @@ -100,27 +102,27 @@ Name | Type | Description | Default | Required ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} ### scrape block -{{< docs/shared lookup="flow/reference/components/prom-operator-scrape.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/prom-operator-scrape.md" source="agent" version="" >}} ### selector block @@ -162,7 +164,7 @@ Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- `enabled` | `bool` | Enables sharing targets with other cluster nodes. | `false` | yes -When the agent is using [using clustering][], and `enabled` is set to true, +When {{< param "PRODUCT_NAME" >}} is [using clustering][], and `enabled` is set to true, then this component instance opts-in to participating in the cluster to distribute scrape load between all cluster nodes. @@ -181,7 +183,7 @@ sharding where _all_ nodes have to be re-distributed, as only 1/N of the target's ownership is transferred, but is eventually consistent (rather than fully consistent like hashmod sharding is). -If the agent is _not_ running in clustered mode, then the block is a no-op, and +If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op, and `prometheus.operator.servicemonitors` scrapes every target it receives in its arguments. [using clustering]: {{< relref "../../concepts/clustering.md" >}} @@ -244,7 +246,7 @@ prometheus.operator.servicemonitors "services" { } ``` -This example will apply additional relabel rules to discovered targets to filter by hostname. This may be useful if running the agent as a DaemonSet. +This example will apply additional relabel rules to discovered targets to filter by hostname. This may be useful if running {{< param "PRODUCT_NAME" >}} as a DaemonSet. ```river prometheus.operator.servicemonitors "services" { @@ -256,3 +258,20 @@ prometheus.operator.servicemonitors "services" { } } ``` + + +## Compatible components + +`prometheus.operator.servicemonitors` can accept arguments from the following components: + +- Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details.
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.receive_http.md b/docs/sources/flow/reference/components/prometheus.receive_http.md index 863c5db1821f..d48985cc3f18 100644 --- a/docs/sources/flow/reference/components/prometheus.receive_http.md +++ b/docs/sources/flow/reference/components/prometheus.receive_http.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.receive_http/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.receive_http/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.receive_http/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.receive_http/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.receive_http/ -title: prometheus.receive_http description: Learn about prometheus.receive_http +title: prometheus.receive_http --- # prometheus.receive_http `prometheus.receive_http` listens for HTTP requests containing Prometheus metric samples and forwards them to other components capable of receiving metrics. -The HTTP API exposed is compatible with [Prometheus `remote_write` API][prometheus-remote-write-docs]. This means that other [`prometheus.remote_write`][prometheus.remote_write] components can be used as a client and send requests to `prometheus.receive_http` which enables using the Agent as a proxy for prometheus metrics. +The HTTP API exposed is compatible with [Prometheus `remote_write` API][prometheus-remote-write-docs]. This means that other [`prometheus.remote_write`][prometheus.remote_write] components can be used as a client and send requests to `prometheus.receive_http` which enables using {{< param "PRODUCT_ROOT_NAME" >}} as a proxy for prometheus metrics. [prometheus.remote_write]: {{< relref "./prometheus.remote_write.md" >}} [prometheus-remote-write-docs]: https://prometheus.io/docs/prometheus/2.45/querying/api/#remote-write-receiver @@ -23,7 +24,7 @@ The HTTP API exposed is compatible with [Prometheus `remote_write` API][promethe prometheus.receive_http "LABEL" { http { listen_address = "LISTEN_ADDRESS" - listen_port = PORT + listen_port = PORT } forward_to = RECEIVER_LIST } @@ -31,29 +32,29 @@ prometheus.receive_http "LABEL" { The component will start an HTTP server supporting the following endpoint: -- `POST /api/v1/metrics/write` - send metrics to the component, which in turn will be forwarded to the receivers as configured in `forward_to` argument. The request format must match that of [Prometheus `remote_write` API][prometheus-remote-write-docs]. One way to send valid requests to this component is to use another Grafana Agent with a [`prometheus.remote_write`][prometheus.remote_write] component. +- `POST /api/v1/metrics/write` - send metrics to the component, which in turn will be forwarded to the receivers as configured in `forward_to` argument. The request format must match that of [Prometheus `remote_write` API][prometheus-remote-write-docs]. One way to send valid requests to this component is to use another {{< param "PRODUCT_ROOT_NAME" >}} with a [`prometheus.remote_write`][prometheus.remote_write] component. 
## Arguments `prometheus.receive_http` supports the following arguments: - Name | Type | Description | Default | Required ---------------|------------------|---------------------------------------|---------|---------- - `forward_to` | `list(receiver)` | List of receivers to send metrics to. | | yes +Name | Type | Description | Default | Required +-------------|------------------|---------------------------------------|---------|--------- +`forward_to` | `list(MetricsReceiver)` | List of receivers to send metrics to. | | yes ## Blocks The following blocks are supported inside the definition of `prometheus.receive_http`: - Hierarchy | Name | Description | Required ------------|----------|----------------------------------------------------|---------- - `http` | [http][] | Configures the HTTP server that receives requests. | no +Hierarchy | Name | Description | Required +----------|----------|----------------------------------------------------|--------- +`http` | [http][] | Configures the HTTP server that receives requests. | no [http]: #http ### http -{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} ## Exported fields @@ -105,7 +106,7 @@ prometheus.remote_write "local" { ### Proxying metrics -In order to send metrics to the `prometheus.receive_http` component defined in the previous example, another Grafana Agent can run with the following configuration: +In order to send metrics to the `prometheus.receive_http` component defined in the previous example, another {{< param "PRODUCT_ROOT_NAME" >}} can run with the following configuration: ```river // Collects metrics of localhost:12345 @@ -116,15 +117,32 @@ prometheus.scrape "agent_self" { forward_to = [prometheus.remote_write.local.receiver] } -// Writes metrics to localhost:9999/api/v1/metrics/write - e.g. served by +// Writes metrics to localhost:9999/api/v1/metrics/write - e.g. served by // the prometheus.receive_http component from the example above. prometheus.remote_write "local" { endpoint { url = "http://localhost:9999/api/v1/metrics/write" - } + } } ``` ## Technical details `prometheus.receive_http` uses [snappy](https://en.wikipedia.org/wiki/Snappy_(compression)) for compression. + + +## Compatible components + +`prometheus.receive_http` can accept arguments from the following components: + +- Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.relabel.md b/docs/sources/flow/reference/components/prometheus.relabel.md index 23fb71455a41..65cb02394d4a 100644 --- a/docs/sources/flow/reference/components/prometheus.relabel.md +++ b/docs/sources/flow/reference/components/prometheus.relabel.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.relabel/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.relabel/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.relabel/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.relabel/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.relabel/ -title: prometheus.relabel description: Learn about prometheus.relabel +title: prometheus.relabel --- # prometheus.relabel @@ -54,7 +55,8 @@ The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- -`forward_to` | `list(receiver)` | Where the metrics should be forwarded to, after relabeling takes place. | | yes +`forward_to` | `list(MetricsReceiver)` | Where the metrics should be forwarded to, after relabeling takes place. | | yes +`max_cache_size` | `int` | The maximum number of elements to hold in the relabeling cache. | 100,000 | no ## Blocks @@ -68,7 +70,7 @@ rule | [rule][] | Relabeling rules to apply to received metrics. | no ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} ## Exported fields @@ -76,7 +78,7 @@ The following fields are exported and can be referenced by other components: Name | Type | Description ---- | ---- | ----------- -`receiver` | `receiver` | The input receiver where samples are sent to be relabeled. +`receiver` | `MetricsReceiver` | The input receiver where samples are sent to be relabeled. `rules` | `RelabelRules` | The currently configured relabeling rules. ## Component health @@ -167,3 +169,23 @@ metric_a{host = "cluster_a/production", __address__ = "cluster_a", app = "backe The two resulting metrics are then propagated to each receiver defined in the `forward_to` argument. + + +## Compatible components + +`prometheus.relabel` can accept arguments from the following components: + +- Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) + +`prometheus.relabel` has exports that can be consumed by the following components: + +- Components that consume [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/prometheus.remote_write.md b/docs/sources/flow/reference/components/prometheus.remote_write.md index e2ebd4b9cf29..f869343e0919 100644 --- a/docs/sources/flow/reference/components/prometheus.remote_write.md +++ b/docs/sources/flow/reference/components/prometheus.remote_write.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.remote_write/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.remote_write/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.remote_write/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.remote_write/ -title: prometheus.remote_write description: Learn about prometheus.remote_write +title: prometheus.remote_write --- # prometheus.remote_write @@ -126,31 +127,31 @@ metrics fails. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### sigv4 block -{{< docs/shared lookup="flow/reference/components/sigv4-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/sigv4-block.md" source="agent" version="" >}} ### azuread block -{{< docs/shared lookup="flow/reference/components/azuread-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/azuread-block.md" source="agent" version="" >}} ### managed_identity block -{{< docs/shared lookup="flow/reference/components/managed_identity-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/managed_identity-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ### queue_config block @@ -164,6 +165,7 @@ Name | Type | Description | Default | Required `min_backoff` | `duration` | Initial retry delay. The backoff time gets doubled for each retry. | `"30ms"` | no `max_backoff` | `duration` | Maximum retry delay. | `"5s"` | no `retry_on_http_429` | `bool` | Retry when an HTTP 429 status code is received. | `true` | no +`sample_age_limit` | `duration` | Maximum age of samples to send. | `"0s"` | no Each queue then manages a number of concurrent _shards_ which is responsible for sending a fraction of data to their respective endpoints. The number of @@ -190,6 +192,10 @@ responses should be treated as recoverable errors; other `HTTP 4xx` status code responses are never considered recoverable errors. When `retry_on_http_429` is enabled, `Retry-After` response headers from the servers are honored. +The `sample_age_limit` argument specifies the maximum age of samples to send. 
Any +samples older than the limit are dropped and won't be sent to the remote storage. +The default value is `0s`, which means that all samples are sent (feature is disabled). + ### metadata_config block Name | Type | Description | Default | Required @@ -200,7 +206,7 @@ Name | Type | Description | Default | Required ### write_relabel_config block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} ### wal block @@ -219,7 +225,7 @@ The WAL serves two primary purposes: * Populate in-memory cache after a process restart. The WAL is located inside a component-specific directory relative to the -storage path Grafana Agent is configured to use. See the +storage path {{< param "PRODUCT_NAME" >}} is configured to use. See the [`agent run` documentation][run] for how to change the storage path. The `truncate_frequency` argument configures how often to clean up the WAL. @@ -241,7 +247,7 @@ The following fields are exported and can be referenced by other components: Name | Type | Description ---- | ---- | ----------- -`receiver` | `receiver` | A value which other components can use to send metrics to. +`receiver` | `MetricsReceiver` | A value which other components can use to send metrics to. ## Component health @@ -354,13 +360,31 @@ prometheus.remote_write "staging" { // prometheus.remote_write component. prometheus.scrape "demo" { targets = [ - // Collect metrics from Grafana Agent's default HTTP listen address. + // Collect metrics from the default HTTP listen address. {"__address__" = "127.0.0.1:12345"}, ] forward_to = [prometheus.remote_write.staging.receiver] } ``` + +### Send metrics to a Mimir instance with a tenant specified + +You can create a `prometheus.remote_write` component that sends your metrics to a specific tenant within the Mimir instance. This is useful when your Mimir instance is using more than one tenant: + +```river +prometheus.remote_write "staging" { + // Send metrics to a Mimir instance + endpoint { + url = "http://mimir:9009/api/v1/push" + + headers = { + "X-Scope-OrgID" = "staging", + } + } +} +``` + ### Send metrics to a managed service You can create a `prometheus.remote_write` component that sends your metrics to a managed service, for example, Grafana Cloud. The Prometheus username and the Grafana Cloud API Key are injected in this example through environment variables. @@ -384,5 +408,21 @@ Any labels that start with `__` will be removed before sending to the endpoint. ## Data retention -{{< docs/shared source="agent" lookup="/wal-data-retention.md" version="" >}} +{{< docs/shared source="agent" lookup="/wal-data-retention.md" version="" >}} + + + +## Compatible components + +`prometheus.remote_write` has exports that can be consumed by the following components: + +- Components that consume [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.scrape.md b/docs/sources/flow/reference/components/prometheus.scrape.md index 25159245bf2d..8adf775687f1 100644 --- a/docs/sources/flow/reference/components/prometheus.scrape.md +++ b/docs/sources/flow/reference/components/prometheus.scrape.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/prometheus.scrape/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.scrape/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.scrape/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.scrape/ -title: prometheus.scrape description: Learn about prometheus.scrape +title: prometheus.scrape --- # prometheus.scrape @@ -43,30 +44,30 @@ The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- -`targets` | `list(map(string))` | List of targets to scrape. | | yes -`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes -`job_name` | `string` | The value to use for the job label if not already set. | component name | no -`extra_metrics` | `bool` | Whether extra metrics should be generated for scrape targets. | `false` | no +`targets` | `list(map(string))` | List of targets to scrape. | | yes +`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes +`job_name` | `string` | The value to use for the job label if not already set. | component name | no +`extra_metrics` | `bool` | Whether extra metrics should be generated for scrape targets. | `false` | no `enable_protobuf_negotiation` | `bool` | Whether to enable protobuf negotiation with the client. | `false` | no -`honor_labels` | `bool` | Indicator whether the scraped metrics should remain unmodified. | `false` | no -`honor_timestamps` | `bool` | Indicator whether the scraped timestamps should be respected. | `true` | no -`params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no -`scrape_classic_histograms` | `bool` | Whether to scrape a classic histogram that is also exposed as a native histogram. | `false` | no -`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape config. | `"60s"` | no -`scrape_timeout` | `duration` | The timeout for scraping targets of this config. | `"10s"` | no -`metrics_path` | `string` | The HTTP resource path on which to fetch metrics from targets. | `/metrics` | no -`scheme` | `string` | The URL scheme with which to fetch metrics from targets. | | no -`body_size_limit` | `int` | An uncompressed response body larger than this many bytes causes the scrape to fail. 0 means no limit. | | no -`sample_limit` | `uint` | More than this many samples post metric-relabeling causes the scrape to fail | | no -`target_limit` | `uint` | More than this many targets after the target relabeling causes the scrapes to fail. | | no -`label_limit` | `uint` | More than this many labels post metric-relabeling causes the scrape to fail. | | no -`label_name_length_limit` | `uint` | More than this label name length post metric-relabeling causes the scrape to fail. | | no -`label_value_length_limit` | `uint` | More than this label value length post metric-relabeling causes the scrape to fail. 
| | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`honor_labels` | `bool` | Indicator whether the scraped metrics should remain unmodified. | `false` | no +`honor_timestamps` | `bool` | Indicator whether the scraped timestamps should be respected. | `true` | no +`params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no +`scrape_classic_histograms` | `bool` | Whether to scrape a classic histogram that is also exposed as a native histogram. | `false` | no +`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape configuration. | `"60s"` | no +`scrape_timeout` | `duration` | The timeout for scraping targets of this configuration. | `"10s"` | no +`metrics_path` | `string` | The HTTP resource path on which to fetch metrics from targets. | `/metrics` | no +`scheme` | `string` | The URL scheme with which to fetch metrics from targets. | | no +`body_size_limit` | `int` | An uncompressed response body larger than this many bytes causes the scrape to fail. 0 means no limit. | | no +`sample_limit` | `uint` | More than this many samples post metric-relabeling causes the scrape to fail | | no +`target_limit` | `uint` | More than this many targets after the target relabeling causes the scrapes to fail. | | no +`label_limit` | `uint` | More than this many labels post metric-relabeling causes the scrape to fail. | | no +`label_name_length_limit` | `uint` | More than this label name length post metric-relabeling causes the scrape to fail. | | no +`label_value_length_limit` | `uint` | More than this label value length post metric-relabeling causes the scrape to fail. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no At most one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -101,19 +102,19 @@ an `oauth2` block. 
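As a hedged sketch of that nesting in practice, the following `prometheus.scrape` instance places a `tls_config` block inside an `oauth2` block. The argument names follow the shared oauth2 and tls_config reference blocks below, and all values are placeholders.

```river
prometheus.scrape "example" {
  targets    = [{"__address__" = "127.0.0.1:12345"}]
  forward_to = [prometheus.remote_write.default.receiver]

  oauth2 {
    client_id          = "example-client"
    client_secret_file = "/var/run/secrets/oauth2-client-secret"
    token_url          = "https://auth.example.com/oauth2/token"

    // `oauth2 > tls_config`: nested one level below oauth2.
    tls_config {
      ca_file = "/etc/ssl/certs/ca.pem"
    }
  }
}
```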
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ### clustering (beta) @@ -121,7 +122,7 @@ Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- `enabled` | `bool` | Enables sharing targets with other cluster nodes. | `false` | yes -When the agent is [using clustering][], and `enabled` is set to true, +When {{< param "PRODUCT_NAME" >}} is [using clustering][], and `enabled` is set to true, then this `prometheus.scrape` component instance opts-in to participating in the cluster to distribute scrape load between all cluster nodes. @@ -141,7 +142,7 @@ sharding where _all_ nodes have to be re-distributed, as only 1/N of the targets ownership is transferred, but is eventually consistent (rather than fully consistent like hashmod sharding is). -If the agent is _not_ running in clustered mode, then the block is a no-op and +If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op and `prometheus.scrape` scrapes every target it receives in its arguments. [using clustering]: {{< relref "../../concepts/clustering.md" >}} @@ -280,9 +281,28 @@ The following special labels can change the behavior of prometheus.scrape: * `__scheme__` is the name of the label that holds the scheme (http,https) on which to scrape a target. * `__scrape_interval__` is the name of the label that holds the scrape interval used to scrape a target. * `__scrape_timeout__` is the name of the label that holds the scrape timeout used to scrape a target. -* `__param__` is a prefix for labels that provide URL parameters used to scrape a target. +* `__param_<name>` is a prefix for labels that provide URL parameters `<name>` used to scrape a target. Special labels added after a scrape * `__name__` is the label name indicating the metric name of a timeseries. * `job` is the label name indicating the job from which a timeseries was scraped. * `instance` is the label name used for the instance label. + + + +## Compatible components + +`prometheus.scrape` can accept arguments from the following components: + +- Components that export [Targets]({{< relref "../compatibility/#targets-exporters" >}}) +- Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details.
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/pyroscope.ebpf.md b/docs/sources/flow/reference/components/pyroscope.ebpf.md index 08dde0a41826..a324e71293ab 100644 --- a/docs/sources/flow/reference/components/pyroscope.ebpf.md +++ b/docs/sources/flow/reference/components/pyroscope.ebpf.md @@ -1,24 +1,25 @@ --- aliases: - - /docs/grafana-cloud/agent/flow/reference/components/pyroscope.ebpf/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.ebpf/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.ebpf/ +- /docs/grafana-cloud/agent/flow/reference/components/pyroscope.ebpf/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.ebpf/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.ebpf/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.ebpf/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.ebpf/ +description: Learn about pyroscope.ebpf labels: stage: beta title: pyroscope.ebpf -description: Learn about pyroscope.ebpf --- # pyroscope.ebpf -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} `pyroscope.ebpf` configures an ebpf profiling job for the current host. The collected performance profiles are forwarded to the list of receivers passed in `forward_to`. {{% admonition type="note" %}} -To use the `pyroscope.ebpf` component you must run Grafana Agent as root and inside host pid namespace. +To use the `pyroscope.ebpf` component you must run {{< param "PRODUCT_NAME" >}} as root and inside host pid namespace. {{% /admonition %}} You can specify multiple `pyroscope.ebpf` components by giving them different labels, however it is not recommended as @@ -44,15 +45,15 @@ values. | Name | Type | Description | Default | Required | |---------------------------|--------------------------|-------------------------------------------------------------------------------------|---------|----------| | `targets` | `list(map(string))` | List of targets to group profiles by container id | | yes | -| `forward_to` | `list(ProfilesReceiver)` | List of receivers to send collected profiles to. | | yes | -| `collect_interval` | `duration` | How frequently to collect profiles | `15s` | no | -| `sample_rate` | `int` | How many times per second to collect profile samples | 97 | no | -| `pid_cache_size` | `int` | The size of the pid -> proc symbols table LRU cache | 32 | no | -| `build_id_cache_size` | `int` | The size of the elf file build id -> symbols table LRU cache | 64 | no | -| `same_file_cache_size` | `int` | The size of the elf file -> symbols table LRU cache | 8 | no | -| `container_id_cache_size` | `int` | The size of the pid -> container ID table LRU cache | 1024 | no | -| `collect_user_profile` | `bool` | A flag to enable/disable collection of userspace profiles | true | no | -| `collect_kernel_profile` | `bool` | A flag to enable/disable collection of kernelspace profiles | true | no | +| `forward_to` | `list(ProfilesReceiver)` | List of receivers to send collected profiles to. 
| | yes | +| `collect_interval` | `duration` | How frequently to collect profiles | `15s` | no | +| `sample_rate` | `int` | How many times per second to collect profile samples | 97 | no | +| `pid_cache_size` | `int` | The size of the pid -> proc symbols table LRU cache | 32 | no | +| `build_id_cache_size` | `int` | The size of the elf file build id -> symbols table LRU cache | 64 | no | +| `same_file_cache_size` | `int` | The size of the elf file -> symbols table LRU cache | 8 | no | +| `container_id_cache_size` | `int` | The size of the pid -> container ID table LRU cache | 1024 | no | +| `collect_user_profile` | `bool` | A flag to enable/disable collection of userspace profiles | true | no | +| `collect_kernel_profile` | `bool` | A flag to enable/disable collection of kernelspace profiles | true | no | | `demangle` | `string` | C++ demangle mode. Available options are: `none`, `simplified`, `templates`, `full` | `none` | no | | `python_enabled` | `bool` | A flag to enable/disable python profiling | true | no | @@ -191,9 +192,9 @@ Interpreted methods will display the interpreter function’s name rather than t ### Kubernetes discovery In the following example, performance profiles are collected from pods on the same node, discovered using -`discovery.kubernetes`. Pod selection relies on the `HOSTNAME` environment variable, which is a pod name if the agent is -used as a Grafana agent helm chart. The `service_name` label is set -to `{__meta_kubernetes_namespace}/{__meta_kubernetes_pod_container_name}` from kubernetes meta labels. +`discovery.kubernetes`. Pod selection relies on the `HOSTNAME` environment variable, which is a pod name if {{< param "PRODUCT_ROOT_NAME" >}} is +used as a {{< param "PRODUCT_ROOT_NAME" >}} Helm chart. The `service_name` label is set +to `{__meta_kubernetes_namespace}/{__meta_kubernetes_pod_container_name}` from Kubernetes meta labels. ```river discovery.kubernetes "all_pods" { @@ -287,3 +288,21 @@ pyroscope.ebpf "default" { targets = discovery.relabel.local_containers.output } ``` + + +## Compatible components + +`pyroscope.ebpf` can accept arguments from the following components: + +- Components that export [Targets]({{< relref "../compatibility/#targets-exporters" >}}) +- Components that export [Pyroscope `ProfilesReceiver`]({{< relref "../compatibility/#pyroscope-profilesreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. 
+ +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/pyroscope.scrape.md b/docs/sources/flow/reference/components/pyroscope.scrape.md index 37a90ef89cc8..c2c54a83bfc8 100644 --- a/docs/sources/flow/reference/components/pyroscope.scrape.md +++ b/docs/sources/flow/reference/components/pyroscope.scrape.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/pyroscope.scrape/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.scrape/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.scrape/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.scrape/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.scrape/ +description: Learn about pyroscope.scrape labels: stage: beta title: pyroscope.scrape -description: Learn about pyroscope.scrape --- # pyroscope.scrape -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} `pyroscope.scrape` configures a [pprof] scraping job for a given set of `targets`. The scraped performance profiles are forwarded to the list of receivers passed in @@ -53,8 +54,8 @@ Name | Type | Description | Default | Required `forward_to` | `list(ProfilesReceiver)` | List of receivers to send scraped profiles to. | | yes `job_name` | `string` | The job name to override the job label with. | component name | no `params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no -`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape config. | `"15s"` | no -`scrape_timeout` | `duration` | The timeout for scraping targets of this config. | `"15s"` | no +`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape configuration. | `"15s"` | no +`scrape_timeout` | `duration` | The timeout for scraping targets of this configuration. | `"15s"` | no `scheme` | `string` | The URL scheme with which to fetch metrics from targets. | | no `bearer_token` | `secret` | Bearer token to authenticate with. | | no `bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no @@ -93,7 +94,7 @@ The following blocks are supported inside the definition of `pyroscope.scrape`: | profiling_config > profile.godeltaprof_mutex | [profile.godeltaprof_mutex][] | Collect [godeltaprof][] mutex profiles. | no | | profiling_config > profile.godeltaprof_block | [profile.godeltaprof_block][] | Collect [godeltaprof][] block profiles. | no | | profiling_config > profile.custom | [profile.custom][] | Collect custom profiles. | no | -| clustering | [clustering][] | Configure the component for when the Agent is running in clustered mode. | no | +| clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_NAME" >}} is running in clustered mode. | no | The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -122,19 +123,19 @@ an `oauth2` block. 
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ### profiling_config block @@ -304,7 +305,7 @@ Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- `enabled` | `bool` | Enables sharing targets with other cluster nodes. | `false` | yes -When the agent is [using clustering][], and `enabled` is set to true, +When {{< param "PRODUCT_NAME" >}} is [using clustering][], and `enabled` is set to true, then this `pyroscope.scrape` component instance opts-in to participating in the cluster to distribute scrape load between all cluster nodes. @@ -313,11 +314,11 @@ subset per node, where each node is roughly assigned the same number of targets. If the state of the cluster changes, such as a new node joins, then the subset of targets to scrape per node will be recalculated. -When clustering mode is enabled, all agents participating in the cluster must +When clustering mode is enabled, all {{< param "PRODUCT_ROOT_NAME" >}}s participating in the cluster must use the same configuration file and have access to the same service discovery APIs. -If the agent is _not_ running in clustered mode, this block is a no-op. +If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, this block is a no-op. [using clustering]: {{< relref "../../concepts/clustering.md" >}} @@ -353,7 +354,7 @@ label `__address__` _must always_ be present and corresponds to the `:` that is used for the scrape request. The special label `service_name` is required and must always be present. If it's not specified, it is -attempted to be inferred from multiple sources: +attempted to be inferred from multiple sources: - `__meta_kubernetes_pod_annotation_pyroscope_io_service_name` which is a `pyroscope.io/service_name` pod annotation. - `__meta_kubernetes_namespace` and `__meta_kubernetes_pod_container_name` - `__meta_docker_container_name` @@ -391,7 +392,7 @@ can help pin down a scrape target. ## Example -The following example sets up the scrape job with certain attributes (profiling config, targets) and lets it scrape two local applications (the Agent itself and Pyroscope). +The following example sets up the scrape job with certain attributes (profiling configuration, targets) and lets it scrape two local applications ({{< param "PRODUCT_ROOT_NAME" >}} itself and Pyroscope). The exposed profiles are sent over to the provided list of receivers, as defined by other components. 
```river @@ -427,3 +428,22 @@ http://localhost:12345/debug/pprof/goroutine http://localhost:12345/debug/pprof/profile?seconds=14 http://localhost:12345/debug/fgprof?seconds=14 ``` + + + +## Compatible components + +`pyroscope.scrape` can accept arguments from the following components: + +- Components that export [Targets]({{< relref "../compatibility/#targets-exporters" >}}) +- Components that export [Pyroscope `ProfilesReceiver`]({{< relref "../compatibility/#pyroscope-profilesreceiver-exporters" >}}) + + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/pyroscope.write.md b/docs/sources/flow/reference/components/pyroscope.write.md index 4f45edf1beb6..38b6b542abc0 100644 --- a/docs/sources/flow/reference/components/pyroscope.write.md +++ b/docs/sources/flow/reference/components/pyroscope.write.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/pyroscope.write/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.write/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.write/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.write/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.write/ +description: Learn about pyroscope.write labels: stage: beta title: pyroscope.write -description: Learn about pyroscope.write --- # pyroscope.write -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} `pyroscope.write` receives performance profiles from other components and forwards them to a series of user-supplied endpoints using [Pyroscope' Push API](/oss/pyroscope/). @@ -20,7 +21,7 @@ to a series of user-supplied endpoints using [Pyroscope' Push API](/oss/pyroscop Multiple `pyroscope.write` components can be specified by giving them different labels. -## Usage for Grafana Agent flow mode +## Usage ```river pyroscope.write "LABEL" { @@ -101,19 +102,19 @@ configured locations. 
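Before the individual block references, here is a hedged sketch of a `pyroscope.write` endpoint that authenticates with basic auth. The URL and credentials are placeholders, and the nesting relies on the `basic_auth` block documented below.

```river
pyroscope.write "example" {
  endpoint {
    // Placeholder URL for a Pyroscope-compatible push endpoint.
    url = "https://pyroscope.example.com"

    basic_auth {
      username      = "example-user"
      password_file = "/var/run/secrets/pyroscope-password"
    }
  }
}
```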
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields @@ -159,3 +160,19 @@ pyroscope.scrape "default" { forward_to = [pyroscope.write.staging.receiver] } ``` + + +## Compatible components + +`pyroscope.write` has exports that can be consumed by the following components: + +- Components that consume [Pyroscope `ProfilesReceiver`]({{< relref "../compatibility/#pyroscope-profilesreceiver-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/remote.http.md b/docs/sources/flow/reference/components/remote.http.md index d90db87adb46..98148ad78908 100644 --- a/docs/sources/flow/reference/components/remote.http.md +++ b/docs/sources/flow/reference/components/remote.http.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/remote.http/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.http/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/remote.http/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.http/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.http/ -title: remote.http description: Learn about remote.http +title: remote.http --- # remote.http @@ -36,6 +37,7 @@ Name | Type | Description | Default | Required `url` | `string` | URL to poll. | | yes `method` | `string` | Define HTTP method for the request | `"GET"` | no `headers` | `map(string)` | Custom headers for the request. | `{}` | no +`body` | `string` | The request body. | `""` | no `poll_frequency` | `duration` | Frequency to poll the URL. | `"1m"` | no `poll_timeout` | `duration` | Timeout when polling the URL. | `"10s"` | no `is_secret` | `bool` | Whether the response body should be treated as a secret. | false | no @@ -52,7 +54,7 @@ The poll is successful if the URL returns a `200 OK` response code. All other response codes are treated as errors and mark the component as unhealthy. After a successful poll, the response body from the URL is exported. -[secret]: {{< relref "../../config-language/expressions/types_and_values.md#secrets" >}} +[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} ## Blocks @@ -83,34 +85,34 @@ basic_auth` refers to an `basic_auth` block defined inside a `client` block. The `client` block configures settings used to connect to the HTTP server. 
-{{< docs/shared lookup="flow/reference/components/http-client-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/http-client-config-block.md" source="agent" version="" >}} ### basic_auth block The `basic_auth` block configures basic authentication to use when polling the configured URL. -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block The `authorization` block configures custom authorization to use when polling the configured URL. -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block The `oauth2` block configures OAuth2 authorization to use when polling the configured URL. -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block The `tls_config` block configures TLS settings for connecting to HTTPS servers. -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ### fallback_cache (experimental) diff --git a/docs/sources/flow/reference/components/remote.kubernetes.configmap.md b/docs/sources/flow/reference/components/remote.kubernetes.configmap.md index d958c5141139..56acc6bbcc4e 100644 --- a/docs/sources/flow/reference/components/remote.kubernetes.configmap.md +++ b/docs/sources/flow/reference/components/remote.kubernetes.configmap.md @@ -1,14 +1,17 @@ --- +aliases: +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.kubernetes.configmap/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.kubernetes.configmap/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.kubernetes.configmap/ -title: remote.kubernetes.configmap description: Learn about remote.kubernetes.configmap +title: remote.kubernetes.configmap --- # remote.kubernetes.configmap `remote.kubernetes.configmap` reads a ConfigMap from the Kubernetes API server and exposes its data for other components to consume. -This can be useful anytime the agent needs data from a ConfigMap that is not directly mounted to the Grafana Agent pod. +This can be useful anytime {{< param "PRODUCT_NAME" >}} needs data from a ConfigMap that is not directly mounted to the {{< param "PRODUCT_ROOT_NAME" >}} pod. ## Usage @@ -65,7 +68,7 @@ refers to a `basic_auth` block defined inside a `client` block. ### client block The `client` block configures the Kubernetes client used to discover Probes. If the `client` block isn't provided, the default in-cluster -configuration with the service account of the running Grafana Agent pod is +configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is used. 
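As a minimal sketch of the component in use, assuming the default in-cluster client described above, only the namespace and ConfigMap name need to be set; both values below are placeholders:

```river
// Hypothetical example: read a ConfigMap named "agent-settings" from the
// "monitoring" namespace using the in-cluster service account.
remote.kubernetes.configmap "example" {
  namespace = "monitoring"
  name      = "agent-settings"
}
```

Individual keys can then be referenced from other components through the exported `data` map, for example `remote.kubernetes.configmap.example.data["server_url"]`.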
The following arguments are supported: @@ -88,19 +91,19 @@ Name | Type | Description | Default | Required ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/remote.kubernetes.secret.md b/docs/sources/flow/reference/components/remote.kubernetes.secret.md index f72eab8fde72..3fe84fee4ad0 100644 --- a/docs/sources/flow/reference/components/remote.kubernetes.secret.md +++ b/docs/sources/flow/reference/components/remote.kubernetes.secret.md @@ -1,14 +1,17 @@ --- +aliases: +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.kubernetes.secret/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.kubernetes.secret/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.kubernetes.secret/ -title: remote.kubernetes.secret description: Learn about remote.kubernetes.secret +title: remote.kubernetes.secret --- # remote.kubernetes.secret `remote.kubernetes.secret` reads a Secret from the Kubernetes API server and exposes its data for other components to consume. -A common use case for this is loading credentials or other information from secrets that are not already mounted into the agent pod at deployment time. +A common use case for this is loading credentials or other information from secrets that are not already mounted into the {{< param "PRODUCT_ROOT_NAME" >}} pod at deployment time. ## Usage @@ -65,8 +68,7 @@ refers to a `basic_auth` block defined inside a `client` block. ### client block The `client` block configures the Kubernetes client used to discover Probes. If the `client` block isn't provided, the default in-cluster -configuration with the service account of the running Grafana Agent pod is -used. +configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is used. 
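A comparable minimal sketch for this component, with a placeholder namespace, Secret name, and key:

```river
// Hypothetical example: read remote-write credentials from a Kubernetes Secret.
remote.kubernetes.secret "credentials" {
  namespace = "monitoring"
  name      = "remote-write-credentials"
}
```

Values in the exported `data` map are wrapped as secrets, so they are typically passed to arguments that accept the `secret` type, for example `remote.kubernetes.secret.credentials.data["password"]`.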
The following arguments are supported: @@ -88,19 +90,19 @@ Name | Type | Description | Default | Required ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/remote.s3.md b/docs/sources/flow/reference/components/remote.s3.md index 897e78f112b5..c4ec8e195e86 100644 --- a/docs/sources/flow/reference/components/remote.s3.md +++ b/docs/sources/flow/reference/components/remote.s3.md @@ -3,9 +3,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/components/remote.s3/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.s3/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/remote.s3/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.s3/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.s3/ -title: remote.s3 description: Learn about remote.s3 +title: remote.s3 --- # remote.s3 @@ -43,7 +44,7 @@ Name | Type | Description | Default | Required > **NOTE**: `path` must include a full path to a file. This does not support reading of directories. 
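To make the `path` requirement concrete, here is a minimal sketch with a placeholder bucket and object key:

```river
// Hypothetical example: poll a single object from S3.
// `path` must point at a file, not a directory.
remote.s3 "settings" {
  path = "s3://my-bucket/configs/agent-settings.txt"
}
```

The object body is exposed through the component's exported `content` field.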
-[secret]: {{< relref "../../config-language/expressions/types_and_values.md#secrets" >}} +[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} ## Blocks diff --git a/docs/sources/flow/reference/components/remote.vault.md b/docs/sources/flow/reference/components/remote.vault.md index 17bae832a81f..a4491bd25c66 100644 --- a/docs/sources/flow/reference/components/remote.vault.md +++ b/docs/sources/flow/reference/components/remote.vault.md @@ -1,12 +1,13 @@ --- aliases: -- /docs/agent/latest/flow/reference/components/remote.vault +- /docs/agent/latest/flow/reference/components/remote.vault/ - /docs/grafana-cloud/agent/flow/reference/components/remote.vault/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.vault/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/remote.vault/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.vault/ canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.vault/ -title: remote.vault description: Learn about remote.vault +title: remote.vault --- # remote.vault diff --git a/docs/sources/flow/reference/config-blocks/_index.md b/docs/sources/flow/reference/config-blocks/_index.md index e7e24b9a461c..bf528e3a16e5 100644 --- a/docs/sources/flow/reference/config-blocks/_index.md +++ b/docs/sources/flow/reference/config-blocks/_index.md @@ -3,16 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/config-blocks/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/ +- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/ canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/ -title: Configuration blocks description: Learn about configuration blocks +title: Configuration blocks weight: 200 --- # Configuration blocks Configuration blocks are optional top-level blocks that can be used to -configure various parts of the Grafana Agent process. Each config block can +configure various parts of the {{< param "PRODUCT_NAME" >}} process. Each configuration block can only be defined once. Configuration blocks are _not_ components, so they have no exports. diff --git a/docs/sources/flow/reference/config-blocks/argument.md b/docs/sources/flow/reference/config-blocks/argument.md index 08241ec88732..3e2f4e1a0153 100644 --- a/docs/sources/flow/reference/config-blocks/argument.md +++ b/docs/sources/flow/reference/config-blocks/argument.md @@ -3,10 +3,11 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/config-blocks/argument/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/argument/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/argument/ +- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/argument/ canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/argument/ -title: argument block -menuTitle: argument description: Learn about the argument configuration block +menuTitle: argument +title: argument block --- # argument block @@ -16,7 +17,7 @@ input to a [Module][Modules]. `argument` blocks must be given a label which determines the name of the argument. The `argument` block may not be specified in the main configuration file given -to Grafana Agent Flow. +to {{< param "PRODUCT_NAME" >}}. 
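Anticipating the arguments documented below, the following is a small sketch of a hypothetical module file that declares an optional argument with a default value; the argument name and default are illustrative only:

```river
// Hypothetical module file: declares an optional argument with a default.
argument "scrape_interval" {
  comment  = "How often the module should scrape its targets."
  optional = true
  default  = "60s"
}

// Elsewhere in the module, the supplied (or default) value is referenced as:
//   argument.scrape_interval.value
```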
[Modules]: {{< relref "../../concepts/modules.md" >}} @@ -34,11 +35,11 @@ argument "ARGUMENT_NAME" {} The following arguments are supported: -Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- -`optional` | `bool` | Whether the argument may be omitted. | `false` | no -`comment` | `string` | Description for the argument. | `false` | no -`default` | `any` | Default value for the argument. | `null` | no +Name | Type | Description | Default | Required +-----------|----------|--------------------------------------|---------|--------- +`comment` | `string` | Description for the argument. | `false` | no +`default` | `any` | Default value for the argument. | `null` | no +`optional` | `bool` | Whether the argument may be omitted. | `false` | no By default, all module arguments are required. The `optional` argument can be used to mark the module argument as optional. When `optional` is `true`, the @@ -58,7 +59,7 @@ value provided by the module loader. ## Example -This example creates a module where agent metrics are collected. Collected +This example creates a module where {{< param "PRODUCT_NAME" >}} metrics are collected. Collected metrics are then forwarded to the argument specified by the loader: ```river diff --git a/docs/sources/flow/reference/config-blocks/export.md b/docs/sources/flow/reference/config-blocks/export.md index 1a376ee050f1..950455ffbbf4 100644 --- a/docs/sources/flow/reference/config-blocks/export.md +++ b/docs/sources/flow/reference/config-blocks/export.md @@ -3,20 +3,19 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/config-blocks/export/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/export/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/export/ +- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/export/ canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/export/ -title: export block -menuTitle: export description: Learn about the export configuration block +menuTitle: export +title: export block --- # export block -`export` is an optional configuration block used to specify an emitted value of -a [Module][Modules]. `export` blocks must be given a label which determine the -name of the export. +`export` is an optional configuration block used to specify an emitted value of a [Module][Modules]. +`export` blocks must be given a label which determines the name of the export. -The `export` block may not be specified in the main configuration file given -to Grafana Agent Flow. +The `export` block may not be specified in the main configuration file given to {{< param "PRODUCT_NAME" >}}. [Modules]: {{< relref "../../concepts/modules.md" >}} @@ -32,22 +31,20 @@ export "ARGUMENT_NAME" { The following arguments are supported: -Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- -`value` | `any` | Value to export. | yes +Name | Type | Description | Default | Required +--------|-------|------------------|---------|--------- +`value` | `any` | Value to export. | | yes -The `value` argument determines what the value of the export will be. To expose -an exported field of another component to the module loader, set `value` to an -expression which references that exported value. +The `value` argument determines what the value of the export will be.
+To expose an exported field of another component to the module loader, set `value` to an expression which references that exported value. ## Exported fields -The `export` block does not export any fields. +The `export` block doesn't export any fields. ## Example -This example creates a module where the output of discovering Kubernetes pods -and nodes are exposed to the module loader: +This example creates a module where the output of discovering Kubernetes pods and nodes are exposed to the module loader: ```river discovery.kubernetes "pods" { diff --git a/docs/sources/flow/reference/config-blocks/http.md b/docs/sources/flow/reference/config-blocks/http.md index 98ac938b6395..39ffa5b2502c 100644 --- a/docs/sources/flow/reference/config-blocks/http.md +++ b/docs/sources/flow/reference/config-blocks/http.md @@ -3,26 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/config-blocks/http/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/http/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/http/ +- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/http/ canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/http/ -title: http block -menuTitle: http description: Learn about the http configuration block +menuTitle: http +title: http block --- # http block -`http` is an optional configuration block used to customize how Grafana Agent's -HTTP server functions. `http` is specified without a label and can only be -provided once per configuration file. - -{{% admonition type="note" %}} -While the `http` block can reference component exports, some components that -rely on the HTTP server have a hidden dependency on the `http` block that may -result in a circular dependency error. - -Only references to components named `remote.*` or `local.*` are guaranteed to -work without any circular dependency errors. -{{% /admonition %}} +`http` is an optional configuration block used to customize how the {{< param "PRODUCT_NAME" >}} HTTP server functions. +`http` is specified without a label and can only be provided once per configuration file. ## Example @@ -37,19 +28,18 @@ http { ## Arguments -The `http` block supports no arguments and is configured completely through -inner blocks. +The `http` block supports no arguments and is configured completely through inner blocks. ## Blocks The following blocks are supported inside the definition of `http`: -Hierarchy | Block | Description | Required ---------- |--------------------------------|---------------------------------------------------------------| -------- -tls | [tls][] | Define TLS settings for the HTTP server. | no -tls > windows_certificate_filter | [windows_certificate_filter][] | Configure Windows certificate store for all certificates. | no -tls > windows_certificate_filter > server | [server][] | Configure server certificates for Windows certificate filter. | no +Hierarchy | Block | Description | Required +------------------------------------------|--------------------------------|---------------------------------------------------------------|--------- +tls | [tls][] | Define TLS settings for the HTTP server. | no +tls > windows_certificate_filter | [windows_certificate_filter][] | Configure Windows certificate store for all certificates. | no tls > windows_certificate_filter > client | [client][] | Configure client certificates for Windows certificate filter. 
| no +tls > windows_certificate_filter > server | [server][] | Configure server certificates for Windows certificate filter. | no [tls]: #tls-block [windows_certificate_filter]: #windows-certificate-filter-block @@ -61,14 +51,10 @@ tls > windows_certificate_filter > client | [client][] | Con The `tls` block configures TLS settings for the HTTP server. {{% admonition type="warning" %}} -If you add the `tls` block and reload the configuration when Grafana -Agent is running, existing connections will continue communicating over -plaintext. Similarly, if you remove the `tls` block and reload the configuration -when Grafana Agent is running, existing connections will continue -communicating over TLS. - -To ensure all connections use TLS, configure the `tls` block before you start -Grafana Agent. +If you add the `tls` block and reload the configuration when {{< param "PRODUCT_NAME" >}} is running, existing connections will continue communicating over plaintext. +Similarly, if you remove the `tls` block and reload the configuration when {{< param "PRODUCT_NAME" >}} is running, existing connections will continue communicating over TLS. + +To ensure all connections use TLS, configure the `tls` block before you start {{< param "PRODUCT_NAME" >}}. {{% /admonition %}} Name | Type | Description | Default | Required @@ -79,7 +65,7 @@ Name | Type | Description | Default | Required `key_file` | `string` | Path to the server TLS key on disk. | `""` | conditionally `client_ca_pem` | `string` | PEM data of the client CA to validate requests against. | `""` | no `client_ca_file` | `string` | Path to the client CA file on disk to validate requests against. | `""` | no -`client_auth` | `string` | Client authentication to use. | `"NoClientCert"` | no +`client_auth_type` | `string` | Client authentication to use. | `"NoClientCert"` | no `cipher_suites` | `list(string)` | Set of cipher suites to use. | `[]` | no `curve_preferences` | `list(string)` | Set of elliptic curves to use in a handshake. | `[]` | no `min_version` | `string` | Oldest TLS version to accept from clients. | `""` | no @@ -96,12 +82,12 @@ configured at a time: * `key_pem` and `key_file` * `client_ca_pem` and `client_ca_file` -The `client_auth` argument determines whether to validate client certificates. +The `client_auth_type` argument determines whether to validate client certificates. The default value, `NoClientCert`, indicates that the client certificate is not validated. The `client_ca_pem` and `client_ca_file` arguments may only -be configured when `client_auth` is not `NoClientCert`. +be configured when `client_auth_type` is not `NoClientCert`. -The following values are accepted for `client_auth`: +The following values are accepted for `client_auth_type`: * `NoClientCert`: client certificates are neither requested nor validated. * `RequestClientCert`: requests clients to send an optional certificate. Certificates provided by clients are not validated. @@ -110,7 +96,7 @@ The following values are accepted for `client_auth`: * `RequireAndVerifyClientCert`: requires clients to send a valid certificate. The `client_ca_pem` or `client_ca_file` arguments may be used to perform client -certificate validation. These arguments may only be provided when `client_auth` +certificate validation. These arguments may only be provided when `client_auth_type` is not set to `NoClientCert`. The `cipher_suites` argument determines what cipher suites to use. 
If not @@ -183,16 +169,16 @@ will serve the found certificate even if it is not compatible with the specified ### server block -The `server` block is used to find the certificate to check the signer. If multiple certificates are found the +The `server` block is used to find the certificate to check the signer. If multiple certificates are found the `windows_certificate_filter` will choose the certificate with the expiration farthest in the future. -Name | Type | Description | Default | Required ----- |----------------|-------------------------------------------------------------------------------------------|---------| -------- -`store` | `string` | Name of the system store to look for the server Certificate, for example, LocalMachine, CurrentUser. | `""` | yes -`system_store` | `string` | Name of the store to look for the server Certificate, for example, My, CA. | `""` | yes -`issuer_common_names` | `list(string)` | Issuer common names to check against. | | no -`template_id` | `string` | Server Template ID to match in ASN1 format, for example, "1.2.3". | `""` | no -`refresh_interval` | `string` | How often to check for a new server certificate. | `"5m"` | no +Name | Type | Description | Default | Required +----------------------|----------------|------------------------------------------------------------------------------------------------------|---------|--------- +`store` | `string` | Name of the system store to look for the server Certificate, for example, LocalMachine, CurrentUser. | `""` | yes +`system_store` | `string` | Name of the store to look for the server Certificate, for example, My, CA. | `""` | yes +`issuer_common_names` | `list(string)` | Issuer common names to check against. | | no +`template_id` | `string` | Server Template ID to match in ASN1 format, for example, "1.2.3". | `""` | no +`refresh_interval` | `string` | How often to check for a new server certificate. | `"5m"` | no @@ -200,9 +186,8 @@ Name | Type | Description The `client` block is used to check the certificate presented to the server. -Name | Type | Description | Default | Required ----- |----------------|--------------------------------------------------------|-----| -------- -`issuer_common_names` | `list(string)` | Issuer common names to check against. | | no -`subject_regex` | `string` | Regular expression to match Subject name. | `""` | no -`template_id` | `string` | Client Template ID to match in ASN1 format, for example, "1.2.3". | `""` | no - +Name | Type | Description | Default | Required +----------------------|----------------|-------------------------------------------------------------------|---------|--------- +`issuer_common_names` | `list(string)` | Issuer common names to check against. | | no +`subject_regex` | `string` | Regular expression to match Subject name. | `""` | no +`template_id` | `string` | Client Template ID to match in ASN1 format, for example, "1.2.3". 
| `""` | no diff --git a/docs/sources/flow/reference/config-blocks/logging.md b/docs/sources/flow/reference/config-blocks/logging.md index 55012461626b..23f3e84e90e8 100644 --- a/docs/sources/flow/reference/config-blocks/logging.md +++ b/docs/sources/flow/reference/config-blocks/logging.md @@ -3,17 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/config-blocks/logging/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/logging/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/logging/ +- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/logging/ canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/logging/ -title: logging block -menuTitle: logging description: Learn about the logging configuration block +menuTitle: logging +title: logging block --- # logging block -`logging` is an optional configuration block used to customize how Grafana -Agent produces log messages. `logging` is specified without a label and can -only be provided once per configuration file. +`logging` is an optional configuration block used to customize how {{< param "PRODUCT_NAME" >}} produces log messages. +`logging` is specified without a label and can only be provided once per configuration file. ## Example @@ -28,11 +28,11 @@ logging { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`level` | `string` | Level at which log lines should be written | `"info"` | no -`format` | `string` | Format to use for writing log lines | `"logfmt"` | no -`write_to` | `list(LogsReceiver)` | List of receivers to send log entries to | | no +Name | Type | Description | Default | Required +-----------|----------------------|--------------------------------------------|------------|--------- +`level` | `string` | Level at which log lines should be written | `"info"` | no +`format` | `string` | Format to use for writing log lines | `"logfmt"` | no +`write_to` | `list(LogsReceiver)` | List of receivers to send log entries to | | no ### Log level @@ -54,27 +54,19 @@ The following strings are recognized as valid log line formats: ### Log receivers -The `write_to` argument allows the Agent to tee its log entries to one or more -`loki.*` component log receivers in addition to the default [location][]. -This, for example can be the export of a `loki.write` component to ship log -entries directly to Loki, or a `loki.relabel` component to add a certain label -first. +The `write_to` argument allows {{< param "PRODUCT_NAME" >}} to tee its log entries to one or more `loki.*` component log receivers in addition to the default [location][]. +This, for example can be the export of a `loki.write` component to ship log entries directly to Loki, or a `loki.relabel` component to add a certain label first. [location]: #log-location ## Log location -Grafana Agent writes all logs to `stderr`. +{{< param "PRODUCT_NAME" >}} writes all logs to `stderr`. -When running Grafana Agent as a systemd service, view logs written to `stderr` -through `journald`. +When running {{< param "PRODUCT_NAME" >}} as a systemd service, view logs written to `stderr` through `journald`. -When running Grafana Agent as a container, view logs written to `stderr` -through `docker logs` or `kubectl logs`, depending on whether Docker or -Kubernetes was used for deploying the agent. 
+When running {{< param "PRODUCT_NAME" >}} as a container, view logs written to `stderr` through `docker logs` or `kubectl logs`, depending on whether Docker or Kubernetes was used for deploying {{< param "PRODUCT_NAME" >}}. -When running Grafana Agent as a Windows service, logs are instead written as -event logs; view logs through Event Viewer. +When running {{< param "PRODUCT_NAME" >}} as a Windows service, logs are instead written as event logs. You can view the logs through Event Viewer. -In other cases, redirect `stderr` of the Grafana Agent process to a file for -logs to persist on disk. +In other cases, redirect `stderr` of the {{< param "PRODUCT_NAME" >}} process to a file for logs to persist on disk. diff --git a/docs/sources/flow/reference/config-blocks/tracing.md b/docs/sources/flow/reference/config-blocks/tracing.md index 269e4969ae6a..860c8e4c7984 100644 --- a/docs/sources/flow/reference/config-blocks/tracing.md +++ b/docs/sources/flow/reference/config-blocks/tracing.md @@ -3,17 +3,17 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/config-blocks/tracing/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/tracing/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/tracing/ +- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/tracing/ canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/tracing/ -title: tracing block -menuTitle: tracing description: Learn about the tracing configuration block +menuTitle: tracing +title: tracing block --- # tracing block -`tracing` is an optional configuration block used to customize how Grafana Agent -produces traces. `tracing` is specified without a label and can only be provided -once per configuration file. +`tracing` is an optional configuration block used to customize how {{< param "PRODUCT_NAME" >}} produces traces. +`tracing` is specified without a label and can only be provided once per configuration file. ## Example @@ -40,10 +40,10 @@ otelcol.exporter.otlp "tempo" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`sampling_fraction` | `number` | Fraction of traces to keep. | `0.1` | no -`write_to` | `list(otelcol.Consumer)` | Inputs from `otelcol` components to send traces to. | `[]` | no +Name | Type | Description | Default | Required +--------------------|--------------------------|-----------------------------------------------------|---------|--------- +`sampling_fraction` | `number` | Fraction of traces to keep. | `0.1` | no +`write_to` | `list(otelcol.Consumer)` | Inputs from `otelcol` components to send traces to. | `[]` | no The `write_to` argument controls which components to send traces to for processing. The elements in the array can be any `otelcol` component that @@ -62,10 +62,10 @@ kept. The following blocks are supported inside the definition of `tracing`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -sampler | [sampler][] | Define custom sampling on top of the base sampling fraction. | no -sampler > jaeger_remote | [jaeger_remote][] | Retrieve sampling information via a Jaeger remote sampler. | no +Hierarchy | Block | Description | Required +------------------------|-------------------|--------------------------------------------------------------|--------- +sampler | [sampler][] | Define custom sampling on top of the base sampling fraction. 
| no +sampler > jaeger_remote | [jaeger_remote][] | Retrieve sampling information via a Jaeger remote sampler. | no The `>` symbol indicates deeper levels of nesting. For example, `sampler > jaeger_remote` refers to a `jaeger_remote` block defined inside an `sampler` @@ -87,14 +87,19 @@ It is invalid to define more than one sampler to use in the `sampler` block. The `jaeger_remote` block configures the retrieval of sampling information through a remote server that exposes Jaeger sampling strategies. -Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- -`url` | `string` | URL to retrieve sampling strategies from. | `"http://127.0.0.1:5778/sampling"` | no -`max_operations` | `number` | Limit number of operations which can have custom sampling. | `256` | no -`refresh_interval` | `duration` | Frequency to poll the URL for new sampling strategies. | `"1m"` | no +Name | Type | Description | Default | Required +-------------------|------------|------------------------------------------------------------|------------------------------------|--------- +`url` | `string` | URL to retrieve sampling strategies from. | `"http://127.0.0.1:5778/sampling"` | no +`max_operations` | `number` | Limit number of operations which can have custom sampling. | `256` | no +`refresh_interval` | `duration` | Frequency to poll the URL for new sampling strategies. | `"1m"` | no The remote sampling strategies are retrieved from the URL specified by the -`url` argument, and polled for updates on a timer. The frequency for how often +`url` argument, and polled for updates on a timer. The frequency for how often polling occurs is controlled by the `refresh_interval` argument.
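Putting the `sampler` and `jaeger_remote` blocks together, a sketch of a complete `tracing` block could look like the following; it reuses the `otelcol.exporter.otlp "tempo"` label from the earlier example and the documented default values, so treat it as illustrative rather than prescriptive:

```river
tracing {
  sampling_fraction = 0.1
  write_to          = [otelcol.exporter.otlp.tempo.input]

  sampler {
    jaeger_remote {
      url              = "http://127.0.0.1:5778/sampling"
      max_operations   = 256
      refresh_interval = "1m"
    }
  }
}
```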
Requests to the remote sampling strategies server are made through an HTTP diff --git a/docs/sources/flow/reference/stdlib/_index.md b/docs/sources/flow/reference/stdlib/_index.md index f08d4fc47d01..8f42f4bc28d4 100644 --- a/docs/sources/flow/reference/stdlib/_index.md +++ b/docs/sources/flow/reference/stdlib/_index.md @@ -3,10 +3,12 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/ - standard-library/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/ +description: The standard library is a list of functions used in expressions when + assigning values to attributes title: Standard library -description: The standard library is a list of functions used in expressions when assigning values to attributes weight: 400 --- diff --git a/docs/sources/flow/reference/stdlib/coalesce.md b/docs/sources/flow/reference/stdlib/coalesce.md index 61c84e688efa..73f5cd444821 100644 --- a/docs/sources/flow/reference/stdlib/coalesce.md +++ b/docs/sources/flow/reference/stdlib/coalesce.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/coalesce/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/coalesce/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/coalesce/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/coalesce/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/coalesce/ -title: coalesce description: Learn about coalesce +title: coalesce --- # coalesce diff --git a/docs/sources/flow/reference/stdlib/concat.md b/docs/sources/flow/reference/stdlib/concat.md index bbab029b0db4..36e7eba906a6 100644 --- a/docs/sources/flow/reference/stdlib/concat.md +++ b/docs/sources/flow/reference/stdlib/concat.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/concat/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/concat/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/concat/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/concat/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/concat/ -title: concat description: Learn about concat +title: concat --- # concat diff --git a/docs/sources/flow/reference/stdlib/constants.md b/docs/sources/flow/reference/stdlib/constants.md index 4b1766ace290..3caf5c336a7c 100644 --- a/docs/sources/flow/reference/stdlib/constants.md +++ b/docs/sources/flow/reference/stdlib/constants.md @@ -4,20 +4,21 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/constants/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/constants/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/constants/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/constants/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/constants/ -title: constants description: Learn about constants +title: constants --- # constants The `constants` object exposes a list of constant values about the system -Grafana Agent is running on: +{{< param "PRODUCT_NAME" >}} is running on: -* `constants.hostname`: The hostname of the machine Grafana Agent is running +* `constants.hostname`: The hostname of the machine {{< param "PRODUCT_NAME" >}} is 
running on. -* `constants.os`: The operating system Grafana Agent is running on. -* `constants.arch`: The architecture of the system Grafana Agent is running on. +* `constants.os`: The operating system {{< param "PRODUCT_NAME" >}} is running on. +* `constants.arch`: The architecture of the system {{< param "PRODUCT_NAME" >}} is running on. ## Examples diff --git a/docs/sources/flow/reference/stdlib/env.md b/docs/sources/flow/reference/stdlib/env.md index b3b0723351bb..49a65d1a6a8b 100644 --- a/docs/sources/flow/reference/stdlib/env.md +++ b/docs/sources/flow/reference/stdlib/env.md @@ -4,16 +4,16 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/env/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/env/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/env/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/env/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/env/ -title: env description: Learn about env +title: env --- # env -The `env` function gets the value of an environment variable from the system -Grafana Agent is running on. If the environment variable does not exist, `env` -returns an empty string. +The `env` function gets the value of an environment variable from the system {{< param "PRODUCT_NAME" >}} is running on. +If the environment variable does not exist, `env` returns an empty string. ## Examples diff --git a/docs/sources/flow/reference/stdlib/format.md b/docs/sources/flow/reference/stdlib/format.md index 1309796cdeac..be5d9cd754c1 100644 --- a/docs/sources/flow/reference/stdlib/format.md +++ b/docs/sources/flow/reference/stdlib/format.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/format/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/format/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/format/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/format/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/format/ -title: format description: Learn about format +title: format --- # format @@ -57,9 +58,9 @@ The specification may contain the following verbs. | `%%` | Literal percent sign, consuming no value. | | `%t` | Convert to boolean and produce `true` or `false`. | | `%b` | Convert to integer number and produce binary representation. | -| `%d` | Convert to integer and produce decimal representation. | -| `%o` | Convert to integer and produce octal representation. | -| `%x` | Convert to integer and produce hexadecimal representation with lowercase letters. | +| `%d` | Convert to integer and produce decimal representation. | +| `%o` | Convert to integer and produce octal representation. | +| `%x` | Convert to integer and produce hexadecimal representation with lowercase letters. | | `%X` | Like `%x`, but use uppercase letters. | | `%e` | Convert to number and produce scientific notation, like `-1.234456e+78`. | | `%E` | Like `%e`, but use an uppercase `E` to introduce the exponent. 
| diff --git a/docs/sources/flow/reference/stdlib/join.md b/docs/sources/flow/reference/stdlib/join.md index 8fd2d578c692..3203585c81c1 100644 --- a/docs/sources/flow/reference/stdlib/join.md +++ b/docs/sources/flow/reference/stdlib/join.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/join/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/join/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/join/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/join/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/join/ -title: join description: Learn about join +title: join --- # join diff --git a/docs/sources/flow/reference/stdlib/json_decode.md b/docs/sources/flow/reference/stdlib/json_decode.md index c82b2acdc09c..d56fc45dabab 100644 --- a/docs/sources/flow/reference/stdlib/json_decode.md +++ b/docs/sources/flow/reference/stdlib/json_decode.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/json_decode/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/json_decode/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/json_decode/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/json_decode/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/json_decode/ -title: json_decode description: Learn about json_decode +title: json_decode --- # json_decode diff --git a/docs/sources/flow/reference/stdlib/json_path.md b/docs/sources/flow/reference/stdlib/json_path.md index 386f27b061be..91058e6e31fe 100644 --- a/docs/sources/flow/reference/stdlib/json_path.md +++ b/docs/sources/flow/reference/stdlib/json_path.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/json_path/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/json_path/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/json_path/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/json_path/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/json_path/ -title: json_path description: Learn about json_path +title: json_path --- # json_path diff --git a/docs/sources/flow/reference/stdlib/nonsensitive.md b/docs/sources/flow/reference/stdlib/nonsensitive.md index c8f6b6bca14b..a2bb0bd31d49 100644 --- a/docs/sources/flow/reference/stdlib/nonsensitive.md +++ b/docs/sources/flow/reference/stdlib/nonsensitive.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/nonsensitive/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/nonsensitive/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/nonsensitive/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/nonsensitive/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/nonsensitive/ -title: nonsensitive description: Learn about nonsensitive +title: nonsensitive --- # nonsensitive @@ -19,7 +20,7 @@ description: Learn about nonsensitive > Strings resulting from calls to `nonsensitive` will be displayed in plaintext > in the UI and internal API calls. 
-[secret]: {{< relref "../../config-language/expressions/types_and_values.md#secrets" >}} +[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} ## Examples diff --git a/docs/sources/flow/reference/stdlib/replace.md b/docs/sources/flow/reference/stdlib/replace.md index dde0057d7840..2c1eb383f390 100644 --- a/docs/sources/flow/reference/stdlib/replace.md +++ b/docs/sources/flow/reference/stdlib/replace.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/replace/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/replace/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/replace/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/replace/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/replace/ -title: replace description: Learn about replace +title: replace --- # replace diff --git a/docs/sources/flow/reference/stdlib/split.md b/docs/sources/flow/reference/stdlib/split.md index 15a5f304f4d1..3087ca153669 100644 --- a/docs/sources/flow/reference/stdlib/split.md +++ b/docs/sources/flow/reference/stdlib/split.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/split/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/split/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/split/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/split/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/split/ -title: split description: Learn about split +title: split --- # split diff --git a/docs/sources/flow/reference/stdlib/to_lower.md b/docs/sources/flow/reference/stdlib/to_lower.md index d344850bd52e..8c252fb354a8 100644 --- a/docs/sources/flow/reference/stdlib/to_lower.md +++ b/docs/sources/flow/reference/stdlib/to_lower.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/to_lower/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/to_lower/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/to_lower/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/to_lower/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/to_lower/ -title: to_lower description: Learn about to_lower +title: to_lower --- # to_lower diff --git a/docs/sources/flow/reference/stdlib/to_upper.md b/docs/sources/flow/reference/stdlib/to_upper.md index 439cd64d8f48..aef26d5ff669 100644 --- a/docs/sources/flow/reference/stdlib/to_upper.md +++ b/docs/sources/flow/reference/stdlib/to_upper.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/to_upper/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/to_upper/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/to_upper/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/to_upper/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/to_upper/ -title: to_upper description: Learn about to_upper +title: to_upper --- # to_upper diff --git a/docs/sources/flow/reference/stdlib/trim.md b/docs/sources/flow/reference/stdlib/trim.md index 603078715ef6..5023d1f21328 100644 --- a/docs/sources/flow/reference/stdlib/trim.md +++ b/docs/sources/flow/reference/stdlib/trim.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/trim/ - 
/docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/trim/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/trim/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/trim/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/trim/ -title: trim description: Learn about trim +title: trim --- # trim diff --git a/docs/sources/flow/reference/stdlib/trim_prefix.md b/docs/sources/flow/reference/stdlib/trim_prefix.md index 9179274e28a2..33d716f133e4 100644 --- a/docs/sources/flow/reference/stdlib/trim_prefix.md +++ b/docs/sources/flow/reference/stdlib/trim_prefix.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/trim_prefix/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/trim_prefix/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/trim_prefix/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/trim_prefix/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/trim_prefix/ -title: trim_prefix description: Learn about trim_prefix +title: trim_prefix --- # trim_prefix diff --git a/docs/sources/flow/reference/stdlib/trim_space.md b/docs/sources/flow/reference/stdlib/trim_space.md index 7ce358064f29..5e13e0ba0df3 100644 --- a/docs/sources/flow/reference/stdlib/trim_space.md +++ b/docs/sources/flow/reference/stdlib/trim_space.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/trim_space/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/trim_space/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/trim_space/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/trim_space/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/trim_space/ -title: trim_space description: Learn about trim_space +title: trim_space --- # trim_space diff --git a/docs/sources/flow/reference/stdlib/trim_suffix.md b/docs/sources/flow/reference/stdlib/trim_suffix.md index a24a5e6e4294..4741007ebe4b 100644 --- a/docs/sources/flow/reference/stdlib/trim_suffix.md +++ b/docs/sources/flow/reference/stdlib/trim_suffix.md @@ -4,9 +4,10 @@ aliases: - /docs/grafana-cloud/agent/flow/reference/stdlib/trim_suffix/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/trim_suffix/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/trim_suffix/ +- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/trim_suffix/ canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/trim_suffix/ -title: trim_suffix description: Learn about trim_suffix +title: trim_suffix --- # trim_suffix diff --git a/docs/sources/flow/release-notes.md b/docs/sources/flow/release-notes.md index f45b5d3291f7..f8053bf3c0b3 100644 --- a/docs/sources/flow/release-notes.md +++ b/docs/sources/flow/release-notes.md @@ -4,22 +4,23 @@ aliases: - /docs/grafana-cloud/agent/flow/release-notes/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/release-notes/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/release-notes/ +- /docs/grafana-cloud/send-data/agent/flow/release-notes/ canonical: https://grafana.com/docs/agent/latest/flow/release-notes/ -description: Release notes for Grafana Agent flow mode +description: Release notes for Grafana Agent Flow menuTitle: Release notes -title: Release notes for Grafana Agent flow mode +title: Release notes for Grafana Agent Flow weight: 999 
--- -# Release notes for Grafana Agent flow mode +# Release notes for {{% param "PRODUCT_NAME" %}} -The release notes provide information about deprecations and breaking changes in Grafana Agent flow mode. +The release notes provide information about deprecations and breaking changes in {{< param "PRODUCT_NAME" >}}. -For a complete list of changes to Grafana Agent, with links to pull requests and related issues when available, refer to the [Changelog](https://github.com/grafana/agent/blob/main/CHANGELOG.md). +For a complete list of changes to {{< param "PRODUCT_ROOT_NAME" >}}, with links to pull requests and related issues when available, refer to the [Changelog](https://github.com/grafana/agent/blob/main/CHANGELOG.md). {{% admonition type="note" %}} -These release notes are specific to Grafana Agent flow mode. -Other release notes for the different Grafana Agent variants are contained on separate pages: +These release notes are specific to {{< param "PRODUCT_NAME" >}}. +Other release notes for the different {{< param "PRODUCT_ROOT_NAME" >}} variants are contained on separate pages: * [Static mode release notes][release-notes-static] * [Static mode Kubernetes operator release notes][release-notes-operator] @@ -28,6 +29,53 @@ Other release notes for the different Grafana Agent variants are contained on se [release-notes-operator]: {{< relref "../operator/release-notes.md" >}} {{% /admonition %}} +## v0.39 + +### Breaking change: `otelcol.receiver.prometheus` will drop all `otel_scope_info` metrics when converting them to OTLP + +* If the `otel_scope_info` metric has the `otel_scope_name` and `otel_scope_version` labels, + their values are used to set the OTLP Instrumentation Scope name and version, respectively. +* Labels for `otel_scope_info` metrics other than `otel_scope_name` and `otel_scope_version` + are added as scope attributes with the matching name and version. + +### Breaking change: label for `target` block in `prometheus.exporter.blackbox` is removed + +Previously in `prometheus.exporter.blackbox`, the `target` block required a label which was used in the job's name. +In this version, you need to specify the `name` attribute instead, which allows less restrictive naming. + +Old configuration example: + +```river +prometheus.exporter.blackbox "example" { + config_file = "blackbox_modules.yml" + + target "grafana" { + address = "http://grafana.com" + module = "http_2xx" + labels = { + "env": "dev", + } + } +} +``` + +New configuration example: + +```river +prometheus.exporter.blackbox "example" { + config_file = "blackbox_modules.yml" + + target { + name = "grafana" + address = "http://grafana.com" + module = "http_2xx" + labels = { + "env": "dev", + } + } +} +``` + ## v0.38 ### Breaking change: `otelcol.exporter.jaeger` component removed @@ -40,7 +88,7 @@ supports OTLP. ### Breaking change: Renamed `non_indexed_labels` Loki processing stage to `structured_metadata`. -If you use the Loki processing stage in your Agent configuration, you must rename the `non_indexed_labels` pipeline stage definition to `structured_metadata`. +If you use the Loki processing stage in your {{< param "PRODUCT_NAME" >}} configuration, you must rename the `non_indexed_labels` pipeline stage definition to `structured_metadata`. Old configuration example: @@ -57,7 +105,7 @@ stage.structured_metadata { } ``` -### Breaking change: `otelcol.exporter.prometheus` scope labels updated.
+### Breaking change: `otelcol.exporter.prometheus` scope labels updated There are 2 changes to the way scope labels work for this component. @@ -91,7 +139,7 @@ prometheus.exporter.unix "example" { /* ... */ } ### Breaking change: The default value of `retry_on_http_429` is changed to `true` for the `queue_config` in `prometheus.remote_write` The default value of `retry_on_http_429` is changed from `false` to `true` for the `queue_config` block in `prometheus.remote_write` -so that the agent can retry sending and avoid data being lost for metric pipelines by default. +so that {{< param "PRODUCT_ROOT_NAME" >}} can retry sending and avoid data being lost for metric pipelines by default. * If you set the `retry_on_http_429` explicitly - no action is required. * If you do not set `retry_on_http_429` explicitly and you do *not* want to retry on HTTP 429, make sure you set it to `false` as you upgrade to this new version. @@ -107,12 +155,12 @@ format. By default, the decompression of files is entirely disabled. How to migrate: -* If your agent never reads logs from files with +* If {{< param "PRODUCT_NAME" >}} never reads logs from files with extensions `.gz`, `.tar.gz`, `.z` or `.bz2` then no action is required. - > You can check what are the file extensions your agent reads from by looking + > You can check what are the file extensions {{< param "PRODUCT_NAME" >}} reads from by looking at the `path` label on `loki_source_file_file_bytes_total` metric. -* If your agent extracts data from compressed files, please add the following +* If {{< param "PRODUCT_NAME" >}} extracts data from compressed files, please add the following configuration block to your `loki.source.file` component: ```river @@ -330,7 +378,7 @@ The change was made in PR [#18070](https://github.com/open-telemetry/opentelemet The `remote_sampling` block in `otelcol.receiver.jaeger` has been an undocumented no-op configuration for some time, and has now been removed. Customers are advised to use `otelcol.extension.jaeger_remote_sampling` instead. -### Deprecation: `otelcol.exporter.jaeger` has been deprecated and will be removed in Agent v0.38.0. +### Deprecation: `otelcol.exporter.jaeger` has been deprecated and will be removed in {{% param "PRODUCT_NAME" %}} v0.38.0. This is because Jaeger supports OTLP directly and OpenTelemetry Collector is also removing its [Jaeger receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/jaegerexporter). @@ -526,7 +574,7 @@ prometheus.exporter.unix { } As first announced in v0.30.0, support for using the `EXPERIMENTAL_ENABLE_FLOW` environment variable to enable Flow mode has been removed. -To enable Flow mode, set the `AGENT_MODE` environment variable to `flow`. +To enable {{< param "PRODUCT_NAME" >}}, set the `AGENT_MODE` environment variable to `flow`. ## v0.31 @@ -549,7 +597,7 @@ removed. ### Deprecation: `EXPERIMENTAL_ENABLE_FLOW` environment variable changed -As part of graduating Grafana Agent Flow to beta, the +As part of graduating {{< param "PRODUCT_NAME" >}} to beta, the `EXPERIMENTAL_ENABLE_FLOW` environment variable is replaced by setting `AGENT_MODE` to `flow`. 
diff --git a/docs/sources/flow/setup/_index.md b/docs/sources/flow/setup/_index.md index 48afb31b419b..d639fa3eaea1 100644 --- a/docs/sources/flow/setup/_index.md +++ b/docs/sources/flow/setup/_index.md @@ -3,15 +3,16 @@ aliases: - /docs/grafana-cloud/agent/flow/setup/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/ +- /docs/grafana-cloud/send-data/agent/flow/setup/ canonical: https://grafana.com/docs/agent/latest/flow/setup/ -description: Learn how to install and configure Grafana Agent in flow mode -menuTitle: Set up flow mode -title: Set up Grafana Agent in flow mode +description: Learn how to install and configure Grafana Agent Flow +menuTitle: Set up Grafana Agent Flow +title: Set up Grafana Agent Flow weight: 50 --- -# Set up Grafana Agent in flow mode +# Set up {{% param "PRODUCT_NAME" %}} -This section includes information that helps you get Grafana Agent in flow mode installed and configured. +This section includes information that helps you install and configure {{< param "PRODUCT_NAME" >}}. {{< section >}} diff --git a/docs/sources/flow/setup/configure/_index.md b/docs/sources/flow/setup/configure/_index.md deleted file mode 100644 index 8a23c557f4e9..000000000000 --- a/docs/sources/flow/setup/configure/_index.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/setup/configure/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/configure/ -description: Configure Grafana Agent in flow mode after it is installed -menuTitle: Configure flow mode -title: Configure Grafana Agent in flow mode -weight: 150 ---- - -# Configure Grafana Agent in flow mode - -You can configure Grafana Agent in flow mode after it is installed. The default River configuration file for flow mode is located at: - -* Linux: `/etc/grafana-agent-flow.river` -* macOS: `$(brew --prefix)/etc/grafana-agent-flow/config.river` -* Windows: `C:\Program Files\Grafana Agent Flow\config.river` - -This section includes information that helps you configure Grafana Agent in flow mode. - -{{< section >}} diff --git a/docs/sources/flow/setup/configure/configure-macos.md b/docs/sources/flow/setup/configure/configure-macos.md deleted file mode 100644 index ee9bf74dbf79..000000000000 --- a/docs/sources/flow/setup/configure/configure-macos.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/setup/configure/configure-macos/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-macos/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-macos/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/configure/configure-macos/ -description: Learn how to configure Grafana Agent in flow mode on macOS -menuTitle: macOS -title: Configure Grafana Agent in flow mode on macOS -weight: 400 ---- - -# Configure Grafana Agent in flow mode on macOS - -To configure Grafana Agent in flow mode on macOS, perform the following steps: - -1. Edit the default configuration file at `$(brew --prefix)/etc/grafana-agent-flow/config.river`. - -1. 
Run the following command in a terminal to restart the Grafana Agent service: - - ```shell - brew services restart grafana-agent-flow - ``` - -## Configure the Grafana Agent service - -{{% admonition type="note" %}} -Due to limitations in Homebrew, customizing the service used by -Grafana Agent on macOS requires changing the Homebrew formula and -reinstalling Grafana Agent. -{{% /admonition %}} - -To customize the Grafana Agent service on macOS, perform the following -steps: - -1. Run the following command in a terminal: - - ```shell - brew edit grafana-agent-flow - ``` - - This will open the Grafana Agent Homebrew Formula in an editor. - -1. Modify the `service` section as desired to change things such as: - - * The River configuration file used by Grafana Agent. - * Flags passed to the Grafana Agent binary. - * Location of log files. - - When you are done, save the file. - -1. Reinstall the Grafana Agent Formula by running the following command in a terminal: - - ```shell - brew reinstall grafana-agent-flow - ``` - -1. Restart the Grafana Agent service by running the command in a terminal: - - ```shell - brew services restart grafana-agent-flow - ``` - -## Expose the UI to other machines - -By default, Grafana Agent listens on the local network for its HTTP -server. This prevents other machines on the network from being able to access -the [UI for debugging][UI]. - -To expose the UI to other machines, complete the following steps: - -1. Follow [Configure the Grafana Agent service](#configure-the-grafana-agent-service) - to edit command line flags passed to Grafana Agent, including the - following customizations: - - 1. Modify the line inside the `service` block containing - `--server.http.listen-addr=127.0.0.1:12345`, replacing `127.0.0.1` with - the address which other machines on the network have access to, like the - network IP address of the machine Grafana Agent is running on. - - To listen on all interfaces, replace `127.0.0.1` with `0.0.0.0`. 
- -{{% docs/reference %}} -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging.md#grafana-agent-flow-ui" -{{% /docs/reference %}} diff --git a/docs/sources/flow/setup/deploy-agent.md b/docs/sources/flow/setup/deploy-agent.md index fb372d8d14b0..8328e03b65b6 100644 --- a/docs/sources/flow/setup/deploy-agent.md +++ b/docs/sources/flow/setup/deploy-agent.md @@ -3,12 +3,13 @@ aliases: - /docs/grafana-cloud/agent/flow/setup/deploy-agent/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/deploy-agent/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/deploy-agent/ +- /docs/grafana-cloud/send-data/agent/flow/setup/deploy-agent/ canonical: https://grafana.com/docs/agent/latest/flow/setup/start-agent/ -description: Learn about possible deployment topologies for Grafana Agent -menuTitle: Deploy Grafana Agent -title: Grafana Agent deployment topologies +description: Learn about possible deployment topologies for Grafana Agent Flow +menuTitle: Deploy Grafana Agent Flow +title: Grafana Agent Flow deployment topologies weight: 900 --- -{{< docs/shared source="agent" lookup="/deploy-agent.md" version="" >}} +{{< docs/shared source="agent" lookup="/deploy-agent.md" version="" >}} diff --git a/docs/sources/flow/setup/install/_index.md b/docs/sources/flow/setup/install/_index.md index b711666e84cc..8305e7bf9a39 100644 --- a/docs/sources/flow/setup/install/_index.md +++ b/docs/sources/flow/setup/install/_index.md @@ -3,17 +3,18 @@ aliases: - /docs/grafana-cloud/agent/flow/setup/install/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/ +- /docs/grafana-cloud/send-data/agent/flow/setup/install/ - /docs/sources/flow/install/ canonical: https://grafana.com/docs/agent/latest/flow/setup/install/ -menuTitle: Install flow mode -title: Install Grafana Agent in flow mode -description: Learn how to install Grafana Agent in flow mode +description: Learn how to install Grafana Agent Flow +menuTitle: Install Grafana Agent Flow +title: Install Grafana Agent Flow weight: 50 --- -# Install Grafana Agent in flow mode +# Install {{% param "PRODUCT_NAME" %}} -You can install Grafana Agent in flow mode on Docker, Kubernetes, Linux, macOS, or Windows. +You can install {{< param "PRODUCT_NAME" >}} on Docker, Kubernetes, Linux, macOS, or Windows. The following architectures are supported: @@ -23,17 +24,17 @@ The following architectures are supported: - FreeBSD: AMD64 {{% admonition type="note" %}} -Installing Grafana Agent on other operating systems is possible, but is not recommended or supported. +Installing {{< param "PRODUCT_NAME" >}} on other operating systems is possible, but is not recommended or supported. {{% /admonition %}} {{< section >}} ## Data collection -By default, Grafana Agent sends anonymous usage information to Grafana Labs. Refer to [data collection][] for more information +By default, {{< param "PRODUCT_NAME" >}} sends anonymous usage information to Grafana Labs. Refer to [data collection][] for more information about what data is collected and how you can opt-out. 
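If you prefer to opt out at the command line rather than through packaging options, the `run` command exposes a flag for this. The flag name below is an assumption based on the CLI reference, so verify it against the `run` command documentation for your version:

```shell
# --disable-reporting turns off anonymous usage reporting (verify against the run command reference).
grafana-agent-flow run --disable-reporting /etc/grafana-agent-flow.river
```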
{{% docs/reference %}} [data collection]: "/docs/agent/ -> /docs/agent//data-collection.md" -[data collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/data-collection.md" +[data collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/data-collection.md" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/install/binary.md b/docs/sources/flow/setup/install/binary.md index f8fb920e9a93..88560dbf63e0 100644 --- a/docs/sources/flow/setup/install/binary.md +++ b/docs/sources/flow/setup/install/binary.md @@ -4,27 +4,28 @@ aliases: - /docs/grafana-cloud/agent/flow/setup/install/binary/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/binary/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/binary/ +- /docs/grafana-cloud/send-data/agent/flow/setup/install/binary/ canonical: https://grafana.com/docs/agent/latest/flow/setup/install/binary/ -description: Learn how to install Grafana Agent in flow mode as a standalone binary +description: Learn how to install Grafana Agent Flow as a standalone binary menuTitle: Standalone -title: Install Grafana Agent in flow mode as a standalone binary +title: Install Grafana Agent Flow as a standalone binary weight: 600 --- -# Install Grafana Agent in flow mode as a standalone binary +# Install {{% param "PRODUCT_NAME" %}} as a standalone binary -Grafana Agent is distributed as a standalone binary for the following operating systems and architectures: +{{< param "PRODUCT_NAME" >}} is distributed as a standalone binary for the following operating systems and architectures: * Linux: AMD64, ARM64 * Windows: AMD64 * macOS: AMD64 (Intel), ARM64 (Apple Silicon) * FreeBSD: AMD64 -## Download Grafana Agent +## Download {{% param "PRODUCT_ROOT_NAME" %}} -To download Grafana Agent as a standalone binary, perform the following steps. +To download {{< param "PRODUCT_NAME" >}} as a standalone binary, perform the following steps. -1. Navigate to the current Grafana Agent [release](https://github.com/grafana/agent/releases) page. +1. Navigate to the current {{< param "PRODUCT_ROOT_NAME" >}} [release](https://github.com/grafana/agent/releases) page. 1. Scroll down to the **Assets** section. @@ -32,22 +33,24 @@ To download Grafana Agent as a standalone binary, perform the following steps. 1. Extract the package contents into a directory. -1. If you are installing Grafana Agent on Linux, macOS, or FreeBSD, run the following command in a terminal: +1. If you are installing {{< param "PRODUCT_NAME" >}} on Linux, macOS, or FreeBSD, run the following command in a terminal: ```shell - chmod +x BINARY_PATH + chmod +x ``` - Replace `BINARY_PATH` with the path to the extracted binary + Replace the following: + + - _``_: The path to the extracted binary. 
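As a concrete illustration of the extract-and-permission steps above, assuming the Linux AMD64 asset was downloaded (asset and binary names vary by release and platform):

```shell
# Extract the downloaded release asset, then mark the binary as executable.
unzip grafana-agent-linux-amd64.zip
chmod +x grafana-agent-linux-amd64
```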
## Next steps -* [Start Grafana Agent][] -* [Configure Grafana Agent][] +- [Start {{< param "PRODUCT_NAME" >}}][Start] +- [Configure {{< param "PRODUCT_NAME" >}}][Configure] {{% docs/reference %}} -[Start Grafana Agent]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md#standalone-binary" -[Start Grafana Agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/start-agent.md#standalone-binary" -[Configure Grafana Agent]: "/docs/agent/ -> /docs/agent//flow/setup/configure" -[Configure Grafana Agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/" +[Start]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md#standalone-binary" +[Start]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/start-agent.md#standalone-binary" +[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure" +[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/install/docker.md b/docs/sources/flow/setup/install/docker.md index 7497c104c9d8..9460ab71a8f3 100644 --- a/docs/sources/flow/setup/install/docker.md +++ b/docs/sources/flow/setup/install/docker.md @@ -4,16 +4,17 @@ aliases: - /docs/grafana-cloud/agent/flow/setup/install/docker/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/docker/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/docker/ +- /docs/grafana-cloud/send-data/agent/flow/setup/install/docker/ canonical: https://grafana.com/docs/agent/latest/flow/setup/install/docker/ -description: Learn how to install Grafana Agent in flow mode on Docker +description: Learn how to install Grafana Agent Flow on Docker menuTitle: Docker -title: Run Grafana Agent in flow mode in a Docker container +title: Run Grafana Agent Flow in a Docker container weight: 100 --- -# Run Grafana Agent in flow mode in a Docker container +# Run {{% param "PRODUCT_NAME" %}} in a Docker container -Grafana Agent is available as a Docker container image on the following platforms: +{{< param "PRODUCT_NAME" >}} is available as a Docker container image on the following platforms: * [Linux containers][] for AMD64 and ARM64. * [Windows containers][] for AMD64. @@ -21,7 +22,7 @@ Grafana Agent is available as a Docker container image on the following platform ## Before you begin * Install [Docker][] on your computer. -* Create and save a Grafana Agent River configuration file on your computer, for example: +* Create and save a {{< param "PRODUCT_NAME" >}} River configuration file on your computer, for example: ```river logging { @@ -32,51 +33,57 @@ Grafana Agent is available as a Docker container image on the following platform ## Run a Linux Docker container -To run Grafana Agent in flow mode as a Linux Docker container, run the following command in a terminal window: +To run {{< param "PRODUCT_NAME" >}} as a Linux Docker container, run the following command in a terminal window: ```shell docker run \ -e AGENT_MODE=flow \ - -v CONFIG_FILE_PATH:/etc/agent/config.river \ + -v <CONFIG_FILE_PATH>:/etc/agent/config.river \ -p 12345:12345 \ grafana/agent:latest \ run --server.http.listen-addr=0.0.0.0:12345 /etc/agent/config.river ``` -Replace `CONFIG_FILE_PATH` with the path of the configuration file on your host system. +Replace the following: -You can modify the last line to change the arguments passed to the Grafana Agent binary.
-Refer to the documentation for [run][] for more information about the options available to the `run` command. +- _`<CONFIG_FILE_PATH>`_: The path of the configuration file on your host system. -> **Note:** Make sure you pass `--server.http.listen-addr=0.0.0.0:12345` as an argument as shown in the example above. -> If you don't pass this argument, the [debugging UI][UI] won't be available outside of the Docker container. +You can modify the last line to change the arguments passed to the {{< param "PRODUCT_NAME" >}} binary. +Refer to the documentation for [run][] for more information about the options available to the `run` command. +{{% admonition type="note" %}} +Make sure you pass `--server.http.listen-addr=0.0.0.0:12345` as an argument as shown in the example above. +If you don't pass this argument, the [debugging UI][UI] won't be available outside of the Docker container. +{{% /admonition %}} ## Run a Windows Docker container -To run Grafana Agent in flow mode as a Windows Docker container, run the following command in a terminal window: +To run {{< param "PRODUCT_NAME" >}} as a Windows Docker container, run the following command in a terminal window: ```shell docker run \ -e AGENT_MODE=flow \ - -v CONFIG_FILE_PATH:C:\etc\grafana-agent\config.river \ + -v <CONFIG_FILE_PATH>:C:\etc\grafana-agent\config.river \ -p 12345:12345 \ grafana/agent:latest-windows \ run --server.http.listen-addr=0.0.0.0:12345 C:\etc\grafana-agent\config.river ``` -Replace `CONFIG_FILE_PATH` with the path of the configuration file on your host system. +Replace the following: -You can modify the last line to change the arguments passed to the Grafana Agent binary. -Refer to the documentation for [run][] for more information about the options available to the `run` command. +- _`<CONFIG_FILE_PATH>`_: The path of the configuration file on your host system. +You can modify the last line to change the arguments passed to the {{< param "PRODUCT_NAME" >}} binary. +Refer to the documentation for [run][] for more information about the options available to the `run` command. -> **Note:** Make sure you pass `--server.http.listen-addr=0.0.0.0:12345` as an argument as shown in the example above. -> If you don't pass this argument, the [debugging UI][UI] won't be available outside of the Docker container. +{{% admonition type="note" %}} +Make sure you pass `--server.http.listen-addr=0.0.0.0:12345` as an argument as shown in the example above. +If you don't pass this argument, the [debugging UI][UI] won't be available outside of the Docker container. +{{% /admonition %}} ## Verify -To verify that Grafana Agent is running successfully, navigate to http://localhost:12345 and make sure the [Grafana Agent UI][UI] loads without error. +To verify that {{< param "PRODUCT_NAME" >}} is running successfully, navigate to http://localhost:12345 and make sure the {{< param "PRODUCT_NAME" >}} [UI][] loads without error.
[Linux containers]: #run-a-linux-docker-container [Windows containers]: #run-a-windows-docker-container @@ -84,7 +91,7 @@ To verify that Grafana Agent is running successfully, navigate to /docs/agent//flow/reference/cli/run.md" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/run.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging.md#grafana-agent-flow-ui" +[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/install/kubernetes.md b/docs/sources/flow/setup/install/kubernetes.md index bf2307dd5bfd..3bd0a3240fbc 100644 --- a/docs/sources/flow/setup/install/kubernetes.md +++ b/docs/sources/flow/setup/install/kubernetes.md @@ -4,31 +4,32 @@ aliases: - /docs/grafana-cloud/agent/flow/setup/install/kubernetes/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/kubernetes/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/kubernetes/ +- /docs/grafana-cloud/send-data/agent/flow/setup/install/kubernetes/ canonical: https://grafana.com/docs/agent/latest/flow/setup/install/kubernetes/ -description: Learn how to deploy Grafana Agent in flow mode on Kubernetes +description: Learn how to deploy Grafana Agent Flow on Kubernetes menuTitle: Kubernetes -title: Deploy Grafana Agent in flow mode on Kubernetes +title: Deploy Grafana Agent Flow on Kubernetes weight: 200 --- -# Deploy Grafana Agent in flow mode on Kubernetes +# Deploy {{% param "PRODUCT_NAME" %}} on Kubernetes -Grafana Agent can be deployed on Kubernetes by using the Helm chart for Grafana Agent. +{{< param "PRODUCT_NAME" >}} can be deployed on Kubernetes by using the Helm chart for {{< param "PRODUCT_ROOT_NAME" >}}. ## Before you begin * Install [Helm][] on your computer. -* Configure a Kubernetes cluster that you can use for Grafana Agent. +* Configure a Kubernetes cluster that you can use for {{< param "PRODUCT_NAME" >}}. * Configure your local Kubernetes context to point to the cluster. ## Deploy {{% admonition type="note" %}} -These instructions show you how to install the generic [Helm chart](https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent) for Grafana -Agent. You can deploy Grafana Agent either in static mode or flow mode. The Helm chart deploys Grafana Agent in flow mode by default. +These instructions show you how to install the generic [Helm chart](https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent) for {{< param "PRODUCT_NAME" >}}. +You can deploy {{< param "PRODUCT_ROOT_NAME" >}} either in static mode or flow mode. The Helm chart deploys {{< param "PRODUCT_NAME" >}} by default. {{% /admonition %}} -To deploy Grafana Agent on Kubernetes using Helm, run the following commands in a terminal window: +To deploy {{< param "PRODUCT_ROOT_NAME" >}} on Kubernetes using Helm, run the following commands in a terminal window: 1. Add the Grafana Helm chart repository: @@ -42,26 +43,27 @@ To deploy Grafana Agent on Kubernetes using Helm, run the following commands in helm repo update ``` -1. Install Grafana Agent: +1. 
Install {{< param "PRODUCT_ROOT_NAME" >}}: ```shell - helm install RELEASE_NAME grafana/grafana-agent + helm install grafana/grafana-agent ``` - Replace `RELEASE_NAME` with a name to use for your Grafana Agent - installation, such as `grafana-agent-flow`. + Replace the following: -For more information on the Grafana Agent Helm chart, refer to the Helm chart documentation on [Artifact Hub][]. + - _``_: The name to use for your {{< param "PRODUCT_ROOT_NAME" >}} installation, such as `grafana-agent-flow`. + +For more information on the {{< param "PRODUCT_ROOT_NAME" >}} Helm chart, refer to the Helm chart documentation on [Artifact Hub][]. [Artifact Hub]: https://artifacthub.io/packages/helm/grafana/grafana-agent ## Next steps -- [Configure Grafana Agent][] +- [Configure {{< param "PRODUCT_NAME" >}}][Configure] [Helm]: https://helm.sh {{% docs/reference %}} -[Configure Grafana Agent]: "/docs/agent/ -> /docs/agent//flow/setup/configure/configure-kubernetes.md" -[Configure Grafana Agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-kubernetes.md" +[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-kubernetes.md" +[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-kubernetes.md" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/install/linux.md b/docs/sources/flow/setup/install/linux.md index 0c7feaa0940a..12e4f3323e7e 100644 --- a/docs/sources/flow/setup/install/linux.md +++ b/docs/sources/flow/setup/install/linux.md @@ -4,20 +4,21 @@ aliases: - /docs/grafana-cloud/agent/flow/setup/install/linux/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/linux/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/linux/ +- /docs/grafana-cloud/send-data/agent/flow/setup/install/linux/ canonical: https://grafana.com/docs/agent/latest/flow/setup/install/linux/ -description: Learn how to install Grafana Agent in flow mode on Linux +description: Learn how to install Grafana Agent Flow on Linux menuTitle: Linux -title: Install or uninstall Grafana Agent in flow mode on Linux +title: Install Grafana Agent Flow on Linux weight: 300 --- -# Install or uninstall Grafana Agent in flow mode on Linux +# Install or uninstall {{% param "PRODUCT_NAME" %}} on Linux -You can install Grafana Agent in flow mode as a systemd service on Linux. +You can install {{< param "PRODUCT_NAME" >}} as a systemd service on Linux. ## Install -To install Grafana Agent in flow mode on Linux, run the following commands in a terminal window. +To install {{< param "PRODUCT_NAME" >}} on Linux, run the following commands in a terminal window. 1. Import the GPG key and add the Grafana package repository. @@ -58,7 +59,7 @@ sslcacert=/etc/pki/tls/certs/ca-bundle.crt' | sudo tee /etc/yum.repos.d/grafana. ``` {{< /code >}} -1. Install Grafana Agent. +1. Install {{< param "PRODUCT_NAME" >}}. {{< code >}} ```debian-ubuntu @@ -76,15 +77,15 @@ sslcacert=/etc/pki/tls/certs/ca-bundle.crt' | sudo tee /etc/yum.repos.d/grafana. ## Uninstall -To uninstall Grafana Agent on Linux, run the following commands in a terminal window. +To uninstall {{< param "PRODUCT_NAME" >}} on Linux, run the following commands in a terminal window. -1. Stop the systemd service for Grafana Agent. +1. Stop the systemd service for {{< param "PRODUCT_NAME" >}}. ```All-distros sudo systemctl stop grafana-agent-flow ``` -1. Uninstall Grafana Agent. +1. Uninstall {{< param "PRODUCT_NAME" >}}. 
{{< code >}} ```debian-ubuntu @@ -118,12 +119,12 @@ To uninstall Grafana Agent on Linux, run the following commands in a terminal wi ## Next steps -- [Start Grafana Agent][] -- [Configure Grafana Agent][] +- [Start {{< param "PRODUCT_NAME" >}}][Start] +- [Configure {{< param "PRODUCT_NAME" >}}][Configure] {{% docs/reference %}} -[Start Grafana Agent]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md#linux" -[Start Grafana Agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/start-agent.md#linux" -[Configure Grafana Agent]: "/docs/agent/ -> /docs/agent//flow/setup/configure/configure-linux.md" -[Configure Grafana Agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-linux.md" +[Start]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md#linux" +[Start]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/start-agent.md#linux" +[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" +[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/install/macos.md b/docs/sources/flow/setup/install/macos.md index 8527a7ee0841..9aa7e86ee265 100644 --- a/docs/sources/flow/setup/install/macos.md +++ b/docs/sources/flow/setup/install/macos.md @@ -4,16 +4,17 @@ aliases: - /docs/grafana-cloud/agent/flow/setup/install/macos/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/macos/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/macos/ +- /docs/grafana-cloud/send-data/agent/flow/setup/install/macos/ canonical: https://grafana.com/docs/agent/latest/flow/setup/install/macos/ -description: Learn how to install Grafana Agent in flow mode on macOS +description: Learn how to install Grafana AgentFlow on macOS menuTitle: macOS -title: Install Grafana Agent in flow mode on macOS +title: Install Grafana Agent Flow on macOS weight: 400 --- -# Install Grafana Agent in flow mode on macOS +# Install {{% param "PRODUCT_NAME" %}} on macOS -You can install Grafana Agent in flow mode on macOS with Homebrew . +You can install {{< param "PRODUCT_NAME" >}} on macOS with Homebrew . {{% admonition type="note" %}} The default prefix for Homebrew on Intel is `/usr/local`. The default prefix for Homebrew on Apple Silicon is `/opt/Homebrew`. To verify the default prefix for Homebrew on your computer, open a terminal window and type `brew --prefix`. @@ -25,7 +26,7 @@ The default prefix for Homebrew on Intel is `/usr/local`. The default prefix for ## Install -To install Grafana Agent on macOS, run the following commands in a terminal window. +To install {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in a terminal window. 1. Add the Grafana Homebrew tap: @@ -33,7 +34,7 @@ To install Grafana Agent on macOS, run the following commands in a terminal wind brew tap grafana/grafana ``` -1. Install Grafana Agent: +1. Install {{< param "PRODUCT_NAME" >}}: ```shell brew install grafana-agent-flow @@ -41,15 +42,15 @@ To install Grafana Agent on macOS, run the following commands in a terminal wind ## Upgrade -To upgrade Grafana Agent on macOS, run the following commands in a terminal window. +To upgrade {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in a terminal window. -1. Upgrade Grafana Agent: +1. 
Upgrade {{< param "PRODUCT_NAME" >}}: ```shell brew upgrade grafana-agent-flow ``` -1. Restart Grafana Agent: +1. Restart {{< param "PRODUCT_NAME" >}}: ```shell brew services restart grafana-agent-flow @@ -57,7 +58,7 @@ To upgrade Grafana Agent on macOS, run the following commands in a terminal wind ## Uninstall -To uninstall Grafana Agent on macOS, run the following command in a terminal window: +To uninstall {{< param "PRODUCT_NAME" >}} on macOS, run the following command in a terminal window: ```shell brew uninstall grafana-agent-flow @@ -65,14 +66,14 @@ brew uninstall grafana-agent-flow ## Next steps -- [Start Grafana Agent][] -- [Configure Grafana Agent][] +- [Start {{< param "PRODUCT_NAME" >}}][Start] +- [Configure {{< param "PRODUCT_NAME" >}}][Configure] [Homebrew]: https://brew.sh {{% docs/reference %}} -[Start Grafana Agent]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md#macos" -[Start Grafana Agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/start-agent.md#macos" -[Configure Grafana Agent]: "/docs/agent/ -> /docs/agent//flow/setup/configure/configure-macos.md" -[Configure Grafana Agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-macos.md" +[Start]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md#macos" +[Start]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/start-agent.md#macos" +[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-macos.md" +[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos.md" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/install/windows.md b/docs/sources/flow/setup/install/windows.md index 7bbcd2241755..031b0caeab06 100644 --- a/docs/sources/flow/setup/install/windows.md +++ b/docs/sources/flow/setup/install/windows.md @@ -4,20 +4,21 @@ aliases: - /docs/grafana-cloud/agent/flow/setup/install/windows/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/windows/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/windows/ +- /docs/grafana-cloud/send-data/agent/flow/setup/install/windows/ canonical: https://grafana.com/docs/agent/latest/flow/setup/install/windows/ -description: Learn how to install Grafana Agent in flow mode on Windows +description: Learn how to install Grafana Agent Flow on Windows menuTitle: Windows -title: Install Grafana Agent in flow mode on Windows +title: Install Grafana Agent Flow on Windows weight: 500 --- -# Install Grafana Agent in flow mode on Windows +# Install {{% param "PRODUCT_NAME" %}} on Windows -You can install Grafana Agent in flow mode on Windows as a standard graphical install, or as a silent install. +You can install {{< param "PRODUCT_NAME" >}} on Windows as a standard graphical install, or as a silent install. ## Standard graphical install -To do a standard graphical install of Grafana Agent on Windows, perform the following steps. +To do a standard graphical install of {{< param "PRODUCT_NAME" >}} on Windows, perform the following steps. 1. Navigate to the [latest release][latest] on GitHub. @@ -27,13 +28,13 @@ To do a standard graphical install of Grafana Agent on Windows, perform the foll 1. Unzip the downloaded file. -1. Double-click on `grafana-agent-installer.exe` to install Grafana Agent. +1. Double-click on `grafana-agent-installer.exe` to install {{< param "PRODUCT_NAME" >}}. 
-Grafana Agent is installed into the default directory `C:\Program Files\Grafana Agent Flow`. +{{< param "PRODUCT_NAME" >}} is installed into the default directory `C:\Program Files\Grafana Agent Flow`. ## Silent install -To do a silent install of Grafana Agent on Windows, perform the following steps. +To do a silent install of {{< param "PRODUCT_NAME" >}} on Windows, perform the following steps. 1. Navigate to the [latest release][latest] on GitHub. @@ -46,35 +47,47 @@ To do a silent install of Grafana Agent on Windows, perform the following steps. 1. Run the following command in PowerShell or Command Prompt: ```shell - PATH_TO_INSTALLER /S + <PATH_TO_INSTALLER> /S ``` - Replace `PATH_TO_INSTALLER` with the path where the unzipped installer executable is located. + Replace the following: + + - _`<PATH_TO_INSTALLER>`_: The path where the unzipped installer executable is located. ### Silent install options * `/CONFIG=<path>` Path to the configuration file. Default: `$INSTDIR\config.river` * `/DISABLEREPORTING=<yes|no>` Disable [data collection][]. Default: `no` * `/DISABLEPROFILING=<yes|no>` Disable profiling endpoint. Default: `no` +* `/ENVIRONMENT="KEY=VALUE\0KEY2=VALUE2"` Define environment variables for Windows Service. Default: `` + +## Service configuration + +{{< param "PRODUCT_NAME" >}} uses the Windows Registry `HKLM\Software\Grafana\Grafana Agent Flow` for service configuration. + +* `Arguments` (Type `REG_MULTI_SZ`) Each value represents an argument passed to the grafana-agent-flow binary. +* `Environment` (Type `REG_MULTI_SZ`) Each value represents an environment variable in `KEY=VALUE` format for the grafana-agent-flow binary. ## Uninstall -You can uninstall Grafana Agent with Windows Remove Programs or `C:\Program Files\Grafana Agent\uninstaller.exe`. Uninstalling Grafana Agent stops the service and removes it from disk. This includes any configuration files in the installation directory. +You can uninstall {{< param "PRODUCT_NAME" >}} with Windows Remove Programs or `C:\Program Files\Grafana Agent Flow\uninstaller.exe`. +Uninstalling {{< param "PRODUCT_NAME" >}} stops the service and removes it from disk. +This includes any configuration files in the installation directory. -Grafana Agent can also be silently uninstalled by running `uninstall.exe /S` as Administrator. +{{< param "PRODUCT_NAME" >}} can also be silently uninstalled by running `uninstall.exe /S` as Administrator.
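For reference, the silent install options listed above can be combined in a single command; the installer and configuration paths below are placeholders:

```shell
<PATH_TO_INSTALLER> /S /CONFIG=C:\grafana-agent\config.river /DISABLEREPORTING=yes
```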
## Next steps -- [Start Grafana Agent][] -- [Configure Grafana Agent][] +- [Start {{< param "PRODUCT_NAME" >}}][Start] +- [Configure {{< param "PRODUCT_NAME" >}}][Configure] [latest]: https://github.com/grafana/agent/releases/latest {{% docs/reference %}} -[Start Grafana Agent]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md#windows" -[Start Grafana Agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/start-agent.md#windows" -[Configure Grafana Agent]: "/docs/agent/ -> /docs/agent//flow/setup/configure/configure-windows.md" -[Configure Grafana Agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-windows.md" +[Start]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md#windows" +[Start]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/start-agent.md#windows" +[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-windows.md" +[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-windows.md" [data collection]: "/docs/agent/ -> /docs/agent//data-collection.md" -[data collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/data-collection.md" +[data collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/data-collection.md" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/start-agent.md b/docs/sources/flow/setup/start-agent.md index b76f728732a9..b8bd97a75e05 100644 --- a/docs/sources/flow/setup/start-agent.md +++ b/docs/sources/flow/setup/start-agent.md @@ -3,26 +3,27 @@ aliases: - /docs/grafana-cloud/agent/flow/setup/start-agent/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/start-agent/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/start-agent/ +- /docs/grafana-cloud/send-data/agent/flow/setup/start-agent/ canonical: https://grafana.com/docs/agent/latest/flow/setup/start-agent/ description: Learn how to start, restart, and stop Grafana Agent after it is installed -menuTitle: Start flow mode -title: Start, restart, and stop Grafana Agent in flow mode +menuTitle: Start Grafana Agent Flow +title: Start, restart, and stop Grafana Agent Flow weight: 800 --- -# Start, restart, and stop Grafana Agent in flow mode +# Start, restart, and stop {{% param "PRODUCT_NAME" %}} -You can start, restart, and stop Grafana Agent after it is installed. +You can start, restart, and stop {{< param "PRODUCT_NAME" >}} after it is installed. ## Linux -Grafana Agent is installed as a [systemd][] service on Linux. +{{< param "PRODUCT_NAME" >}} is installed as a [systemd][] service on Linux. 
[systemd]: https://systemd.io/ -### Start Grafana Agent +### Start {{% param "PRODUCT_NAME" %}} -To start Grafana Agent, run the following command in a terminal window: +To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: ```shell sudo systemctl start grafana-agent-flow @@ -34,33 +35,33 @@ sudo systemctl start grafana-agent-flow sudo systemctl status grafana-agent-flow ``` -### Configure Grafana Agent to start at boot +### Configure {{% param "PRODUCT_NAME" %}} to start at boot -To automatically run Grafana Agent when the system starts, run the following command in a terminal window: +To automatically run {{< param "PRODUCT_NAME" >}} when the system starts, run the following command in a terminal window: ```shell sudo systemctl enable grafana-agent-flow.service ``` -### Restart Grafana Agent +### Restart {{% param "PRODUCT_NAME" %}} -To restart Grafana Agent, run the following command in a terminal window: +To restart {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: ```shell sudo systemctl restart grafana-agent-flow ``` -### Stop Grafana Agent +### Stop {{% param "PRODUCT_NAME" %}} -To stop Grafana Agent, run the following command in a terminal window: +To stop {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: ```shell sudo systemctl stop grafana-agent-flow ``` -### View Grafana Agent logs on Linux +### View {{% param "PRODUCT_NAME" %}} logs on Linux -To view the Grafana Agent log files, run the following command in a terminal window: +To view {{< param "PRODUCT_NAME" >}} log files, run the following command in a terminal window: ```shell sudo journalctl -u grafana-agent-flow @@ -68,17 +69,17 @@ sudo journalctl -u grafana-agent-flow ## macOS -Grafana Agent is installed as a launchd service on macOS. +{{< param "PRODUCT_NAME" >}} is installed as a launchd service on macOS. -### Start Grafana Agent +### Start {{% param "PRODUCT_NAME" %}} -To start Grafana Agent, run the following command in a terminal window: +To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: ```shell brew services start grafana-agent-flow ``` -Grafana Agent automatically runs when the system starts. +{{< param "PRODUCT_NAME" >}} automatically runs when the system starts. (Optional) To verify that the service is running, run the following command in a terminal window: @@ -86,35 +87,35 @@ Grafana Agent automatically runs when the system starts. brew services info grafana-agent-flow ``` -### Restart Grafana Agent +### Restart {{% param "PRODUCT_NAME" %}} -To restart Grafana Agent, run the following command in a terminal window: +To restart {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: ```shell brew services restart grafana-agent-flow ``` -### Stop Grafana Agent +### Stop {{% param "PRODUCT_NAME" %}} -To stop Grafana Agent, run the following command in a terminal window: +To stop {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: ```shell brew services stop grafana-agent-flow ``` -### View Grafana Agent logs on macOS +### View {{% param "PRODUCT_NAME" %}} logs on macOS By default, logs are written to `$(brew --prefix)/var/log/grafana-agent-flow.log` and `$(brew --prefix)/var/log/grafana-agent-flow.err.log`. -If you followed [Configure the Grafana Agent service][] and changed the path where logs are written, -refer to your current copy of the Grafana Agent formula to locate your log files. 
+If you followed [Configure the {{< param "PRODUCT_NAME" >}} service][Configure] and changed the path where logs are written, +refer to your current copy of the {{< param "PRODUCT_NAME" >}} formula to locate your log files. ## Windows -Grafana Agent is installed as a Windows Service. The service is configured to automatically run on startup. +{{< param "PRODUCT_NAME" >}} is installed as a Windows Service. The service is configured to automatically run on startup. -To verify that Grafana Agent is running as a Windows Service: +To verify that {{< param "PRODUCT_NAME" >}} is running as a Windows Service: 1. Open the Windows Services manager (services.msc): @@ -122,12 +123,12 @@ To verify that Grafana Agent is running as a Windows Service: 1. Type: `services.msc` and click **OK**. -1. Scroll down to find the **Grafana Agent Flow** service and verify that the **Status** is **Running**. +1. Scroll down to find the **{{< param "PRODUCT_NAME" >}}** service and verify that the **Status** is **Running**. -### View Grafana Agent logs +### View {{% param "PRODUCT_NAME" %}} logs -When running on Windows, Grafana Agent writes its logs to Windows Event -Logs with an event source name of **Grafana Agent Flow**. +When running on Windows, {{< param "PRODUCT_NAME" >}} writes its logs to Windows Event +Logs with an event source name of **{{< param "PRODUCT_NAME" >}}**. To view the logs, perform the following steps: @@ -139,45 +140,45 @@ To view the logs, perform the following steps: 1. In the Event Viewer, click on **Windows Logs > Application**. -1. Search for events with the source **Grafana Agent Flow**. +1. Search for events with the source **{{< param "PRODUCT_NAME" >}}**. ## Standalone binary -If you downloaded the standalone binary, you must run the agent from a terminal or command window. +If you downloaded the standalone binary, you must run {{< param "PRODUCT_NAME" >}} from a terminal or command window. -### Start Grafana Agent on Linux, macOS, or FreeBSD +### Start {{% param "PRODUCT_NAME" %}} on Linux, macOS, or FreeBSD -To start Grafana Agent on Linux, macOS, or FreeBSD, run the following command in a terminal window: +To start {{< param "PRODUCT_NAME" >}} on Linux, macOS, or FreeBSD, run the following command in a terminal window: ```shell -AGENT_MODE=flow BINARY_PATH run CONFIG_PATH +AGENT_MODE=flow <BINARY_PATH> run <CONFIG_PATH> ``` Replace the following: -* `BINARY_PATH`: The path to the Grafana Agent binary file. -* `CONFIG_PATH`: The path to the Grafana Agent configuration file. +* _`<BINARY_PATH>`_: The path to the {{< param "PRODUCT_NAME" >}} binary file. +* _`<CONFIG_PATH>`_: The path to the {{< param "PRODUCT_NAME" >}} configuration file. -### Start Grafana Agent on Windows +### Start {{% param "PRODUCT_NAME" %}} on Windows -To start Grafana Agent on Windows, run the following commands in a command prompt: +To start {{< param "PRODUCT_NAME" >}} on Windows, run the following commands in a command prompt: ```cmd set AGENT_MODE=flow -BINARY_PATH run CONFIG_PATH +<BINARY_PATH> run <CONFIG_PATH> ``` Replace the following: -* `BINARY_PATH`: The path to the Grafana Agent binary file. -* `CONFIG_PATH`: The path to the Grafana Agent configuration file. +* _`<BINARY_PATH>`_: The path to the {{< param "PRODUCT_NAME" >}} binary file. +* _`<CONFIG_PATH>`_: The path to the {{< param "PRODUCT_NAME" >}} configuration file. -### Set up Grafana Agent as a Linux systemd service +### Set up {{% param "PRODUCT_NAME" %}} as a Linux systemd service -You can set up and manage the standalone binary for Grafana Agent as a Linux systemd service.
+You can set up and manage the standalone binary for {{< param "PRODUCT_NAME" >}} as a Linux systemd service. {{% admonition type="note" %}} -These steps assume you have a default systemd and Grafana Agent configuration. +These steps assume you have a default systemd and {{< param "PRODUCT_NAME" >}} configuration. {{% /admonition %}} 1. To create a new user called `grafana-agent-flow` run the following command in a terminal window: @@ -200,8 +201,8 @@ These steps assume you have a default systemd and Grafana Agent configuration. User=grafana-agent-flow Environment=HOSTNAME=%H EnvironmentFile=/etc/default/grafana-agent-flow - WorkingDirectory=WORKING_DIRECTORY - ExecStart=BINARY_PATH run $CUSTOM_ARGS --storage.path=WORKING_PATH $CONFIG_FILE + WorkingDirectory=<WORKING_DIRECTORY> + ExecStart=<BINARY_PATH> run $CUSTOM_ARGS --storage.path=<WORKING_DIRECTORY> $CONFIG_FILE ExecReload=/usr/bin/env kill -HUP $MAINPID TimeoutStopSec=20s SendSIGKILL=no @@ -212,8 +213,8 @@ These steps assume you have a default systemd and Grafana Agent configuration. Replace the following: - * `BINARY_PATH`: The path to the Grafana Agent binary file. - * `WORKING_DIRECTORY`: The path to a working directory, for example `/var/lib/grafana-agent-flow`. + * _`<BINARY_PATH>`_: The path to the {{< param "PRODUCT_NAME" >}} binary file. + * _`<WORKING_DIRECTORY>`_: The path to a working directory, for example `/var/lib/grafana-agent-flow`. 1. Create an environment file in `/etc/default/` called `grafana-agent-flow` with the following contents: @@ -226,8 +227,8 @@ These steps assume you have a default systemd and Grafana Agent configuration. # # Command line options for grafana-agent # - # The configuration file holding the agent config. - CONFIG_FILE="CONFIG_PATH" + # The configuration file holding the Grafana Agent Flow configuration. + CONFIG_FILE="<CONFIG_PATH>" # User-defined arguments to pass to the run command. CUSTOM_ARGS="" @@ -238,7 +239,7 @@ These steps assume you have a default systemd and Grafana Agent configuration. Replace the following: - * `CONFIG_PATH`: The path to the Grafana Agent configuration file. + * _`<CONFIG_PATH>`_: The path to the {{< param "PRODUCT_NAME" >}} configuration file. 1. To reload the service files, run the following command in a terminal window: @@ -246,11 +247,11 @@ These steps assume you have a default systemd and Grafana Agent configuration. sudo systemctl daemon-reload ``` -1. Use the [Linux](#linux) systemd commands to manage your standalone Linux installation of Grafana Agent. +1. Use the [Linux](#linux) systemd commands to manage your standalone Linux installation of {{< param "PRODUCT_NAME" >}}.
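For illustration, the placeholders above might be filled in as follows on a typical installation; the binary location, working directory, and configuration path are examples rather than requirements:

```shell
# In the systemd unit:
#   WorkingDirectory=/var/lib/grafana-agent-flow
#   ExecStart=/usr/local/bin/grafana-agent-flow run $CUSTOM_ARGS --storage.path=/var/lib/grafana-agent-flow $CONFIG_FILE

# In /etc/default/grafana-agent-flow:
CONFIG_FILE="/etc/grafana-agent-flow.river"
CUSTOM_ARGS="--server.http.listen-addr=127.0.0.1:12345"
```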
[release]: https://github.com/grafana/agent/releases/latest {{% docs/reference %}} -[Configure the Grafana Agent service]: "/docs/agent/ -> /docs/agent//flow/setup/configure/configure-macos.md#configure-the-grafana-agent-service" -[Configure the Grafana Agent service]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-macos.md#configure-the-grafana-agent-service" +[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-macos.md#configure-the-grafana-agent-service" +[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos.md#configure-the-grafana-agent-service" {{% /docs/reference %}} diff --git a/docs/sources/flow/tasks/_index.md b/docs/sources/flow/tasks/_index.md new file mode 100644 index 000000000000..4ca62e8c1331 --- /dev/null +++ b/docs/sources/flow/tasks/_index.md @@ -0,0 +1,25 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/tasks/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/ +# Previous page aliases for backwards compatibility: +- /docs/grafana-cloud/agent/flow/getting-started/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/ +- /docs/grafana-cloud/send-data/agent/flow/getting-started/ +- getting_started/ # /docs/agent/latest/flow/getting_started/ +- getting-started/ # /docs/agent/latest/flow/getting-started/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/ +description: How to perform common tasks with Grafana Agent Flow +menuTitle: Tasks +title: Tasks with Grafana Agent Flow +weight: 200 +--- + +# Tasks with {{% param "PRODUCT_NAME" %}} + +This section details how to perform common tasks with {{< param "PRODUCT_NAME" >}}. + +{{< section >}} diff --git a/docs/sources/flow/tasks/collect-opentelemetry-data.md b/docs/sources/flow/tasks/collect-opentelemetry-data.md new file mode 100644 index 000000000000..22248f9f70f9 --- /dev/null +++ b/docs/sources/flow/tasks/collect-opentelemetry-data.md @@ -0,0 +1,335 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/tasks/collect-opentelemetry-data/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/collect-opentelemetry-data/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/collect-opentelemetry-data/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/collect-opentelemetry-data/ +# Previous page aliases for backwards compatibility: +- /docs/grafana-cloud/agent/flow/getting-started/collect-opentelemetry-data/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/collect-opentelemetry-data/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/collect-opentelemetry-data/ +- /docs/grafana-cloud/send-data/agent/flow/getting-started/collect-opentelemetry-data/ +- ../getting-started/collect-opentelemetry-data/ # /docs/agent/latest/flow/getting-started/collect-opentelemetry-data/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/collect-opentelemetry-data/ +description: Learn how to collect OpenTelemetry data +title: Collect OpenTelemetry data +weight: 300 +--- + +# Collect OpenTelemetry data + +{{< param "PRODUCT_NAME" >}} can be configured to collect [OpenTelemetry][]-compatible +data and forward it to any OpenTelemetry-compatible endpoint. 
+ +This topic describes how to: + +* Configure OpenTelemetry data delivery. +* Configure batching. +* Receive OpenTelemetry data over OTLP. + +## Components used in this topic + +* [otelcol.auth.basic][] +* [otelcol.exporter.otlp][] +* [otelcol.exporter.otlphttp][] +* [otelcol.processor.batch][] +* [otelcol.receiver.otlp][] + +## Before you begin + +* Ensure that you have basic familiarity with instrumenting applications with OpenTelemetry. +* Have a set of OpenTelemetry applications ready to push telemetry data to {{< param "PRODUCT_NAME" >}}. +* Identify where {{< param "PRODUCT_NAME" >}} writes received telemetry data. +* Be familiar with the concept of [Components][] in {{< param "PRODUCT_NAME" >}}. + +## Configure an OpenTelemetry Protocol exporter + +Before components can receive OpenTelemetry data, you must have a component responsible for exporting the OpenTelemetry data. +An OpenTelemetry _exporter component_ is responsible for writing (exporting) OpenTelemetry data to an external system. + +In this task, you will use the [otelcol.exporter.otlp][] component to send OpenTelemetry data to a server using the OpenTelemetry Protocol (OTLP). +After an exporter component is defined, you can use other {{< param "PRODUCT_NAME" >}} components to forward data to it. + +> Refer to the list of available [Components][] for the full list of +> `otelcol.exporter` components that you can use to export OpenTelemetry data. + +To configure an `otelcol.exporter.otlp` component for exporting OpenTelemetry data using OTLP, complete the following steps: + +1. Add the following `otelcol.exporter.otlp` component to your configuration file: + + ```river + otelcol.exporter.otlp "<EXPORTER_LABEL>" { + client { + endpoint = "<HOST>:<PORT>" + } + } + ``` + + Replace the following: + + - _`<EXPORTER_LABEL>`_: The label for the component, such as `default`. + The label you use must be unique across all `otelcol.exporter.otlp` components in the same configuration file. + - _`<HOST>`_: The hostname or IP address of the server to send OTLP requests to. + - _`<PORT>`_: The port of the server to send OTLP requests to. + +1. If your server requires basic authentication, complete the following: + + 1. Add the following `otelcol.auth.basic` component to your configuration file: + + ```river + otelcol.auth.basic "<AUTH_LABEL>" { + username = "<USERNAME>" + password = "<PASSWORD>" + } + ``` + + Replace the following: + + - _`<AUTH_LABEL>`_: The label for the component, such as `default`. + The label you use must be unique across all `otelcol.auth.basic` components in the same configuration file. + - _`<USERNAME>`_: The basic authentication username. + - _`<PASSWORD>`_: The basic authentication password or API key. + + 1. Add the following line inside of the `client` block of your `otelcol.exporter.otlp` component: + + ```river + auth = otelcol.auth.basic.<AUTH_LABEL>.handler + ``` + + Replace the following: + + - _`<AUTH_LABEL>`_: The label for the `otelcol.auth.basic` component. + +1. If you have more than one server to export metrics to, create a new `otelcol.exporter.otlp` component for each additional server. + +> `otelcol.exporter.otlp` sends data using OTLP over gRPC (HTTP/2). +> To send to a server using HTTP/1.1, follow the preceding steps, +> but use the [otelcol.exporter.otlphttp component][otelcol.exporter.otlphttp] instead.
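For the HTTP/1.1 variant mentioned in the note above, a minimal sketch follows the same shape; the label and endpoint are placeholders, and the endpoint is a full URL rather than a `host:port` pair:

```river
otelcol.exporter.otlphttp "<EXPORTER_LABEL>" {
  client {
    endpoint = "https://<HOST>:<PORT>"
  }
}
```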
+ +The following example demonstrates configuring `otelcol.exporter.otlp` with authentication and a component that forwards data to it: + +```river +otelcol.exporter.otlp "default" { + client { + endpoint = "my-otlp-grpc-server:4317" + auth = otelcol.auth.basic.credentials.handler + } +} + +otelcol.auth.basic "credentials" { + // Retrieve credentials using environment variables. + + username = env("BASIC_AUTH_USER") + password = env("API_KEY") +} + +otelcol.receiver.otlp "example" { + grpc { + endpoint = "127.0.0.1:4317" + } + + http { + endpoint = "127.0.0.1:4318" + } + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} +``` + +For more information on writing OpenTelemetry data using the OpenTelemetry Protocol, refer to [otelcol.exporter.otlp][]. + +## Configure batching + +Production-ready {{< param "PRODUCT_NAME" >}} configurations shouldn't send OpenTelemetry data directly to an exporter for delivery. +Instead, data is usually sent to one or more _processor components_ that perform various transformations on the data. + +Ensuring data is batched is a production-readiness step to improve data compression and reduce the number of outgoing network requests to external systems. + +In this task, you will configure an [otelcol.processor.batch][] component to batch data before sending it to the exporter. + +> Refer to the list of available [Components][] for the full list of +> `otelcol.processor` components that you can use to process OpenTelemetry +> data. You can chain processors by having one processor send data to another +> processor. + +To configure an `otelcol.processor.batch` component, complete the following steps: + +1. Follow [Configure an OpenTelemetry Protocol exporter][] to ensure received data can be written to an external system. + +1. Add the following `otelcol.processor.batch` component into your configuration file: + + ```river + otelcol.processor.batch "<PROCESSOR_LABEL>" { + output { + metrics = [otelcol.exporter.otlp.<EXPORTER_LABEL>.input] + logs = [otelcol.exporter.otlp.<EXPORTER_LABEL>.input] + traces = [otelcol.exporter.otlp.<EXPORTER_LABEL>.input] + } + } + ``` + + Replace the following: + + - _`<PROCESSOR_LABEL>`_: The label for the component, such as `default`. + The label you use must be unique across all `otelcol.processor.batch` components in the same configuration file. + - _`<EXPORTER_LABEL>`_: The label for your existing `otelcol.exporter.otlp` component. + + 1. To disable one of the telemetry types, set the relevant type in the `output` block to the empty list, such as `metrics = []`. + + 1. To send batched data to another processor, replace the components in the `output` list with the processor components to use. + +The following example demonstrates configuring a sequence of `otelcol.processor` components before the data is exported. + +```river +otelcol.processor.memory_limiter "default" { + check_interval = "1s" + limit = "1GiB" + + output { + metrics = [otelcol.processor.batch.default.input] + logs = [otelcol.processor.batch.default.input] + traces = [otelcol.processor.batch.default.input] + } +} + +otelcol.processor.batch "default" { + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "my-otlp-grpc-server:4317" + } +} +``` + +For more information on configuring OpenTelemetry data batching, refer to [otelcol.processor.batch][].
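If the default batching behavior isn't a good fit, the batch processor also exposes sizing and timing arguments; the following is a hedged sketch with example values, so refer to the `otelcol.processor.batch` reference for the authoritative argument list:

```river
otelcol.processor.batch "default" {
  // Flush a batch after this duration even if it hasn't reached send_batch_size.
  timeout         = "2s"
  // Target number of spans, metric data points, or log records per batch.
  send_batch_size = 8192

  output {
    metrics = [otelcol.exporter.otlp.default.input]
    logs    = [otelcol.exporter.otlp.default.input]
    traces  = [otelcol.exporter.otlp.default.input]
  }
}
```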
+ +## Configure an OpenTelemetry Protocol receiver + +You can configure {{< param "PRODUCT_NAME" >}} to receive OpenTelemetry metrics, logs, and traces. +An OpenTelemetry _receiver_ component is responsible for receiving OpenTelemetry data from an external system. + +In this task, you will use the [otelcol.receiver.otlp][] component to receive OpenTelemetry data over the network using the OpenTelemetry Protocol (OTLP). +You can configure a receiver component to forward received data to other {{< param "PRODUCT_NAME" >}} components. + +> Refer to the list of available [Components][] for the full list of +> `otelcol.receiver` components that you can use to receive +> OpenTelemetry-compatible data. + +To configure an `otelcol.receiver.otlp` component for receiving OTLP data, complete the following steps: + +1. Follow [Configure an OpenTelemetry Protocol exporter][] to ensure received data can be written to an external system. + +1. Optional: Follow [Configure batching][] to improve compression and reduce the total amount of network requests. + +1. Add the following `otelcol.receiver.otlp` component to your configuration file. + + ```river + otelcol.receiver.otlp "