diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index b9f7b83d5f97..adf0c4277b10 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -2,7 +2,7 @@ "check": "uses": "grafana/loki-release/.github/workflows/check.yml@main" "with": - "build_image": "grafana/loki-build-image:0.34.0" + "build_image": "grafana/loki-build-image:0.34.1" "golang_ci_lint_version": "v1.60.3" "release_lib_ref": "main" "skip_validation": false diff --git a/.github/workflows/helm-loki-ci.yml b/.github/workflows/helm-loki-ci.yml index 7a2850579118..5902d9f0f911 100644 --- a/.github/workflows/helm-loki-ci.yml +++ b/.github/workflows/helm-loki-ci.yml @@ -1,14 +1,14 @@ --- name: helm-loki-ci on: - pull_request: + # This workflow runs with the configuration from the base branch, so changes made to this file in a PR are not taken into account until they are merged into main. See: https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows#pull_request_target . + # This change is required to allow this CI to run on pull requests opened from a fork repository. + pull_request_target: paths: - "production/helm/loki/**" jobs: publish-diff: - # temporarily disable the workflow for the PRs where PRs branch is from fork. - if: github.event.pull_request.head.repo.full_name == github.repository name: Publish Rendered Helm Chart Diff runs-on: ubuntu-latest steps: diff --git a/.github/workflows/images.yml b/.github/workflows/images.yml index 97b40cb2e05b..bb2f50017d81 100644 --- a/.github/workflows/images.yml +++ b/.github/workflows/images.yml @@ -2,7 +2,7 @@ "check": "uses": "grafana/loki-release/.github/workflows/check.yml@main" "with": - "build_image": "grafana/loki-build-image:0.34.0" + "build_image": "grafana/loki-build-image:0.34.1" "golang_ci_lint_version": "v1.60.3" "release_lib_ref": "main" "skip_validation": false diff --git a/.github/workflows/minor-release-pr.yml b/.github/workflows/minor-release-pr.yml index a5c52d0fb2ee..de7880161a2b 100644 --- a/.github/workflows/minor-release-pr.yml +++ b/.github/workflows/minor-release-pr.yml @@ -16,7 +16,7 @@ jobs: check: uses: "grafana/loki-release/.github/workflows/check.yml@main" with: - build_image: "grafana/loki-build-image:0.34.0" + build_image: "grafana/loki-build-image:0.34.1" golang_ci_lint_version: "v1.60.3" release_lib_ref: "main" skip_validation: false @@ -143,7 +143,7 @@ jobs: --env SKIP_ARM \ --volume .:/src/loki \ --workdir /src/loki \ - --entrypoint /bin/sh "grafana/loki-build-image:0.34.0" + --entrypoint /bin/sh "grafana/loki-build-image:0.34.1" git config --global --add safe.directory /src/loki echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE make dist packages diff --git a/.github/workflows/operator-check-prepare-release-commit.yml b/.github/workflows/operator-check-prepare-release-commit.yml index 209923071863..c8e900829580 100644 --- a/.github/workflows/operator-check-prepare-release-commit.yml +++ b/.github/workflows/operator-check-prepare-release-commit.yml @@ -14,6 +14,14 @@ jobs: github.event.pull_request.head.ref == 'release-please--branches--main--components--operator' && contains(github.event.pull_request.title, 'chore( operator): community release') steps: + - id: "get_github_app_token" + name: Get GitHub Token + uses: "actions/create-github-app-token@v1" + with: + app-id: "${{ secrets.APP_ID }}" + owner: "${{ github.repository_owner }}" + private-key: "${{ secrets.APP_PRIVATE_KEY }}" + - name: Extract release version id: pr_semver env: @@ -31,7 +39,7 @@ jobs: - 
name: Check main commits for prepare release commit id: check_commit env: - GH_TOKEN: ${{ secrets.GH_TOKEN }} + GH_TOKEN: ${{ steps.get_github_app_token.outputs.token }} working-directory: "release" run: | COMMIT=$(gh search commits "chore(operator): prepare community release v${{ steps.pr_semver.outputs.semver }}") diff --git a/.github/workflows/operator-publish-operator-hub.yml b/.github/workflows/operator-publish-operator-hub.yml index c3fa69b46629..dd4d4c199af3 100644 --- a/.github/workflows/operator-publish-operator-hub.yml +++ b/.github/workflows/operator-publish-operator-hub.yml @@ -10,8 +10,6 @@ jobs: with: org: redhat-openshift-ecosystem repo: community-operators-prod - secrets: - GRAFANABOT_GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} operator-hub-community-release: if: startsWith(github.event.release.tag_name, 'operator/') @@ -19,5 +17,3 @@ jobs: with: org: k8s-operatorhub repo: community-operators - secrets: - GRAFANABOT_GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} diff --git a/.github/workflows/operator-release-please.yml b/.github/workflows/operator-release-please.yml index 77be2bc58a23..266dfd26e308 100644 --- a/.github/workflows/operator-release-please.yml +++ b/.github/workflows/operator-release-please.yml @@ -18,25 +18,39 @@ jobs: release_created: ${{ steps.release.outputs.operator--release_created }} release_name: ${{ steps.release.outputs.operator--tag_name }} steps: - - uses: google-github-actions/release-please-action@v4 + - id: "get_github_app_token" + name: Get GitHub App Token + uses: "actions/create-github-app-token@v1" + with: + app-id: "${{ secrets.APP_ID }}" + owner: "${{ github.repository_owner }}" + private-key: "${{ secrets.APP_PRIVATE_KEY }}" + - uses: googleapis/release-please-action@v4 id: release with: path: operator config-file: operator/release-please-config.json - token: ${{ secrets.GH_TOKEN }} + token: ${{ steps.get_github_app_token.outputs.token }} publishRelease: needs: - "releasePlease" runs-on: ubuntu-latest if: ${{ needs.releasePlease.outputs.release_created }} steps: + - id: "get_github_app_token" + name: Get GitHub App Token + uses: "actions/create-github-app-token@v1" + with: + app-id: "${{ secrets.APP_ID }}" + owner: "${{ github.repository_owner }}" + private-key: "${{ secrets.APP_PRIVATE_KEY }}" - name: "pull code to release" uses: "actions/checkout@v4" with: path: "release" - name: "publish release" env: - GH_TOKEN: ${{ secrets.GH_TOKEN }} + GH_TOKEN: ${{ steps.get_github_app_token.outputs.token }} working-directory: "release" run: | gh release edit "${{ needs.releasePlease.outputs.release_name }}" --draft=false --latest=false \ No newline at end of file diff --git a/.github/workflows/operator-reusable-hub-release.yml b/.github/workflows/operator-reusable-hub-release.yml index 862d072401dd..ecf279413417 100644 --- a/.github/workflows/operator-reusable-hub-release.yml +++ b/.github/workflows/operator-reusable-hub-release.yml @@ -9,14 +9,19 @@ on: repo: type: string required: true - secrets: - GRAFANABOT_GITHUB_TOKEN: - required: true jobs: create-operator-pull-request: runs-on: ubuntu-latest steps: + - id: "get_github_app_token" + name: Get GitHub App Token + uses: "actions/create-github-app-token@v1" + with: + app-id: "${{ secrets.APP_ID }}" + owner: "${{ github.repository_owner }}" + private-key: "${{ secrets.APP_PRIVATE_KEY }}" + - name: Set redhat-openshift-ecosystem specific variables if: ${{ inputs.org == 'redhat-openshift-ecosystem' }} env: @@ -36,7 +41,7 @@ jobs: - name: Sync fork env: - GH_TOKEN: ${{ secrets.GRAFANABOT_GITHUB_TOKEN }} + 
GH_TOKEN: ${{ steps.get_github_app_token.outputs.token }} run: | # synchronizing the fork is fast, and avoids the need to fetch the full upstream repo # (fetching the upstream repo with "--depth 1" would lead to "shallow update not allowed" @@ -49,13 +54,13 @@ jobs: uses: actions/checkout@v4 with: repository: grafanabot/${{ inputs.repo }} - token: ${{ secrets.GRAFANABOT_GITHUB_TOKEN }} + token: ${{ steps.get_github_app_token.outputs.token }} - name: Checkout loki to tmp/ directory uses: actions/checkout@v4 with: repository: grafana/loki - token: ${{ secrets.GRAFANABOT_GITHUB_TOKEN }} + token: ${{ steps.get_github_app_token.outputs.token }} path: tmp/ - name: Update version @@ -85,7 +90,7 @@ jobs: - name: Create pull request against ${{ inputs.org }}/${{ inputs.repo }} env: VERSION: ${{ env.version }} - GH_TOKEN: ${{ secrets.GRAFANABOT_GITHUB_TOKEN }} + GH_TOKEN: ${{ steps.get_github_app_token.outputs.token }} run: | message="Update the loki-operator to $VERSION" body="Release loki-operator \`$VERSION\`. diff --git a/.github/workflows/patch-release-pr.yml b/.github/workflows/patch-release-pr.yml index 800f9afd7106..cf982bc70e23 100644 --- a/.github/workflows/patch-release-pr.yml +++ b/.github/workflows/patch-release-pr.yml @@ -16,7 +16,7 @@ jobs: check: uses: "grafana/loki-release/.github/workflows/check.yml@main" with: - build_image: "grafana/loki-build-image:0.34.0" + build_image: "grafana/loki-build-image:0.34.1" golang_ci_lint_version: "v1.60.3" release_lib_ref: "main" skip_validation: false @@ -143,7 +143,7 @@ jobs: --env SKIP_ARM \ --volume .:/src/loki \ --workdir /src/loki \ - --entrypoint /bin/sh "grafana/loki-build-image:0.34.0" + --entrypoint /bin/sh "grafana/loki-build-image:0.34.1" git config --global --add safe.directory /src/loki echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE make dist packages diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1628a0b57dfb..3d074be5ec59 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -219,4 +219,4 @@ name: "create release" permissions: contents: "write" id-token: "write" - pull-requests: "write" \ No newline at end of file + pull-requests: "write" diff --git a/.golangci.yml b/.golangci.yml index ae10a6ba210b..9a3e34b7754b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -24,7 +24,7 @@ run: - cgo - promtail_journal_enabled - integration - + # output configuration options output: formats: @@ -63,7 +63,7 @@ linters: - govet - typecheck - depguard - - exportloopref + - copyloopvar - gofmt - goimports - gosimple diff --git a/Makefile b/Makefile index b75b4f5d5ced..609a8190f00a 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ DOCKER_IMAGE_DIRS := $(patsubst %/Dockerfile,%,$(DOCKERFILES)) BUILD_IN_CONTAINER ?= true # ensure you run `make release-workflows` after changing this -BUILD_IMAGE_VERSION ?= 0.34.0 +BUILD_IMAGE_VERSION ?= 0.34.1 GO_VERSION := 1.23.1 # Docker image info @@ -664,7 +664,7 @@ else endif build-image: ensure-buildx-builder - $(SUDO) $(BUILD_OCI) --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki-build-image:$(IMAGE_TAG) ./loki-build-image + $(SUDO) $(BUILD_OCI) --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki-build-image:$(BUILD_IMAGE_VERSION) ./loki-build-image build-image-push: build-image ## push the docker build image ifneq (,$(findstring WIP,$(IMAGE_TAG))) @echo "Cannot push a WIP image, commit changes first"; \ diff --git a/clients/cmd/fluentd/docker/Gemfile b/clients/cmd/fluentd/docker/Gemfile index 981d6e701e63..f52d506bc3bf 
100644 --- a/clients/cmd/fluentd/docker/Gemfile +++ b/clients/cmd/fluentd/docker/Gemfile @@ -3,4 +3,4 @@ source 'https://rubygems.org' gem 'fluentd', '1.15.3' -gem 'fluent-plugin-multi-format-parser', '~>1.0.0' +gem 'fluent-plugin-multi-format-parser', '~>1.1.0' diff --git a/clients/pkg/logentry/metric/counters_test.go b/clients/pkg/logentry/metric/counters_test.go index 49b38737d568..375f8fbd835b 100644 --- a/clients/pkg/logentry/metric/counters_test.go +++ b/clients/pkg/logentry/metric/counters_test.go @@ -78,7 +78,6 @@ func Test_validateCounterConfig(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() err := validateCounterConfig(&tt.config) diff --git a/clients/pkg/logentry/stages/decolorize_test.go b/clients/pkg/logentry/stages/decolorize_test.go index 029cd74c1c1e..7e7d0c88062b 100644 --- a/clients/pkg/logentry/stages/decolorize_test.go +++ b/clients/pkg/logentry/stages/decolorize_test.go @@ -36,8 +36,6 @@ func TestPipeline_Decolorize(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/clients/pkg/logentry/stages/eventlogmessage_test.go b/clients/pkg/logentry/stages/eventlogmessage_test.go index ed4bedccfc70..99ab57169934 100644 --- a/clients/pkg/logentry/stages/eventlogmessage_test.go +++ b/clients/pkg/logentry/stages/eventlogmessage_test.go @@ -106,7 +106,6 @@ func TestEventLogMessage_simple(t *testing.T) { } for testName, testData := range tests { - testData := testData testData.extractedValues[testData.sourcekey] = testData.msgdata t.Run(testName, func(t *testing.T) { @@ -151,7 +150,6 @@ func TestEventLogMessageConfig_validate(t *testing.T) { }, } for tName, tt := range tests { - tt := tt t.Run(tName, func(t *testing.T) { _, err := newEventLogMessageStage(util_log.Logger, tt.config) if tt.err != nil { @@ -262,7 +260,6 @@ func TestEventLogMessage_Real(t *testing.T) { } for testName, testData := range tests { - testData := testData testData.extractedValues[testData.sourcekey] = testData.msgdata t.Run(testName, func(t *testing.T) { @@ -318,7 +315,6 @@ func TestEventLogMessage_invalid(t *testing.T) { } for testName, testData := range tests { - testData := testData testData.extractedValues[testData.sourcekey] = testData.msgdata t.Run(testName, func(t *testing.T) { diff --git a/clients/pkg/logentry/stages/extensions_test.go b/clients/pkg/logentry/stages/extensions_test.go index 0d03acd3fe3d..2cbe411c2a03 100644 --- a/clients/pkg/logentry/stages/extensions_test.go +++ b/clients/pkg/logentry/stages/extensions_test.go @@ -65,7 +65,6 @@ func TestNewDocker(t *testing.T) { } for tName, tt := range tests { - tt := tt t.Run(tName, func(t *testing.T) { t.Parallel() p, err := NewDocker(util_log.Logger, prometheus.DefaultRegisterer) @@ -268,7 +267,6 @@ func TestNewCri(t *testing.T) { } for tName, tt := range tests { - tt := tt t.Run(tName, func(t *testing.T) { t.Parallel() cfg := map[string]interface{}{} diff --git a/clients/pkg/logentry/stages/json_test.go b/clients/pkg/logentry/stages/json_test.go index 1764387253fb..9e0d2dffaea4 100644 --- a/clients/pkg/logentry/stages/json_test.go +++ b/clients/pkg/logentry/stages/json_test.go @@ -78,8 +78,6 @@ func TestPipeline_JSON(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() @@ -179,7 +177,6 @@ func TestJSONConfig_validate(t *testing.T) { }, } for tName, tt := range tests { - tt := tt t.Run(tName, func(t *testing.T) { c, 
err := parseJSONConfig(tt.config) assert.NoError(t, err, "failed to create config: %s", err) @@ -339,7 +336,6 @@ func TestJSONParser_Parse(t *testing.T) { }, } for tName, tt := range tests { - tt := tt t.Run(tName, func(t *testing.T) { t.Parallel() p, err := New(util_log.Logger, nil, StageTypeJSON, tt.config, nil) diff --git a/clients/pkg/logentry/stages/labels_test.go b/clients/pkg/logentry/stages/labels_test.go index 27747d8032ed..f42263211524 100644 --- a/clients/pkg/logentry/stages/labels_test.go +++ b/clients/pkg/logentry/stages/labels_test.go @@ -115,7 +115,6 @@ func TestLabels(t *testing.T) { }, } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { t.Parallel() err := validateLabelsConfig(test.config) @@ -176,7 +175,6 @@ func TestLabelStage_Process(t *testing.T) { }, } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { t.Parallel() st, err := newLabelStage(util_log.Logger, test.config) diff --git a/clients/pkg/logentry/stages/logfmt_test.go b/clients/pkg/logentry/stages/logfmt_test.go index ed60d8770d01..1406aa080cf8 100644 --- a/clients/pkg/logentry/stages/logfmt_test.go +++ b/clients/pkg/logentry/stages/logfmt_test.go @@ -64,8 +64,6 @@ func TestPipeline_Logfmt(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() @@ -153,7 +151,6 @@ func TestLogfmtConfig_validate(t *testing.T) { }, } for tName, tt := range tests { - tt := tt t.Run(tName, func(t *testing.T) { c, err := parseLogfmtConfig(tt.config) assert.NoError(t, err) @@ -281,7 +278,6 @@ func TestLogfmtParser_Parse(t *testing.T) { }, } for tName, tt := range tests { - tt := tt t.Run(tName, func(t *testing.T) { t.Parallel() p, err := New(util_log.Logger, nil, StageTypeLogfmt, tt.config, nil) diff --git a/clients/pkg/logentry/stages/metrics_test.go b/clients/pkg/logentry/stages/metrics_test.go index 563ee9eab664..133220ea6d5d 100644 --- a/clients/pkg/logentry/stages/metrics_test.go +++ b/clients/pkg/logentry/stages/metrics_test.go @@ -415,7 +415,6 @@ func TestValidateMetricsConfig(t *testing.T) { } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { t.Parallel() err := validateMetricsConfig(test.config) diff --git a/clients/pkg/logentry/stages/output_test.go b/clients/pkg/logentry/stages/output_test.go index dc6aac54f0b9..b69399e1dbce 100644 --- a/clients/pkg/logentry/stages/output_test.go +++ b/clients/pkg/logentry/stages/output_test.go @@ -86,7 +86,6 @@ func TestOutputValidation(t *testing.T) { }, } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { t.Parallel() err := validateOutputConfig(test.config) @@ -120,7 +119,6 @@ func TestOutputStage_Process(t *testing.T) { }, } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { t.Parallel() st, err := newOutputStage(util_log.Logger, test.config) diff --git a/clients/pkg/logentry/stages/pipeline_test.go b/clients/pkg/logentry/stages/pipeline_test.go index 2649de6a8344..356955be032c 100644 --- a/clients/pkg/logentry/stages/pipeline_test.go +++ b/clients/pkg/logentry/stages/pipeline_test.go @@ -194,8 +194,6 @@ func TestPipeline_Process(t *testing.T) { } for tName, tt := range tests { - tt := tt - t.Run(tName, func(t *testing.T) { var config map[string]interface{} @@ -304,7 +302,6 @@ func TestPipeline_Wrap(t *testing.T) { } for tName, tt := range tests { - tt := tt t.Run(tName, func(t *testing.T) { t.Parallel() c := fake.New(func() {}) diff --git 
a/clients/pkg/logentry/stages/regex_test.go b/clients/pkg/logentry/stages/regex_test.go index f7fa5390a195..362c5990575c 100644 --- a/clients/pkg/logentry/stages/regex_test.go +++ b/clients/pkg/logentry/stages/regex_test.go @@ -102,8 +102,6 @@ func TestPipeline_Regex(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() @@ -204,7 +202,6 @@ func TestRegexConfig_validate(t *testing.T) { }, } for tName, tt := range tests { - tt := tt t.Run(tName, func(t *testing.T) { c, err := parseRegexConfig(tt.config) if err != nil { @@ -322,7 +319,6 @@ func TestRegexParser_Parse(t *testing.T) { }, } for tName, tt := range tests { - tt := tt t.Run(tName, func(t *testing.T) { t.Parallel() p, err := New(util_log.Logger, nil, StageTypeRegex, tt.config, nil) diff --git a/clients/pkg/logentry/stages/replace_test.go b/clients/pkg/logentry/stages/replace_test.go index 87bb3eecb898..6e583f19b8b0 100644 --- a/clients/pkg/logentry/stages/replace_test.go +++ b/clients/pkg/logentry/stages/replace_test.go @@ -161,8 +161,6 @@ func TestPipeline_Replace(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() @@ -252,7 +250,6 @@ func TestReplaceConfig_validate(t *testing.T) { }, } for tName, tt := range tests { - tt := tt t.Run(tName, func(t *testing.T) { c, err := parseReplaceConfig(tt.config) if err != nil { diff --git a/clients/pkg/logentry/stages/template_test.go b/clients/pkg/logentry/stages/template_test.go index 7977c87ffee6..15a0a2a1f78f 100644 --- a/clients/pkg/logentry/stages/template_test.go +++ b/clients/pkg/logentry/stages/template_test.go @@ -105,7 +105,6 @@ func TestTemplateValidation(t *testing.T) { }, } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { t.Parallel() _, err := validateTemplateConfig(test.config) @@ -375,7 +374,6 @@ func TestTemplateStage_Process(t *testing.T) { }, } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { t.Parallel() st, err := newTemplateStage(util_log.Logger, test.config) diff --git a/clients/pkg/logentry/stages/tenant_test.go b/clients/pkg/logentry/stages/tenant_test.go index 8eee783d47dd..e52431581806 100644 --- a/clients/pkg/logentry/stages/tenant_test.go +++ b/clients/pkg/logentry/stages/tenant_test.go @@ -126,8 +126,6 @@ func TestTenantStage_Validation(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { stage, err := newTenantStage(util_log.Logger, testData.config) @@ -202,8 +200,6 @@ func TestTenantStage_Process(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { stage, err := newTenantStage(util_log.Logger, testData.config) require.NoError(t, err) diff --git a/clients/pkg/logentry/stages/timestamp_test.go b/clients/pkg/logentry/stages/timestamp_test.go index f3f23dcfceba..2e0ea0c054da 100644 --- a/clients/pkg/logentry/stages/timestamp_test.go +++ b/clients/pkg/logentry/stages/timestamp_test.go @@ -174,7 +174,6 @@ func TestTimestampValidation(t *testing.T) { }, } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { t.Parallel() parser, err := validateTimestampConfig(test.config) @@ -295,7 +294,6 @@ func TestTimestampStage_Process(t *testing.T) { }, } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { t.Parallel() st, err := newTimestampStage(util_log.Logger, test.config) 
@@ -431,8 +429,6 @@ func TestTimestampStage_ProcessActionOnFailure(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/clients/pkg/logentry/stages/util_test.go b/clients/pkg/logentry/stages/util_test.go index 5ce0ae9a7f93..863b43533975 100644 --- a/clients/pkg/logentry/stages/util_test.go +++ b/clients/pkg/logentry/stages/util_test.go @@ -145,8 +145,6 @@ func TestConvertDateLayout(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() @@ -224,8 +222,6 @@ func TestParseTimestampWithoutYear(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/clients/pkg/promtail/client/batch_test.go b/clients/pkg/promtail/client/batch_test.go index ec92fbc1c422..9afbe4e457ce 100644 --- a/clients/pkg/promtail/client/batch_test.go +++ b/clients/pkg/promtail/client/batch_test.go @@ -73,8 +73,6 @@ func TestBatch_add(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { b := newBatch(0) @@ -123,8 +121,6 @@ func TestBatch_encode(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/clients/pkg/promtail/client/config_test.go b/clients/pkg/promtail/client/config_test.go index 7a1a3fb518b5..cb1476f1a2a0 100644 --- a/clients/pkg/promtail/client/config_test.go +++ b/clients/pkg/promtail/client/config_test.go @@ -97,7 +97,6 @@ func Test_Config(t *testing.T) { }, } for _, tc := range tests { - tc := tc err := yaml.Unmarshal([]byte(tc.configValues), &clientConfig) if tc.expectedErr != nil { diff --git a/clients/pkg/promtail/config/config.go b/clients/pkg/promtail/config/config.go index 0454a8facf49..7e0e2b63fe17 100644 --- a/clients/pkg/promtail/config/config.go +++ b/clients/pkg/promtail/config/config.go @@ -42,7 +42,6 @@ type Config struct { // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { - *c = Config{} // We want to set c to the defaults and then overwrite it with the input. // To make unmarshal fill the plain data struct rather than calling UnmarshalYAML // again, we have to hide it using a type indirection. 
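Note on the `clients/pkg/promtail/config/config.go` hunk above: removing the `*c = Config{}` reset at the top of `UnmarshalYAML` means unmarshalling no longer wipes out values that were already set on the struct before parsing. The comment it keeps describes the usual yaml.v2 defaults-then-overwrite idiom. Below is a minimal, self-contained sketch of that idiom; the `Config` fields and defaults here are hypothetical stand-ins, not promtail's actual ones.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Config is a hypothetical stand-in for promtail's Config.
type Config struct {
	ServerAddr string `yaml:"server_addr"`
	BatchSize  int    `yaml:"batch_size"`
}

// UnmarshalYAML applies defaults first, then lets the input overwrite them.
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Set defaults; fields absent from the YAML input keep these values.
	c.ServerAddr = "localhost:3100"
	c.BatchSize = 1024

	// "plain" has Config's fields but none of its methods, so unmarshal
	// fills the struct directly instead of recursing into UnmarshalYAML.
	type plain Config
	return unmarshal((*plain)(c))
}

func main() {
	var c Config
	_ = yaml.Unmarshal([]byte("batch_size: 42"), &c)
	fmt.Println(c.ServerAddr, c.BatchSize) // localhost:3100 42
}
```

The `(*plain)(c)` conversion is the key trick: the alias type drops the method set, which breaks the recursion while still writing into the same memory.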
diff --git a/clients/pkg/promtail/config/config_test.go b/clients/pkg/promtail/config/config_test.go index a812dd984abc..fe197044b683 100644 --- a/clients/pkg/promtail/config/config_test.go +++ b/clients/pkg/promtail/config/config_test.go @@ -175,7 +175,6 @@ func TestConfig_Setup(t *testing.T) { }, }, } { - tt := tt t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { tt.in.Setup(log.NewNopLogger()) require.Equal(t, tt.expected, tt.in) diff --git a/clients/pkg/promtail/targets/kafka/target_syncer_test.go b/clients/pkg/promtail/targets/kafka/target_syncer_test.go index f450a10d67f3..9a279c2a3670 100644 --- a/clients/pkg/promtail/targets/kafka/target_syncer_test.go +++ b/clients/pkg/promtail/targets/kafka/target_syncer_test.go @@ -195,7 +195,6 @@ func Test_validateConfig(t *testing.T) { } for i, tt := range tests { - tt := tt t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { err := validateConfig(tt.cfg) if (err != nil) != tt.wantErr { diff --git a/clients/pkg/promtail/targets/kafka/topics_test.go b/clients/pkg/promtail/targets/kafka/topics_test.go index 447a8a0a65af..2bc9b4637146 100644 --- a/clients/pkg/promtail/targets/kafka/topics_test.go +++ b/clients/pkg/promtail/targets/kafka/topics_test.go @@ -49,7 +49,6 @@ func Test_NewTopicManager(t *testing.T) { false, }, } { - tt := tt t.Run(strings.Join(tt.in, ","), func(t *testing.T) { t.Parallel() _, err := newTopicManager(&mockKafkaClient{}, tt.in) @@ -86,7 +85,6 @@ func Test_Topics(t *testing.T) { false, }, } { - tt := tt t.Run("", func(t *testing.T) { t.Parallel() diff --git a/clients/pkg/promtail/targets/syslog/syslogtarget_test.go b/clients/pkg/promtail/targets/syslog/syslogtarget_test.go index aedb78d6979d..7f379320f0c6 100644 --- a/clients/pkg/promtail/targets/syslog/syslogtarget_test.go +++ b/clients/pkg/promtail/targets/syslog/syslogtarget_test.go @@ -305,7 +305,6 @@ func Benchmark_SyslogTarget(b *testing.B) { {"tcp", protocolTCP, fmtOctetCounting}, {"udp", protocolUDP, fmtOctetCounting}, } { - tt := tt b.Run(tt.name, func(b *testing.B) { client := fake.New(func() {}) @@ -366,7 +365,6 @@ func TestSyslogTarget(t *testing.T) { {"udp newline separated", protocolUDP, fmtNewline}, {"udp octetcounting", protocolUDP, fmtOctetCounting}, } { - tt := tt t.Run(tt.name, func(t *testing.T) { w := log.NewSyncWriter(os.Stderr) logger := log.NewLogfmtLogger(w) @@ -481,7 +479,6 @@ func TestSyslogTarget_RFC5424Messages(t *testing.T) { {"tcp newline separated", protocolTCP, fmtNewline}, {"tcp octetcounting", protocolTCP, fmtOctetCounting}, } { - tt := tt t.Run(tt.name, func(t *testing.T) { w := log.NewSyncWriter(os.Stderr) logger := log.NewLogfmtLogger(w) diff --git a/docs/sources/community/maintaining/release-loki-build-image.md b/docs/sources/community/maintaining/release-loki-build-image.md index d6e1f15b1d81..45e98739324d 100644 --- a/docs/sources/community/maintaining/release-loki-build-image.md +++ b/docs/sources/community/maintaining/release-loki-build-image.md @@ -14,23 +14,11 @@ if any changes were made in the folder `./loki-build-image/`. **To build and use the `loki-build-image`:** -## Step 1 - -1. Create a branch with the desired changes to the Dockerfile. -2. Update the version tag of the `loki-build-image` pipeline defined in `.drone/drone.jsonnet` (search for `pipeline('loki-build-image')`) to a new version number (try to follow semver). -3. Run `DRONE_SERVER=https://drone.grafana.net/ DRONE_TOKEN= make drone` and commit the changes to the same branch. - 1. 
The `` is your personal drone token, which can be found by navigating to https://drone.grafana.net/account. -4. Create a PR. -5. Once approved and merged to `main`, the image with the new version is built and published. - {{% admonition type="note" %}} - Keep an eye on https://drone.grafana.net/grafana/loki for the build after merging ([example](https://drone.grafana.net/grafana/loki/17760/1/2)). - {{% /admonition %}} - -## Step 2 - -1. Create a branch. -2. Update the `BUILD_IMAGE_VERSION` variable in the `Makefile`. -3. Run `loki-build-image/version-updater.sh ` to update all the references. -4. Run `DRONE_SERVER=https://drone.grafana.net/ DRONE_TOKEN= make drone` to update the Drone config to use the new build image. -5. Create a new PR. - +1. Create a branch with the desired changes to the `./loki-build-image/Dockerfile`. +1. Update the `BUILD_IMAGE_VERSION` variable in the `Makefile`. +1. Commit your changes. +1. Run `make build-image-push` to build and publish the new version of the build image. +1. Run `make release-workflows` to update the GitHub workflows. +1. Commit your changes. +1. Push your changes to the remote branch. +1. Open a PR against the `main` branch. diff --git a/docs/sources/send-data/otel/otel-collector-getting-started.md b/docs/sources/send-data/otel/otel-collector-getting-started.md new file mode 100644 index 000000000000..08b374656d96 --- /dev/null +++ b/docs/sources/send-data/otel/otel-collector-getting-started.md @@ -0,0 +1,366 @@ +--- +title: Getting started with the OpenTelemetry Collector and Loki tutorial +menuTitle: OTel Collector tutorial +description: A tutorial on configuring the OpenTelemetry Collector to send OpenTelemetry logs to Loki +weight: 300 +killercoda: + title: Getting started with the OpenTelemetry Collector and Loki tutorial + description: A tutorial on configuring the OpenTelemetry Collector to send OpenTelemetry logs to Loki + preprocessing: + substitutions: + - regexp: loki-fundamentals-otel-collector-1 + replacement: loki-fundamentals_otel-collector_1 + backend: + imageid: ubuntu +--- + + + +# Getting started with the OpenTelemetry Collector and Loki tutorial + +The OpenTelemetry Collector offers a vendor-agnostic implementation of how to receive, process, and export telemetry data. With the introduction of the OTLP endpoint in Loki, you can now send logs from applications instrumented with OpenTelemetry to Loki using the OpenTelemetry Collector in native OTLP format. +In this example, we will teach you how to configure the OpenTelemetry Collector to receive logs in the OpenTelemetry format and send them to Loki using the OTLP HTTP protocol. This will involve configuring the following components in the OpenTelemetry Collector: +- **OpenTelemetry Receiver:** This component will receive logs in the OpenTelemetry format via HTTP and gRPC. +- **OpenTelemetry Processor:** This component will accept telemetry data from other `otelcol.*` components and place them into batches. Batching improves the compression of data and reduces the number of outgoing network requests required to transmit data. +- **OpenTelemetry Exporter:** This component will accept telemetry data from other `otelcol.*` components and write them over the network using the OTLP HTTP protocol. We will use this exporter to send the logs to the Loki native OTLP endpoint. 
+ + + +## Dependencies + +Before you begin, ensure you have the following to run the demo: + +- Docker +- Docker Compose + +{{< admonition type="tip" >}} +Alternatively, you can try out this example in our interactive learning environment: [Getting started with the OpenTelemetry Collector and Loki tutorial](https://killercoda.com/grafana-labs/course/loki/otel-collector-getting-started). + +It's a fully configured environment with all the dependencies already installed. + +![Interactive](/media/docs/loki/loki-ile.svg) + +Provide feedback, report bugs, and raise issues for the tutorial in the [Grafana Killercoda repository](https://github.com/grafana/killercoda). +{{< /admonition >}} + + + +## Scenario + +In this scenario, we have a microservices application called the Carnivorous Greenhouse. This application consists of the following services: + +- **User Service:** Manages user data and authentication for the application, such as creating users and logging in. +- **Plant Service:** Manages the creation of new plants and updates other services when a new plant is created. +- **Simulation Service:** Generates sensor data for each plant. +- **Websocket Service:** Manages the websocket connections for the application. +- **Bug Service:** A service that, when enabled, randomly causes services to fail and generate additional logs. +- **Main App:** The main application that ties all the services together. +- **Database:** A database that stores user and plant data. + +Each service generates logs using the OpenTelemetry SDK and exports them to the OpenTelemetry Collector in the OpenTelemetry format (OTLP). The Collector then ingests the logs and sends them to Loki. + + + + + +## Step 1: Environment setup + +In this step, we will set up our environment by cloning the repository that contains our demo application and spinning up our observability stack using Docker Compose. + +1. To get started, clone the repository that contains our demo application: + + ```bash + git clone -b microservice-otel-collector https://github.com/grafana/loki-fundamentals.git + ``` + +1. Next, we will spin up our observability stack using Docker Compose: + + + ```bash + docker compose -f loki-fundamentals/docker-compose.yml up -d + ``` + + + {{< docs/ignore >}} + + + ```bash + docker-compose -f loki-fundamentals/docker-compose.yml up -d + ``` + + + {{< /docs/ignore >}} + + To check the status of the services, we can run the following command: + ```bash + docker ps -a + ``` + + {{< admonition type="note" >}} + The OpenTelemetry Collector container will show as `Stopped` or `Exited (1) About a minute ago`. This is expected as we have provided an empty configuration file. We will update this file in the next step. + {{< /admonition >}} + + + +After we've finished configuring the OpenTelemetry Collector and sending logs to Loki, we will be able to view the logs in Grafana. To check if Grafana is up and running, navigate to the following URL: [http://localhost:3000](http://localhost:3000) + + + + +## Step 2: Configuring the OpenTelemetry Collector + +To configure the Collector to ingest OpenTelemetry logs from our application, we need to provide a configuration file. This configuration file will define the components and their relationships. We will build the entire observability pipeline within this configuration file. + +### Open your code editor and locate the `otel-config.yaml` file + +The configuration file is written using **YAML** configuration syntax. 
To start, we will open the `otel-config.yaml` file in the code editor: + +{{< docs/ignore >}} +**Note: Killercoda has a built-in code editor which can be accessed via the `Editor` tab.** +1. Expand the `loki-fundamentals` directory in the file explorer of the `Editor` tab. +2. Locate the `otel-config.yaml` file in the top level directory, `loki-fundamentals`. +3. Click on the `otel-config.yaml` file to open it in the code editor. +{{< /docs/ignore >}} + + +1. Open the `loki-fundamentals` directory in a code editor of your choice. +1. Locate the `otel-config.yaml` file in the `loki-fundamentals` directory (top-level directory). +1. Click on the `otel-config.yaml` file to open it in the code editor. + + +You will copy all three of the following configuration snippets into the `otel-config.yaml` file. + +### Receive OpenTelemetry logs via gRPC and HTTP + +First, we will configure the OpenTelemetry receiver. `otlp:` accepts logs in the OpenTelemetry format via HTTP and gRPC. We will use this receiver to receive logs from the Carnivorous Greenhouse application. + +Now add the following configuration to the `otel-config.yaml` file: +```yaml +# Receivers +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 +``` + +In this configuration: +- `receivers`: The list of receivers to receive telemetry data. In this case, we are using the `otlp` receiver. +- `otlp`: The OpenTelemetry receiver that accepts logs in the OpenTelemetry format. +- `protocols`: The list of protocols that the receiver supports. In this case, we are using `grpc` and `http`. +- `grpc`: The gRPC protocol configuration. The receiver will accept logs via gRPC on port `4317`. +- `http`: The HTTP protocol configuration. The receiver will accept logs via HTTP on port `4318`. +- `endpoint`: The IP address and port number to listen on. In this case, we are listening on all IP addresses on port `4317` for gRPC and port `4318` for HTTP. + +For more information on the `otlp` receiver configuration, see the [OpenTelemetry Receiver OTLP documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/otlpreceiver/README.md). + + +### Create batches of logs using an OpenTelemetry Processor + +Next, add the following configuration to the `otel-config.yaml` file: +```yaml +# Processors +processors: + batch: +``` + +In this configuration: +- `processors`: The list of processors to process telemetry data. In this case, we are using the `batch` processor. +- `batch`: The OpenTelemetry processor that accepts telemetry data from other `otelcol` components and places them into batches. + +For more information on the `batch` processor configuration, see the [OpenTelemetry Processor Batch documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/batchprocessor/README.md). + +### Export logs to Loki using an OpenTelemetry Exporter + +We will use the `otlphttp/logs` exporter to send the logs to the Loki native OTLP endpoint. Add the following configuration to the `otel-config.yaml` file: +```yaml +# Exporters +exporters: + otlphttp/logs: + endpoint: "http://loki:3100/otlp" + tls: + insecure: true +``` +In this configuration: +- `exporters`: The list of exporters to export telemetry data. In this case, we are using the `otlphttp/logs` exporter. +- `otlphttp/logs`: The OpenTelemetry exporter that accepts telemetry data from other `otelcol` components and writes them over the network using the OTLP HTTP protocol. +- `endpoint`: The URL to send the telemetry data to. 
In this case, we are sending the logs to the Loki native OTLP endpoint at `http://loki:3100/otlp`. +- `tls`: The TLS configuration for the exporter. In this case, we are setting `insecure` to `true` to disable TLS verification. +- `insecure`: Disables TLS verification. This is set to `true` as we are using an insecure connection. + +For more information on the `otlphttp/logs` exporter configuration, see the [OpenTelemetry Exporter OTLP HTTP documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/otlphttpexporter/README.md). + +### Creating the Pipeline + +Now that we have configured the receiver, processor, and exporter, we need to create a pipeline to connect these components. Add the following configuration to the `otel-config.yaml` file: +```yaml +# Pipelines +service: + pipelines: + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlphttp/logs] +``` + +In this configuration: +- `pipelines`: The list of pipelines to connect the receiver, processor, and exporter. In this case, we are using the `logs` pipeline, but there are also pipelines for metrics, traces, and continuous profiling. +- `receivers`: The list of receivers to receive telemetry data. In this case, we are using the `otlp` receiver component we created earlier. +- `processors`: The list of processors to process telemetry data. In this case, we are using the `batch` processor component we created earlier. +- `exporters`: The list of exporters to export telemetry data. In this case, we are using the `otlphttp/logs` exporter component we created earlier. + + +### Load the Configuration + +Before you load the configuration into the OpenTelemetry Collector, compare your configuration with the completed configuration below: + +```yaml +# Receivers +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + +# Processors +processors: + batch: + +# Exporters +exporters: + otlphttp/logs: + endpoint: "http://loki:3100/otlp" + tls: + insecure: true + +# Pipelines +service: + pipelines: + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlphttp/logs] +``` +Next, we need to apply the configuration to the OpenTelemetry Collector. To do this, we will restart the OpenTelemetry Collector container: + +```bash +docker restart loki-fundamentals-otel-collector-1 +``` + + +This will restart the OpenTelemetry Collector container with the new configuration. You can check the logs of the OpenTelemetry Collector container to see if the configuration was loaded successfully: + +```bash +docker logs loki-fundamentals-otel-collector-1 +``` + +Within the logs, you should see the following message: +```console +2024-08-02T13:10:25.136Z info service@v0.106.1/service.go:225 Everything is ready. Begin running and processing data. +``` + +## Stuck? Need help? + +If you get stuck or need help creating the configuration, you can copy and replace the entire `otel-config.yaml` using the completed configuration file: + + +```bash +cp loki-fundamentals/completed/otel-config.yaml loki-fundamentals/otel-config.yaml +docker restart loki-fundamentals-otel-collector-1 +``` + + + + + + +## Step 3: Start the Carnivorous Greenhouse + +In this step, we will start the Carnivorous Greenhouse application. To start the application, run the following command: + +{{< admonition type="note" >}} +This docker-compose file relies on the `loki-fundamentals_loki` Docker network. If you have not started the observability stack, you will need to start it first. 
+{{< /admonition >}} + + +{{< docs/ignore >}} + +**Note: This docker-compose file relies on the `loki-fundamentals_loki` Docker network. If you have not started the observability stack, you will need to start it first.** + +{{< /docs/ignore >}} + + +```bash +docker compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d --build +``` + + + +{{< docs/ignore >}} + + +```bash +docker-compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d --build +``` + + +{{< /docs/ignore >}} + +This will start the following services: +```console + ✔ Container greenhouse-db-1 Started + ✔ Container greenhouse-websocket_service-1 Started + ✔ Container greenhouse-bug_service-1 Started + ✔ Container greenhouse-user_service-1 Started + ✔ Container greenhouse-plant_service-1 Started + ✔ Container greenhouse-simulation_service-1 Started + ✔ Container greenhouse-main_app-1 Started +``` + +Once started, you can access the Carnivorous Greenhouse application at [http://localhost:5005](http://localhost:5005). Generate some logs by interacting with the application in the following ways: + +1. Create a user. +1. Log in. +1. Create a few plants to monitor. +1. Enable bug mode to activate the bug service. This will cause services to fail and generate additional logs. + +Finally, to view the logs in Loki, navigate to the Loki Logs Explore view in Grafana at [http://localhost:3000/a/grafana-lokiexplore-app/explore](http://localhost:3000/a/grafana-lokiexplore-app/explore). + + + + + + +## Summary + +In this example, we configured the OpenTelemetry Collector to receive logs from an example application and send them to Loki using the native OTLP endpoint. Make sure to also consult the Loki configuration file `loki-config.yaml` to understand how we have configured Loki to receive logs from the OpenTelemetry Collector. + +{{< docs/ignore >}} + +### Back to Docs +Head back to where you started from to continue with the [Loki documentation](https://grafana.com/docs/loki/latest/send-data/otel). + +{{< /docs/ignore >}} + + +## Further reading + +For more information on the OpenTelemetry Collector and the native OTLP endpoint of Loki, refer to the following resources: + +- [Loki OTLP endpoint](https://grafana.com/docs/loki//send-data/otel/) +- [How is native OTLP endpoint different from Loki Exporter](https://grafana.com/docs/loki//send-data/otel/native_otlp_vs_loki_exporter) +- [OpenTelemetry Collector Configuration](https://opentelemetry.io/docs/collector/configuration/) + + +## Complete metrics, logs, traces, and profiling example + +If you would like to use a demo that includes Mimir, Loki, Tempo, and Grafana, you can use [Introduction to Metrics, Logs, Traces, and Profiling in Grafana](https://github.com/grafana/intro-to-mltp). `Intro-to-mltp` provides a self-contained environment for learning about Mimir, Loki, Tempo, and Grafana. + +The project includes detailed explanations of each component and annotated configurations for a single-instance deployment. Data from `intro-to-mltp` can also be pushed to Grafana Cloud. 
+ + + diff --git a/docs/sources/setup/install/_index.md b/docs/sources/setup/install/_index.md index 2b56cba78cb6..67356a44d4d9 100644 --- a/docs/sources/setup/install/_index.md +++ b/docs/sources/setup/install/_index.md @@ -9,7 +9,7 @@ weight: 200 # Install Loki -There are several methods of installing Loki and Promtail: +There are several methods of installing Loki: - [Install using Helm (recommended)]({{< relref "./helm" >}}) - [Install using Tanka]({{< relref "./tanka" >}}) @@ -17,12 +17,16 @@ There are several methods of installing Loki and Promtail: - [Install and run locally]({{< relref "./local" >}}) - [Install from source]({{< relref "./install-from-source" >}}) +Alloy: +- [Install Alloy](https://grafana.com/docs/alloy/latest/set-up/install/) +- [Ingest Logs with Alloy]({{< relref "../../send-data/alloy" >}}) + ## General process In order to run Loki, you must: -1. Download and install both Loki and Promtail. +1. Download and install both Loki and Alloy. 1. Download config files for both programs. 1. Start Loki. -1. Update the Promtail config file to get your logs into Loki. -1. Start Promtail. +1. Update the Alloy config file to get your logs into Loki. +1. Start Alloy. diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index c8d5f7bbbd10..43d246ec1fad 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -6186,6 +6186,7 @@ null "azure": { "accountKey": null, "accountName": null, + "chunkDelimiter": null, "connectionString": null, "endpointSuffix": null, "requestTimeout": null, diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index ce2c8d359d75..132de42b8107 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -839,6 +839,20 @@ kafka_config: # CLI flag: -kafka.producer-max-buffered-bytes [producer_max_buffered_bytes: | default = 1073741824] + # The best-effort maximum lag a consumer tries to achieve at startup. Set both + # -kafka.target-consumer-lag-at-startup and -kafka.max-consumer-lag-at-startup + # to 0 to disable waiting for maximum consumer lag being honored at startup. + # CLI flag: -kafka.target-consumer-lag-at-startup + [target_consumer_lag_at_startup: | default = 2s] + + # The guaranteed maximum lag before a consumer is considered to have caught up + # reading from a partition at startup, becomes ACTIVE in the hash ring and + # passes the readiness check. Set both -kafka.target-consumer-lag-at-startup + # and -kafka.max-consumer-lag-at-startup to 0 to disable waiting for maximum + # consumer lag being honored at startup. + # CLI flag: -kafka.max-consumer-lag-at-startup + [max_consumer_lag_at_startup: | default = 15s] + # Configuration for 'runtime config' module, responsible for reloading runtime # configuration file. [runtime_config: ] @@ -1298,18 +1312,9 @@ Experimental: The `bloom_gateway` block configures the Loki bloom gateway server client: # Configures the behavior of the connection pool. pool_config: - # How frequently to clean up clients for servers that have gone away or are - # unhealthy. + # How frequently to update the list of servers. # CLI flag: -bloom-gateway-client.pool.check-interval - [check_interval: | default = 10s] - - # Run a health check on each server during periodic cleanup. - # CLI flag: -bloom-gateway-client.pool.enable-health-check - [enable_health_check: | default = true] - - # Timeout for the health check if health check is enabled. 
- # CLI flag: -bloom-gateway-client.pool.health-check-timeout - [health_check_timeout: | default = 1s] + [check_interval: | default = 15s] # The grpc_client block configures the gRPC client used to communicate between # a client and server component in Loki. @@ -4223,6 +4228,11 @@ engine: # When true, querier limits sent via a header are enforced. # CLI flag: -querier.per-request-limits-enabled [per_request_limits_enabled: | default = false] + +# When true, querier directs ingester queries to the partition-ingesters instead +# of the normal ingesters. +# CLI flag: -querier.query-partition-ingesters +[query_partition_ingesters: | default = false] ``` ### query_range diff --git a/go.mod b/go.mod index 0c57aaded1b5..bfccc98c18c0 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/grafana/loki/v3 -go 1.21.8 +go 1.22 + +toolchain go1.23.2 require ( cloud.google.com/go/bigtable v1.29.0 @@ -49,7 +51,7 @@ require ( github.com/gorilla/mux v1.8.1 github.com/gorilla/websocket v1.5.3 github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 - github.com/grafana/dskit v0.0.0-20240930165212-f52de24af9bc + github.com/grafana/dskit v0.0.0-20241007172036-53283a0f6b41 github.com/grafana/go-gelf/v2 v2.0.1 github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc @@ -93,7 +95,7 @@ require ( github.com/stretchr/testify v1.9.0 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/xdg-go/scram v1.1.2 - go.etcd.io/bbolt v1.3.10 + go.etcd.io/bbolt v1.3.11 go.uber.org/atomic v1.11.0 go.uber.org/goleak v1.3.0 golang.org/x/crypto v0.27.0 @@ -123,7 +125,7 @@ require ( github.com/efficientgo/core v1.0.0-rc.3 github.com/fsnotify/fsnotify v1.7.0 github.com/gogo/googleapis v1.4.1 - github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32 + github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 github.com/grafana/loki/pkg/push v0.0.0-20240924133635-758364c7775f github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/hashicorp/raft v1.7.1 @@ -136,7 +138,7 @@ require ( github.com/prometheus/common/sigv4 v0.1.0 github.com/richardartoul/molecule v1.0.0 github.com/schollz/progressbar/v3 v3.14.6 - github.com/shirou/gopsutil/v4 v4.24.0-alpha.1 + github.com/shirou/gopsutil/v4 v4.24.8 github.com/thanos-io/objstore v0.0.0-20240818203309-0363dadfdfb1 github.com/twmb/franz-go v1.17.1 github.com/twmb/franz-go/pkg/kadm v1.13.0 diff --git a/go.sum b/go.sum index eae4cb57fffb..62b957a7ce01 100644 --- a/go.sum +++ b/go.sum @@ -1042,16 +1042,16 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw= -github.com/grafana/dskit v0.0.0-20240930165212-f52de24af9bc h1:OLRT3mpHjvTjq4Km7L36lipZ+hTOX1U3jct0DFMADdo= -github.com/grafana/dskit v0.0.0-20240930165212-f52de24af9bc/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= +github.com/grafana/dskit v0.0.0-20241007172036-53283a0f6b41 h1:a4O59OU3FJZ+EJUVnlvvNTvdAc4uRN1P6EaGwqL9CnA= +github.com/grafana/dskit v0.0.0-20241007172036-53283a0f6b41/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak= github.com/grafana/go-gelf/v2 v2.0.1/go.mod 
h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE= github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= -github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32 h1:NznuPwItog+rwdVg8hAuGKP29ndRSzJAwhxKldkP8oQ= -github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY= +github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 h1:U94jQ2TQr1m3HNyE8efSdyaBbDrdPaWImXyenuKZ/nw= +github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= @@ -1709,8 +1709,8 @@ github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYM github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI= -github.com/shirou/gopsutil/v4 v4.24.0-alpha.1 h1:lLPAdP4TpfgJ5byoc3EFwNSKZj8kCnDFHtuWTktWl0s= -github.com/shirou/gopsutil/v4 v4.24.0-alpha.1/go.mod h1:GVpYUxBee6CTWux2/JslZ7fYPwqkQ8YDJSXmGAryYy4= +github.com/shirou/gopsutil/v4 v4.24.8 h1:pVQjIenQkIhqO81mwTaXjTzOMT7d3TZkf43PlVFHENI= +github.com/shirou/gopsutil/v4 v4.24.8/go.mod h1:wE0OrJtj4dG+hYkxqDH3QiBICdKSf04/npcvLLc/oRg= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -1891,8 +1891,8 @@ go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/X go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= -go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= +go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= +go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= diff --git a/loki-build-image/Dockerfile b/loki-build-image/Dockerfile index ec35fbb249f4..6c499c3ec26d 100644 --- a/loki-build-image/Dockerfile +++ b/loki-build-image/Dockerfile @@ -5,8 +5,10 @@ # See ../docs/sources/community/maintaining/release-loki-build-image.md for 
instructions # on how to publish a new build image. ARG GO_VERSION=1.23 +ARG GOLANG_BASE_IMAGE=golang:${GO_VERSION}-bullseye + # Install helm (https://helm.sh/) and helm-docs (https://github.com/norwoodj/helm-docs) for generating Helm Chart reference. -FROM golang:${GO_VERSION}-bookworm AS helm +FROM ${GOLANG_BASE_IMAGE} AS helm ARG TARGETARCH ARG HELM_VER="v3.2.3" RUN curl -L "https://get.helm.sh/helm-${HELM_VER}-linux-$TARGETARCH.tar.gz" | tar zx && \ @@ -38,7 +40,7 @@ RUN apk add --no-cache curl && \ FROM alpine:3.20.3 AS docker RUN apk add --no-cache docker-cli docker-cli-buildx -FROM golang:${GO_VERSION}-bookworm AS drone +FROM ${GOLANG_BASE_IMAGE} AS drone ARG TARGETARCH RUN curl -L "https://github.com/drone/drone-cli/releases/download/v1.7.0/drone_linux_$TARGETARCH".tar.gz | tar zx && \ install -t /usr/local/bin drone @@ -48,35 +50,35 @@ RUN curl -L "https://github.com/drone/drone-cli/releases/download/v1.7.0/drone_l # Error: # github.com/fatih/faillint@v1.5.0 requires golang.org/x/tools@v0.0.0-20200207224406-61798d64f025 # (not golang.org/x/tools@v0.0.0-20190918214920-58d531046acd from golang.org/x/tools/cmd/goyacc@58d531046acdc757f177387bc1725bfa79895d69) -FROM golang:${GO_VERSION}-bookworm AS faillint +FROM ${GOLANG_BASE_IMAGE} AS faillint RUN GO111MODULE=on go install github.com/fatih/faillint@v1.12.0 RUN GO111MODULE=on go install golang.org/x/tools/cmd/goimports@v0.7.0 -FROM golang:${GO_VERSION}-bookworm AS delve +FROM ${GOLANG_BASE_IMAGE} AS delve RUN GO111MODULE=on go install github.com/go-delve/delve/cmd/dlv@latest # Install ghr used to push binaries and template the release # This collides with the version of go tools used in the base image, thus we install it in its own image and copy it over. -FROM golang:${GO_VERSION}-bookworm AS ghr +FROM ${GOLANG_BASE_IMAGE} AS ghr RUN GO111MODULE=on go install github.com/tcnksm/ghr@9349474 # Install nfpm (https://nfpm.goreleaser.com) for creating .deb and .rpm packages. -FROM golang:${GO_VERSION}-bookworm AS nfpm +FROM ${GOLANG_BASE_IMAGE} AS nfpm RUN GO111MODULE=on go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.11.3 # Install gotestsum -FROM golang:${GO_VERSION}-bookworm AS gotestsum +FROM ${GOLANG_BASE_IMAGE} AS gotestsum RUN GO111MODULE=on go install gotest.tools/gotestsum@v1.8.2 # Install tools used to compile jsonnet. 
-FROM golang:${GO_VERSION}-bookworm AS jsonnet +FROM ${GOLANG_BASE_IMAGE} AS jsonnet RUN GO111MODULE=on go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@v0.5.1 RUN GO111MODULE=on go install github.com/monitoring-mixins/mixtool/cmd/mixtool@16dc166166d91e93475b86b9355a4faed2400c18 RUN GO111MODULE=on go install github.com/google/go-jsonnet/cmd/jsonnet@v0.20.0 FROM aquasec/trivy AS trivy -FROM golang:${GO_VERSION}-bookworm +FROM ${GOLANG_BASE_IMAGE} RUN apt-get update && \ apt-get install -qy \ musl gnupg ragel \ diff --git a/pkg/bloombuild/builder/batch_test.go b/pkg/bloombuild/builder/batch_test.go index e0fe37a0e448..608ab0807c9e 100644 --- a/pkg/bloombuild/builder/batch_test.go +++ b/pkg/bloombuild/builder/batch_test.go @@ -106,7 +106,6 @@ func TestBatchedLoader(t *testing.T) { inputs: [][]int{{0}}, }, } { - tc := tc t.Run(tc.desc, func(t *testing.T) { fetchers := make([]Fetcher[int, int], 0, len(tc.inputs)) for range tc.inputs { @@ -193,7 +192,6 @@ func TestOverlappingBlocksIter(t *testing.T) { exp: 2, }, } { - tc := tc t.Run(tc.desc, func(t *testing.T) { it := overlappingBlocksIter(tc.inp) var overlapping [][]bloomshipper.BlockRef diff --git a/pkg/bloomgateway/client.go b/pkg/bloomgateway/client.go index 2529a678e779..a873d04960b4 100644 --- a/pkg/bloomgateway/client.go +++ b/pkg/bloomgateway/client.go @@ -161,7 +161,7 @@ func NewClient( } } - poolFactory := func(addr string) (ringclient.PoolClient, error) { + clientFactory := func(addr string) (ringclient.PoolClient, error) { pool, err := NewBloomGatewayGRPCPool(addr, dialOpts) if err != nil { return nil, errors.Wrap(err, "new bloom gateway grpc pool") @@ -185,17 +185,10 @@ func NewClient( // Make an attempt to do one DNS lookup so we can start with addresses dnsProvider.RunOnce() - clientPool := ringclient.NewPool( - "bloom-gateway", - ringclient.PoolConfig(cfg.PoolConfig), - func() ([]string, error) { return dnsProvider.Addresses(), nil }, - ringclient.PoolAddrFunc(poolFactory), - metrics.clients, - logger, - ) - - pool := NewJumpHashClientPool(clientPool, dnsProvider, cfg.PoolConfig.CheckInterval, logger) - pool.Start() + pool, err := NewJumpHashClientPool(clientFactory, dnsProvider, cfg.PoolConfig.CheckInterval, logger) + if err != nil { + return nil, err + } return &GatewayClient{ cfg: cfg, diff --git a/pkg/bloomgateway/client_pool.go b/pkg/bloomgateway/client_pool.go index 989ced34c673..4b45292bef88 100644 --- a/pkg/bloomgateway/client_pool.go +++ b/pkg/bloomgateway/client_pool.go @@ -3,7 +3,7 @@ package bloomgateway import ( "context" "flag" - "sort" + "sync" "time" "github.com/go-kit/log" @@ -15,38 +15,45 @@ import ( ) // PoolConfig is config for creating a Pool. -// It has the same fields as "github.com/grafana/dskit/ring/client.PoolConfig" so it can be cast. type PoolConfig struct { - CheckInterval time.Duration `yaml:"check_interval"` - HealthCheckEnabled bool `yaml:"enable_health_check"` - HealthCheckTimeout time.Duration `yaml:"health_check_timeout"` - MaxConcurrentHealthChecks int `yaml:"-"` + CheckInterval time.Duration `yaml:"check_interval"` } // RegisterFlags adds the flags required to config this to the given FlagSet. 
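+// Note that the pool no longer performs health checks on its clients; the
+// check interval now only controls how often the server list is refreshed.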
func (cfg *PoolConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.DurationVar(&cfg.CheckInterval, prefix+"check-interval", 10*time.Second, "How frequently to clean up clients for servers that have gone away or are unhealthy.") - f.BoolVar(&cfg.HealthCheckEnabled, prefix+"enable-health-check", true, "Run a health check on each server during periodic cleanup.") - f.DurationVar(&cfg.HealthCheckTimeout, prefix+"health-check-timeout", 1*time.Second, "Timeout for the health check if health check is enabled.") + f.DurationVar(&cfg.CheckInterval, prefix+"check-interval", 15*time.Second, "How frequently to update the list of servers.") } func (cfg *PoolConfig) Validate() error { return nil } +// compiler check +var _ clientPool = &JumpHashClientPool{} + +type ClientFactory func(addr string) (client.PoolClient, error) + +func (f ClientFactory) New(addr string) (client.PoolClient, error) { + return f(addr) +} + type JumpHashClientPool struct { - *client.Pool + services.Service *jumphash.Selector + sync.RWMutex + + provider AddressProvider + logger log.Logger - done chan struct{} - logger log.Logger + clients map[string]client.PoolClient + clientFactory ClientFactory } type AddressProvider interface { Addresses() []string } -func NewJumpHashClientPool(pool *client.Pool, dnsProvider AddressProvider, updateInterval time.Duration, logger log.Logger) *JumpHashClientPool { +func NewJumpHashClientPool(clientFactory ClientFactory, dnsProvider AddressProvider, updateInterval time.Duration, logger log.Logger) (*JumpHashClientPool, error) { selector := jumphash.DefaultSelector() err := selector.SetServers(dnsProvider.Addresses()...) if err != nil { @@ -54,14 +61,19 @@ func NewJumpHashClientPool(pool *client.Pool, dnsProvider AddressProvider, updat } p := &JumpHashClientPool{ - Pool: pool, - Selector: selector, - done: make(chan struct{}), - logger: logger, + Selector: selector, + clientFactory: clientFactory, + provider: dnsProvider, + logger: logger, + clients: make(map[string]client.PoolClient, len(dnsProvider.Addresses())), } - go p.updateLoop(dnsProvider, updateInterval) - return p + p.Service = services.NewTimerService(updateInterval, nil, p.updateLoop, nil) + return p, services.StartAndAwaitRunning(context.Background(), p.Service) +} + +func (p *JumpHashClientPool) Stop() { + _ = services.StopAndAwaitTerminated(context.Background(), p.Service) } func (p *JumpHashClientPool) AddrForFingerprint(fp uint64) (string, error) { @@ -80,35 +92,42 @@ func (p *JumpHashClientPool) Addr(key string) (string, error) { return addr.String(), nil } -func (p *JumpHashClientPool) Start() { - ctx := context.Background() - _ = services.StartAndAwaitRunning(ctx, p.Pool) +func (p *JumpHashClientPool) updateLoop(_ context.Context) error { + err := p.SetServers(p.provider.Addresses()...) + if err != nil { + level.Warn(p.logger).Log("msg", "error updating servers", "err", err) + } + return nil } -func (p *JumpHashClientPool) Stop() { - ctx := context.Background() - _ = services.StopAndAwaitTerminated(ctx, p.Pool) - close(p.done) -} +// GetClientFor implements clientPool. 
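+// It returns the cached client for addr if one exists; otherwise it creates
+// one via the ClientFactory, re-checking the cache under the write lock so
+// that concurrent callers never create more than one client per address.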
+func (p *JumpHashClientPool) GetClientFor(addr string) (client.PoolClient, error) {
+	client, ok := p.fromCache(addr)
+	if ok {
+		return client, nil
+	}
+
+	// No client in cache, so create one
+	p.Lock()
+	defer p.Unlock()
-func (p *JumpHashClientPool) updateLoop(provider AddressProvider, updateInterval time.Duration) {
-	ticker := time.NewTicker(updateInterval)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-p.done:
-			return
-		case <-ticker.C:
-			servers := provider.Addresses()
-			// ServerList deterministically maps keys to _index_ of the server list.
-			// Since DNS returns records in different order each time, we sort to
-			// guarantee best possible match between nodes.
-			sort.Strings(servers)
-			err := p.SetServers(servers...)
-			if err != nil {
-				level.Warn(p.logger).Log("msg", "error updating servers", "err", err)
-			}
-		}
+	// Check if a client has been created just after checking the cache and before acquiring the lock.
+	client, ok = p.clients[addr]
+	if ok {
+		return client, nil
 	}
+
+	client, err := p.clientFactory.New(addr)
+	if err != nil {
+		return nil, err
+	}
+	p.clients[addr] = client
+	return client, nil
+}
+
+func (p *JumpHashClientPool) fromCache(addr string) (client.PoolClient, bool) {
+	p.RLock()
+	defer p.RUnlock()
+	client, ok := p.clients[addr]
+	return client, ok
 }
diff --git a/pkg/bloomgateway/client_pool_test.go b/pkg/bloomgateway/client_pool_test.go
index 5e3792861f4c..a592bf241786 100644
--- a/pkg/bloomgateway/client_pool_test.go
+++ b/pkg/bloomgateway/client_pool_test.go
@@ -31,7 +31,8 @@ func TestJumpHashClientPool_UpdateLoop(t *testing.T) {
 	provider := &provider{}
 	provider.UpdateAddresses([]string{"localhost:9095"})

-	pool := NewJumpHashClientPool(nil, provider, interval, log.NewNopLogger())
+	pool, err := NewJumpHashClientPool(nil, provider, interval, log.NewNopLogger())
+	require.NoError(t, err)
 	require.Len(t, pool.Addrs(), 1)
 	require.Equal(t, "127.0.0.1:9095", pool.Addrs()[0].String())
diff --git a/pkg/bloomgateway/metrics.go b/pkg/bloomgateway/metrics.go
index 9fe096eec2ac..4eeffbf8ad68 100644
--- a/pkg/bloomgateway/metrics.go
+++ b/pkg/bloomgateway/metrics.go
@@ -119,7 +119,7 @@ type workerMetrics struct {
 	dequeueDuration    *prometheus.HistogramVec
 	queueDuration      *prometheus.HistogramVec
 	processDuration    *prometheus.HistogramVec
-	tasksDequeued      *prometheus.CounterVec
+	tasksDequeued      *prometheus.HistogramVec
 	tasksProcessed     *prometheus.CounterVec
 	blocksNotAvailable *prometheus.CounterVec
 	blockQueryLatency  *prometheus.HistogramVec
@@ -147,11 +147,12 @@ func newWorkerMetrics(registerer prometheus.Registerer, namespace, subsystem str
 			Name:      "process_duration_seconds",
 			Help:      "Time spent processing tasks in seconds",
 		}, append(labels, "status")),
-		tasksDequeued: r.NewCounterVec(prometheus.CounterOpts{
+		tasksDequeued: r.NewHistogramVec(prometheus.HistogramOpts{
 			Namespace: namespace,
 			Subsystem: subsystem,
-			Name:      "tasks_dequeued_total",
-			Help:      "Total amount of tasks that the worker dequeued from the queue",
+			Name:      "tasks_dequeued",
+			Help:      "Number of tasks that the worker dequeued from the queue at once",
+			Buckets:   prometheus.ExponentialBuckets(1, 2, 8), // [1, 2, ..., 128]
 		}, append(labels, "status")),
 		tasksProcessed: r.NewCounterVec(prometheus.CounterOpts{
 			Namespace: namespace,
diff --git a/pkg/bloomgateway/worker.go b/pkg/bloomgateway/worker.go
index 6aa1082b8933..81092448ab52 100644
--- a/pkg/bloomgateway/worker.go
+++ b/pkg/bloomgateway/worker.go
@@ -76,7 +76,7 @@ func (w *worker) running(_ context.Context) error {
 			if err == queue.ErrStopped &&
len(items) == 0 { return err } - w.metrics.tasksDequeued.WithLabelValues(w.id, labelFailure).Inc() + w.metrics.tasksDequeued.WithLabelValues(w.id, labelFailure).Observe(1) level.Error(w.logger).Log("msg", "failed to dequeue tasks", "err", err, "items", len(items)) } idx = newIdx @@ -86,7 +86,7 @@ func (w *worker) running(_ context.Context) error { continue } - w.metrics.tasksDequeued.WithLabelValues(w.id, labelSuccess).Add(float64(len(items))) + w.metrics.tasksDequeued.WithLabelValues(w.id, labelSuccess).Observe(float64(len(items))) tasks := make([]Task, 0, len(items)) for _, item := range items { diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go index 24d4ab2d2c2c..c8795fa190bb 100644 --- a/pkg/chunkenc/memchunk_test.go +++ b/pkg/chunkenc/memchunk_test.go @@ -86,7 +86,6 @@ const ( func TestBlocksInclusive(t *testing.T) { for _, enc := range testEncodings { - enc := enc for _, format := range allPossibleFormats { chunkfmt, headfmt := format.chunkFormat, format.headBlockFmt chk := NewMemChunk(chunkfmt, enc, headfmt, testBlockSize, testTargetSize) @@ -105,7 +104,6 @@ func TestBlocksInclusive(t *testing.T) { func TestBlock(t *testing.T) { for _, enc := range testEncodings { - enc := enc for _, format := range allPossibleFormats { chunkFormat, headBlockFmt := format.chunkFormat, format.headBlockFmt t.Run(fmt.Sprintf("encoding:%v chunkFormat:%v headBlockFmt:%v", enc, chunkFormat, headBlockFmt), func(t *testing.T) { @@ -260,7 +258,6 @@ func TestBlock(t *testing.T) { func TestCorruptChunk(t *testing.T) { for _, enc := range testEncodings { - enc := enc for _, format := range allPossibleFormats { chunkfmt, headfmt := format.chunkFormat, format.headBlockFmt @@ -337,7 +334,6 @@ func TestReadFormatV1(t *testing.T) { func TestRoundtripV2(t *testing.T) { for _, testData := range allPossibleFormats { for _, enc := range testEncodings { - enc := enc t.Run(testNameWithFormats(enc, testData.chunkFormat, testData.headBlockFmt), func(t *testing.T) { t.Parallel() @@ -397,7 +393,6 @@ func testNameWithFormats(enc compression.Codec, chunkFormat byte, headBlockFmt H func TestRoundtripV3(t *testing.T) { for _, enc := range testEncodings { - enc := enc for _, format := range allPossibleFormats { chunkfmt, headfmt := format.chunkFormat, format.headBlockFmt t.Run(fmt.Sprintf("%v-%v", format, enc), func(t *testing.T) { @@ -422,10 +417,8 @@ func TestRoundtripV3(t *testing.T) { func TestSerialization(t *testing.T) { for _, testData := range allPossibleFormats { for _, enc := range testEncodings { - enc := enc // run tests with and without structured metadata since it is optional for _, appendWithStructuredMetadata := range []bool{false, true} { - appendWithStructuredMetadata := appendWithStructuredMetadata testName := testNameWithFormats(enc, testData.chunkFormat, testData.headBlockFmt) if appendWithStructuredMetadata { testName = fmt.Sprintf("%s - append structured metadata", testName) @@ -511,7 +504,6 @@ func TestSerialization(t *testing.T) { func TestChunkFilling(t *testing.T) { for _, testData := range allPossibleFormats { for _, enc := range testEncodings { - enc := enc t.Run(testNameWithFormats(enc, testData.chunkFormat, testData.headBlockFmt), func(t *testing.T) { t.Parallel() @@ -676,8 +668,6 @@ func TestMemChunk_AppendOutOfOrder(t *testing.T) { for _, f := range HeadBlockFmts { for testName, tester := range tests { - tester := tester - t.Run(testName, func(t *testing.T) { t.Parallel() @@ -1117,7 +1107,6 @@ func TestMemChunk_IteratorBounds(t *testing.T) { t.Run( 
fmt.Sprintf("mint:%d,maxt:%d,direction:%s", tt.mint.UnixNano(), tt.maxt.UnixNano(), tt.direction), func(t *testing.T) { - tt := tt c := createChunk() noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{}) @@ -1143,7 +1132,6 @@ func TestMemChunk_IteratorBounds(t *testing.T) { func TestMemchunkLongLine(t *testing.T) { for _, enc := range testEncodings { - enc := enc t.Run(enc.String(), func(t *testing.T) { t.Parallel() @@ -1777,7 +1765,6 @@ func TestMemChunk_SpaceFor(t *testing.T) { func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) { for _, enc := range testEncodings { - enc := enc t.Run(enc.String(), func(t *testing.T) { streamLabels := labels.Labels{ {Name: "job", Value: "fake"}, diff --git a/pkg/compactor/retention/retention_test.go b/pkg/compactor/retention/retention_test.go index b68d9e39f42c..cdc7ef61dbc9 100644 --- a/pkg/compactor/retention/retention_test.go +++ b/pkg/compactor/retention/retention_test.go @@ -154,7 +154,6 @@ func Test_Retention(t *testing.T) { }, }, } { - tt := tt t.Run(tt.name, func(t *testing.T) { // insert in the store. var ( @@ -566,7 +565,6 @@ func TestChunkRewriter(t *testing.T) { }, }, } { - tt := tt t.Run(tt.name, func(t *testing.T) { store := newTestStore(t) require.NoError(t, store.Put(context.TODO(), []chunk.Chunk{tt.chunk})) diff --git a/pkg/compactor/table_test.go b/pkg/compactor/table_test.go index b4f71bbc9395..c659bda051f3 100644 --- a/pkg/compactor/table_test.go +++ b/pkg/compactor/table_test.go @@ -350,7 +350,6 @@ func TestTable_CompactionRetention(t *testing.T) { }), }, } { - tt := tt commonDBsConfig := IndexesConfig{ NumCompactedFiles: tt.dbsSetup.numCompactedDBs, NumUnCompactedFiles: tt.dbsSetup.numUnCompactedCommonDBs, diff --git a/pkg/compression/pool_test.go b/pkg/compression/pool_test.go index fc5ba08a0d48..4d7d7ee15400 100644 --- a/pkg/compression/pool_test.go +++ b/pkg/compression/pool_test.go @@ -16,7 +16,6 @@ import ( func TestPool(t *testing.T) { for _, enc := range supportedCodecs { - enc := enc t.Run(enc.String(), func(t *testing.T) { var wg sync.WaitGroup diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 01dae3ee6e0c..ac815cbe82cd 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -279,11 +279,13 @@ func New( Help: "Total number of times the distributor has sharded streams", }), kafkaAppends: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ - Name: "kafka_appends_total", - Help: "The total number of appends sent to kafka ingest path.", + Namespace: constants.Loki, + Name: "distributor_kafka_appends_total", + Help: "The total number of appends sent to kafka ingest path.", }, []string{"partition", "status"}), kafkaWriteLatency: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ - Name: "kafka_latency_seconds", + Namespace: constants.Loki, + Name: "distributor_kafka_latency_seconds", Help: "Latency to write an incoming request to the ingest storage.", NativeHistogramBucketFactor: 1.1, NativeHistogramMinResetDuration: 1 * time.Hour, @@ -291,13 +293,15 @@ func New( Buckets: prometheus.DefBuckets, }), kafkaWriteBytesTotal: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "kafka_sent_bytes_total", - Help: "Total number of bytes sent to the ingest storage.", + Namespace: constants.Loki, + Name: "distributor_kafka_sent_bytes_total", + Help: "Total number of bytes sent to the ingest storage.", }), kafkaRecordsPerRequest: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ - Name: 
"kafka_records_per_write_request", - Help: "The number of records a single per-partition write request has been split into.", - Buckets: prometheus.ExponentialBuckets(1, 2, 8), + Namespace: constants.Loki, + Name: "distributor_kafka_records_per_write_request", + Help: "The number of records a single per-partition write request has been split into.", + Buckets: prometheus.ExponentialBuckets(1, 2, 8), }), writeFailuresManager: writefailures.NewManager(logger, registerer, cfg.WriteFailuresLogging, configs, "distributor"), kafkaWriter: kafkaWriter, diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index cebd46858e17..86afd0b1d659 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -401,8 +401,6 @@ func Test_IncrementTimestamp(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { ing := &mockIngester{} distributors, _ := prepare(t, 1, 3, testData.limits, func(_ string) (ring_client.PoolClient, error) { return ing, nil }) @@ -1216,8 +1214,6 @@ func TestDistributor_PushIngestionRateLimiter(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { limits := &validation.Limits{} flagext.DefaultValues(limits) diff --git a/pkg/distributor/ingestion_rate_strategy_test.go b/pkg/distributor/ingestion_rate_strategy_test.go index 657d34290984..02463264cd0d 100644 --- a/pkg/distributor/ingestion_rate_strategy_test.go +++ b/pkg/distributor/ingestion_rate_strategy_test.go @@ -63,8 +63,6 @@ func TestIngestionRateStrategy(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { var strategy limiter.RateLimiterStrategy diff --git a/pkg/ingester/checkpoint_test.go b/pkg/ingester/checkpoint_test.go index 9f1db601bb72..402768988bb5 100644 --- a/pkg/ingester/checkpoint_test.go +++ b/pkg/ingester/checkpoint_test.go @@ -458,7 +458,8 @@ func Test_SeriesIterator(t *testing.T) { limits, err := validation.NewOverrides(l, nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) for i := 0; i < 3; i++ { inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("%d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil, nil) @@ -505,7 +506,7 @@ func Benchmark_SeriesIterator(b *testing.B) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(b, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) for i := range instances { inst, _ := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("instance %d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil, nil) diff --git a/pkg/ingester/index/bitprefix_test.go b/pkg/ingester/index/bitprefix_test.go index 9832e15ed60c..8bf524c1a2e3 100644 --- a/pkg/ingester/index/bitprefix_test.go +++ b/pkg/ingester/index/bitprefix_test.go @@ -38,7 +38,6 @@ func Test_BitPrefixGetShards(t *testing.T) { {8, true, logql.NewPowerOfTwoShard(index.ShardAnnotation{Shard: 4, Of: 16}).Ptr(), []uint32{2}}, {8, true, logql.NewPowerOfTwoShard(index.ShardAnnotation{Shard: 15, 
Of: 16}).Ptr(), []uint32{7}}, } { - tt := tt t.Run(tt.shard.String()+fmt.Sprintf("_total_%d", tt.total), func(t *testing.T) { ii, err := NewBitPrefixWithShards(tt.total) require.Nil(t, err) diff --git a/pkg/ingester/index/index_test.go b/pkg/ingester/index/index_test.go index 23873cbfc3fd..2e5413bd9b97 100644 --- a/pkg/ingester/index/index_test.go +++ b/pkg/ingester/index/index_test.go @@ -32,7 +32,6 @@ func Test_GetShards(t *testing.T) { {32, &index.ShardAnnotation{Shard: 15, Of: 16}, []uint32{15, 31}}, {64, &index.ShardAnnotation{Shard: 15, Of: 16}, []uint32{15, 31, 47, 63}}, } { - tt := tt t.Run(tt.shard.String()+fmt.Sprintf("_total_%d", tt.total), func(t *testing.T) { ii := NewWithShards(tt.total) res := ii.getShards(tt.shard) diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 1f3a39415fa3..40028cfcd528 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -300,7 +300,7 @@ type Ingester struct { } // New makes a new Ingester. -func New(cfg Config, clientConfig client.Config, store Store, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg, metricsNamespace string, logger log.Logger, customStreamsTracker push.UsageTracker, readRing ring.ReadRing, partitionRingWatcher *ring.PartitionRingWatcher) (*Ingester, error) { +func New(cfg Config, clientConfig client.Config, store Store, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg, metricsNamespace string, logger log.Logger, customStreamsTracker push.UsageTracker, readRing ring.ReadRing, partitionRingWatcher ring.PartitionRingReader) (*Ingester, error) { if cfg.ingesterClientFactory == nil { cfg.ingesterClientFactory = client.New } @@ -388,10 +388,6 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con i.lifecyclerWatcher.WatchService(i.partitionReader) } - // Now that the lifecycler has been created, we can create the limiter - // which depends on it. - i.limiter = NewLimiter(limits, metrics, i.lifecycler, cfg.LifecyclerConfig.RingConfig.ReplicationFactor) - i.Service = services.NewBasicService(i.starting, i.running, i.stopping) i.setupAutoForget() @@ -408,12 +404,18 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con i.SetExtractorWrapper(i.cfg.SampleExtractorWrapper) } + var limiterStrategy limiterRingStrategy var ownedStreamsStrategy ownershipStrategy if i.cfg.KafkaIngestion.Enabled { + limiterStrategy = newPartitionRingLimiterStrategy(partitionRingWatcher, limits.IngestionPartitionsTenantShardSize) ownedStreamsStrategy = newOwnedStreamsPartitionStrategy(i.ingestPartitionID, partitionRingWatcher, limits.IngestionPartitionsTenantShardSize, util_log.Logger) } else { + limiterStrategy = newIngesterRingLimiterStrategy(i.lifecycler, cfg.LifecyclerConfig.RingConfig.ReplicationFactor) ownedStreamsStrategy = newOwnedStreamsIngesterStrategy(i.lifecycler.ID, i.readRing, util_log.Logger) } + // Now that the lifecycler has been created, we can create the limiter + // which depends on it. 
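+	// The limiter delegates converting the global per-tenant limit to a local
+	// limit to the ring strategy chosen above: per-partition when Kafka
+	// ingestion is enabled, per-ingester scaled by the replication factor
+	// otherwise.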
+ i.limiter = NewLimiter(limits, metrics, limiterStrategy) i.recalculateOwnedStreams = newRecalculateOwnedStreamsSvc(i.getInstances, ownedStreamsStrategy, cfg.OwnedStreamsCheckInterval, util_log.Logger) return i, nil diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go index 4086831bb33f..819577540f66 100644 --- a/pkg/ingester/instance_test.go +++ b/pkg/ingester/instance_test.go @@ -78,7 +78,7 @@ var NilMetrics = newIngesterMetrics(nil, constants.Loki) func TestLabelsCollisions(t *testing.T) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) i, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) require.Nil(t, err) @@ -106,7 +106,7 @@ func TestLabelsCollisions(t *testing.T) { func TestConcurrentPushes(t *testing.T) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) require.Nil(t, err) @@ -158,7 +158,7 @@ func TestConcurrentPushes(t *testing.T) { func TestGetStreamRates(t *testing.T) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) require.NoError(t, err) @@ -245,7 +245,7 @@ func labelHashNoShard(l labels.Labels) uint64 { func TestSyncPeriod(t *testing.T) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) const ( syncPeriod = 1 * time.Minute @@ -290,7 +290,7 @@ func setupTestStreams(t *testing.T) (*instance, time.Time, int) { t.Helper() limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) indexShards := 2 // just some random values @@ -507,7 +507,7 @@ func makeRandomLabels() labels.Labels { func Benchmark_PushInstance(b *testing.B) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(b, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, "test", limiter, 
loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) ctx := context.Background() @@ -549,7 +549,7 @@ func Benchmark_instance_addNewTailer(b *testing.B) { l.MaxLocalStreamsPerUser = 100000 limits, err := validation.NewOverrides(l, nil) require.NoError(b, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) ctx := context.Background() @@ -1089,7 +1089,7 @@ func TestStreamShardingUsage(t *testing.T) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), limitsDefinition) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) defaultShardStreamsCfg := limiter.limits.ShardStreams("fake") tenantShardStreamsCfg := limiter.limits.ShardStreams(customTenant1) @@ -1454,7 +1454,7 @@ func defaultInstance(t *testing.T) *instance { &ingesterConfig, defaultPeriodConfigs, "fake", - NewLimiter(overrides, NilMetrics, &ringCountMock{count: 1}, 1), + NewLimiter(overrides, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)), loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, diff --git a/pkg/ingester/kafka_consumer.go b/pkg/ingester/kafka_consumer.go index c2fe90ee052f..0e6185fbe796 100644 --- a/pkg/ingester/kafka_consumer.go +++ b/pkg/ingester/kafka_consumer.go @@ -3,7 +3,7 @@ package ingester import ( "context" "errors" - math "math" + "math" "sync" "time" diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go index a9ddd2ba3ba3..fd3a5c7c1b9b 100644 --- a/pkg/ingester/limiter.go +++ b/pkg/ingester/limiter.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "github.com/grafana/dskit/ring" "golang.org/x/time/rate" "github.com/grafana/loki/v3/pkg/distributor/shardstreams" @@ -13,7 +14,7 @@ import ( ) const ( - errMaxStreamsPerUserLimitExceeded = "tenant '%v' per-user streams limit exceeded, streams: %d exceeds calculated limit: %d (local limit: %d, global limit: %d, global/ingesters: %d)" + errMaxStreamsPerUserLimitExceeded = "tenant '%v' per-user streams limit exceeded, streams: %d exceeds calculated limit: %d (local limit: %d, global limit: %d, local share: %d)" ) // RingCount is the interface exposed by a ring implementation which allows @@ -37,10 +38,9 @@ type Limits interface { // Limiter implements primitives to get the maximum number of streams // an ingester can handle for a specific tenant type Limiter struct { - limits Limits - ring RingCount - replicationFactor int - metrics *ingesterMetrics + limits Limits + ringStrategy limiterRingStrategy + metrics *ingesterMetrics mtx sync.RWMutex disabled bool @@ -60,13 +60,16 @@ func (l *Limiter) Enable() { l.metrics.limiterEnabled.Set(1) } +type limiterRingStrategy interface { + convertGlobalToLocalLimit(int, string) int +} + // NewLimiter makes a new limiter -func NewLimiter(limits Limits, metrics *ingesterMetrics, ring RingCount, replicationFactor int) *Limiter { +func NewLimiter(limits Limits, metrics *ingesterMetrics, ingesterRingLimiterStrategy limiterRingStrategy) *Limiter { return &Limiter{ - limits: limits, - ring: ring, - replicationFactor: replicationFactor, - metrics: metrics, + limits: limits, + ringStrategy: ingesterRingLimiterStrategy, + metrics: metrics, } } @@ -87,7 +90,7 @@ func (l *Limiter) GetStreamCountLimit(tenantID string) (calculatedLimit, localLi // 
We can assume that streams are evenly distributed across ingesters // so we do convert the global limit into a local limit globalLimit = l.limits.MaxGlobalStreamsPerUser(tenantID) - adjustedGlobalLimit = l.convertGlobalToLocalLimit(globalLimit) + adjustedGlobalLimit = l.ringStrategy.convertGlobalToLocalLimit(globalLimit, tenantID) // Set the calculated limit to the lesser of the local limit or the new calculated global limit calculatedLimit = l.minNonZero(localLimit, adjustedGlobalLimit) @@ -108,20 +111,32 @@ func (l *Limiter) minNonZero(first, second int) int { return first } -func (l *Limiter) convertGlobalToLocalLimit(globalLimit int) int { +type ingesterRingLimiterStrategy struct { + ring RingCount + replicationFactor int +} + +func newIngesterRingLimiterStrategy(ring RingCount, replicationFactor int) *ingesterRingLimiterStrategy { + return &ingesterRingLimiterStrategy{ + ring: ring, + replicationFactor: replicationFactor, + } +} + +func (l *ingesterRingLimiterStrategy) convertGlobalToLocalLimit(globalLimit int, _ string) int { if globalLimit == 0 || l.replicationFactor == 0 { return 0 } zonesCount := l.ring.ZonesCount() if zonesCount <= 1 { - return calculateLimitForSingleZone(globalLimit, l) + return l.calculateLimitForSingleZone(globalLimit) } - return calculateLimitForMultipleZones(globalLimit, zonesCount, l) + return l.calculateLimitForMultipleZones(globalLimit, zonesCount) } -func calculateLimitForSingleZone(globalLimit int, l *Limiter) int { +func (l *ingesterRingLimiterStrategy) calculateLimitForSingleZone(globalLimit int) int { numIngesters := l.ring.HealthyInstancesCount() if numIngesters > 0 { return int((float64(globalLimit) / float64(numIngesters)) * float64(l.replicationFactor)) @@ -129,7 +144,7 @@ func calculateLimitForSingleZone(globalLimit int, l *Limiter) int { return 0 } -func calculateLimitForMultipleZones(globalLimit, zonesCount int, l *Limiter) int { +func (l *ingesterRingLimiterStrategy) calculateLimitForMultipleZones(globalLimit, zonesCount int) int { ingestersInZone := l.ring.HealthyInstancesInZoneCount() if ingestersInZone > 0 { return int((float64(globalLimit) * float64(l.replicationFactor)) / float64(zonesCount) / float64(ingestersInZone)) @@ -137,6 +152,34 @@ func calculateLimitForMultipleZones(globalLimit, zonesCount int, l *Limiter) int return 0 } +type partitionRingLimiterStrategy struct { + ring ring.PartitionRingReader + getPartitionShardSize func(user string) int +} + +func newPartitionRingLimiterStrategy(ring ring.PartitionRingReader, getPartitionShardSize func(user string) int) *partitionRingLimiterStrategy { + return &partitionRingLimiterStrategy{ + ring: ring, + getPartitionShardSize: getPartitionShardSize, + } +} + +func (l *partitionRingLimiterStrategy) convertGlobalToLocalLimit(globalLimit int, tenantID string) int { + if globalLimit == 0 { + return 0 + } + + userShardSize := l.getPartitionShardSize(tenantID) + + // ShuffleShardSize correctly handles cases when user has no shard config or more shards than number of active partitions in the ring. 
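+	// For example, a global limit of 200 streams over 3 active partitions
+	// yields a local limit of 200/3 = 66 streams per partition.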
+ activePartitionsForUser := l.ring.PartitionRing().ShuffleShardSize(userShardSize) + + if activePartitionsForUser == 0 { + return 0 + } + return int(float64(globalLimit) / float64(activePartitionsForUser)) +} + type supplier[T any] func() T type streamCountLimiter struct { diff --git a/pkg/ingester/limiter_test.go b/pkg/ingester/limiter_test.go index 0d0055d0a0af..b8c7fe72e359 100644 --- a/pkg/ingester/limiter_test.go +++ b/pkg/ingester/limiter_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/grafana/dskit/ring" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/atomic" @@ -14,12 +15,18 @@ import ( "github.com/grafana/loki/v3/pkg/validation" ) +type fixedStrategy struct { + localLimit int +} + +func (strategy *fixedStrategy) convertGlobalToLocalLimit(_ int, _ string) int { + return strategy.localLimit +} func TestStreamCountLimiter_AssertNewStreamAllowed(t *testing.T) { tests := map[string]struct { maxLocalStreamsPerUser int maxGlobalStreamsPerUser int - ringReplicationFactor int - ringIngesterCount int + calculatedLocalLimit int streams int expected error useOwnedStreamService bool @@ -29,80 +36,63 @@ func TestStreamCountLimiter_AssertNewStreamAllowed(t *testing.T) { "both local and global limit are disabled": { maxLocalStreamsPerUser: 0, maxGlobalStreamsPerUser: 0, - ringReplicationFactor: 1, - ringIngesterCount: 1, + calculatedLocalLimit: 0, streams: 100, expected: nil, }, "current number of streams is below the limit": { maxLocalStreamsPerUser: 0, maxGlobalStreamsPerUser: 1000, - ringReplicationFactor: 3, - ringIngesterCount: 10, + calculatedLocalLimit: 300, streams: 299, expected: nil, }, "current number of streams is above the limit": { maxLocalStreamsPerUser: 0, maxGlobalStreamsPerUser: 1000, - ringReplicationFactor: 3, - ringIngesterCount: 10, + calculatedLocalLimit: 300, streams: 300, expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, "test", 300, 300, 0, 1000, 300), }, "both local and global limits are disabled": { maxLocalStreamsPerUser: 0, maxGlobalStreamsPerUser: 0, - ringReplicationFactor: 1, - ringIngesterCount: 1, + calculatedLocalLimit: 0, streams: math.MaxInt32 - 1, expected: nil, }, "only local limit is enabled": { maxLocalStreamsPerUser: 1000, maxGlobalStreamsPerUser: 0, - ringReplicationFactor: 1, - ringIngesterCount: 1, + calculatedLocalLimit: 1000, streams: 3000, - expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, "test", 3000, 1000, 1000, 0, 0), + expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, "test", 3000, 1000, 1000, 0, 1000), }, - "only global limit is enabled with replication-factor=1": { + "only global limit is enabled": { maxLocalStreamsPerUser: 0, maxGlobalStreamsPerUser: 1000, - ringReplicationFactor: 1, - ringIngesterCount: 10, + calculatedLocalLimit: 100, streams: 3000, expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, "test", 3000, 100, 0, 1000, 100), }, - "only global limit is enabled with replication-factor=3": { - maxLocalStreamsPerUser: 0, - maxGlobalStreamsPerUser: 1000, - ringReplicationFactor: 3, - ringIngesterCount: 10, - streams: 3000, - expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, "test", 3000, 300, 0, 1000, 300), - }, "both local and global limits are set with local limit < global limit": { maxLocalStreamsPerUser: 150, maxGlobalStreamsPerUser: 1000, - ringReplicationFactor: 3, - ringIngesterCount: 10, + calculatedLocalLimit: 150, streams: 3000, - expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, "test", 3000, 150, 150, 1000, 300), + expected: 
fmt.Errorf(errMaxStreamsPerUserLimitExceeded, "test", 3000, 150, 150, 1000, 150), }, "both local and global limits are set with local limit > global limit": { maxLocalStreamsPerUser: 500, maxGlobalStreamsPerUser: 1000, - ringReplicationFactor: 3, - ringIngesterCount: 10, + calculatedLocalLimit: 300, streams: 3000, expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, "test", 3000, 300, 500, 1000, 300), }, "actual limit must be used if it's greater than fixed limit": { maxLocalStreamsPerUser: 500, maxGlobalStreamsPerUser: 1000, - ringReplicationFactor: 3, - ringIngesterCount: 10, + calculatedLocalLimit: 300, useOwnedStreamService: true, fixedLimit: 20, ownedStreamCount: 3000, @@ -111,18 +101,16 @@ func TestStreamCountLimiter_AssertNewStreamAllowed(t *testing.T) { "fixed limit must be used if it's greater than actual limit": { maxLocalStreamsPerUser: 500, maxGlobalStreamsPerUser: 1000, - ringReplicationFactor: 3, - ringIngesterCount: 10, + calculatedLocalLimit: 500, useOwnedStreamService: true, fixedLimit: 2000, ownedStreamCount: 2001, - expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, "test", 2001, 2000, 500, 1000, 300), + expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, "test", 2001, 2000, 500, 1000, 500), }, "fixed limit must not be used if both limits are disabled": { maxLocalStreamsPerUser: 0, maxGlobalStreamsPerUser: 0, - ringReplicationFactor: 3, - ringIngesterCount: 10, + calculatedLocalLimit: 0, useOwnedStreamService: true, fixedLimit: 2000, ownedStreamCount: 2001, @@ -131,12 +119,7 @@ func TestStreamCountLimiter_AssertNewStreamAllowed(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { - // Mock the ring - ring := &ringCountMock{count: testData.ringIngesterCount} - // Mock limits limits, err := validation.NewOverrides(validation.Limits{ MaxLocalStreamsPerUser: testData.maxLocalStreamsPerUser, @@ -149,7 +132,8 @@ func TestStreamCountLimiter_AssertNewStreamAllowed(t *testing.T) { fixedLimit: atomic.NewInt32(testData.fixedLimit), ownedStreamCount: testData.ownedStreamCount, } - limiter := NewLimiter(limits, NilMetrics, ring, testData.ringReplicationFactor) + strategy := &fixedStrategy{localLimit: testData.calculatedLocalLimit} + limiter := NewLimiter(limits, NilMetrics, strategy) defaultCountSupplier := func() int { return testData.streams } @@ -197,10 +181,8 @@ func TestLimiter_minNonZero(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { - limiter := NewLimiter(nil, NilMetrics, nil, 0) + limiter := NewLimiter(nil, NilMetrics, nil) assert.Equal(t, testData.expected, limiter.minNonZero(testData.first, testData.second)) }) } @@ -281,7 +263,7 @@ func (m *MockRing) HealthyInstancesInZoneCount() int { return m.healthyInstancesInZoneCount } -func TestConvertGlobalToLocalLimit(t *testing.T) { +func TestConvertGlobalToLocalLimit_IngesterRing(t *testing.T) { tests := []struct { name string globalLimit int @@ -299,19 +281,87 @@ func TestConvertGlobalToLocalLimit(t *testing.T) { } for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name+"_ingesterStrategy", func(t *testing.T) { mockRing := &MockRing{ zonesCount: tc.zonesCount, healthyInstancesCount: tc.healthyInstancesCount, healthyInstancesInZoneCount: tc.healthyInstancesInZoneCount, } - limiter := &Limiter{ - ring: mockRing, - replicationFactor: tc.replicationFactor, + strategy := newIngesterRingLimiterStrategy(mockRing, tc.replicationFactor) + + localLimit := 
strategy.convertGlobalToLocalLimit(tc.globalLimit, "test")
+			if localLimit != tc.expectedLocalLimit {
+				t.Errorf("expected %d, got %d", tc.expectedLocalLimit, localLimit)
+			}
+		})
+	}
+}
+
+func newMockPartitionRingWithPartitions(activeCount int, inactiveCount int) *ring.PartitionRing {
+	partitionRing := ring.PartitionRingDesc{
+		Partitions: map[int32]ring.PartitionDesc{},
+		Owners:     map[string]ring.OwnerDesc{},
+	}
+
+	for i := 0; i < activeCount; i++ {
+		id := int32(i)
+
+		partitionRing.Partitions[id] = ring.PartitionDesc{
+			Id:     id,
+			Tokens: []uint32{uint32(id)},
+			State:  ring.PartitionActive,
+		}
+		partitionRing.Owners[fmt.Sprintf("test%d", id)] = ring.OwnerDesc{
+			OwnedPartition: id,
+			State:          ring.OwnerActive,
+		}
+	}
+	for i := activeCount; i < activeCount+inactiveCount; i++ {
+		id := int32(i)
+
+		partitionRing.Partitions[id] = ring.PartitionDesc{
+			Id:     id,
+			Tokens: []uint32{uint32(id)},
+			State:  ring.PartitionInactive,
+		}
+	}
+	return ring.NewPartitionRing(partitionRing)
+}
+
+func TestConvertGlobalToLocalLimit_PartitionRing(t *testing.T) {
+	tests := []struct {
+		name               string
+		globalLimit        int
+		activePartitions   int
+		inactivePartitions int
+		shardsPerUser      int
+		expectedLocalLimit int
+	}{
+		{"GlobalLimitZero", 0, 1, 0, 0, 0},
+		{"SinglePartition", 100, 1, 0, 0, 100},
+		{"MultiplePartitions", 200, 3, 0, 0, 66},
+		{"NoActivePartitions", 200, 0, 3, 0, 0},
+		{"PartialActivePartitions", 60, 3, 3, 0, 20},
+		{"LimitLessThanActivePartitions", 3, 10, 0, 0, 0},
+		{"MultiplePartitionsWithLimitedShardsPerUser", 200, 3, 0, 2, 100},
+		{"MultiplePartitionsWithMoreShardsPerUserThanPartitions", 200, 3, 0, 10, 66},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name+"_partitionStrategy", func(t *testing.T) {
+			ringReader := &mockPartitionRingReader{
+				ring: newMockPartitionRingWithPartitions(tc.activePartitions, tc.inactivePartitions),
+			}
+
+			getPartitionsForUser := func(_ string) int {
+				return tc.shardsPerUser
			}
-			localLimit := limiter.convertGlobalToLocalLimit(tc.globalLimit)
+			strategy := newPartitionRingLimiterStrategy(ringReader, getPartitionsForUser)
+
+			localLimit := strategy.convertGlobalToLocalLimit(tc.globalLimit, "test")
 			if localLimit != tc.expectedLocalLimit {
 				t.Errorf("expected %d, got %d", tc.expectedLocalLimit, localLimit)
 			}
 		})
 	}
diff --git a/pkg/ingester/owned_streams_test.go b/pkg/ingester/owned_streams_test.go
index 7f114922fa44..373d37a5f62e 100644
--- a/pkg/ingester/owned_streams_test.go
+++ b/pkg/ingester/owned_streams_test.go
@@ -17,7 +17,7 @@ func Test_OwnedStreamService(t *testing.T) {
 	require.NoError(t, err)
 	// Mock the ring
 	ring := &ringCountMock{count: 30}
-	limiter := NewLimiter(limits, NilMetrics, ring, 3)
+	limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(ring, 3))
 	service := newOwnedStreamService("test", limiter)
 	require.Equal(t, 0, service.getOwnedStreamCount())
diff --git a/pkg/ingester/recalculate_owned_streams_test.go b/pkg/ingester/recalculate_owned_streams_test.go
index d5dce8599287..d2d3583095b0 100644
--- a/pkg/ingester/recalculate_owned_streams_test.go
+++ b/pkg/ingester/recalculate_owned_streams_test.go
@@ -70,7 +70,7 @@ func Test_recalculateOwnedStreams_recalculateWithIngesterStrategy(t *testing.T)
 		UseOwnedStreamCount: testData.featureEnabled,
 	}, nil)
 	require.NoError(t, err)
-	limiter := NewLimiter(limits, NilMetrics, mockRing, 1)
+	limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(mockRing, 1))
 	tenant, err := newInstance(
 		defaultConfig(),
diff --git
a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go index 03e0ca976628..fcad6558b21d 100644 --- a/pkg/ingester/stream_test.go +++ b/pkg/ingester/stream_test.go @@ -56,7 +56,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { @@ -114,7 +114,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) { func TestPushDeduplication(t *testing.T) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) chunkfmt, headfmt := defaultChunkFormat(t) @@ -150,7 +150,7 @@ func TestPushDeduplication(t *testing.T) { func TestPushDeduplicationExtraMetrics(t *testing.T) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) chunkfmt, headfmt := defaultChunkFormat(t) @@ -220,7 +220,7 @@ func TestPushDeduplicationExtraMetrics(t *testing.T) { func TestPushRejectOldCounter(t *testing.T) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) chunkfmt, headfmt := defaultChunkFormat(t) @@ -328,7 +328,7 @@ func TestEntryErrorCorrectlyReported(t *testing.T) { } limits, err := validation.NewOverrides(l, nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) chunkfmt, headfmt := defaultChunkFormat(t) @@ -367,7 +367,7 @@ func TestUnorderedPush(t *testing.T) { cfg.MaxChunkAge = 10 * time.Second limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) chunkfmt, headfmt := defaultChunkFormat(t) @@ -470,7 +470,7 @@ func TestPushRateLimit(t *testing.T) { } limits, err := validation.NewOverrides(l, nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) chunkfmt, headfmt := defaultChunkFormat(t) @@ -510,7 +510,7 @@ func TestPushRateLimitAllOrNothing(t *testing.T) { } limits, err := validation.NewOverrides(l, nil) require.NoError(t, err) - limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) + limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1)) cfg := defaultConfig() chunkfmt, headfmt := defaultChunkFormat(t) @@ -549,7 +549,7 @@ func TestPushRateLimitAllOrNothing(t *testing.T) { func TestReplayAppendIgnoresValidityWindow(t *testing.T) { limits, err := 
validation.NewOverrides(defaultLimitsTestConfig(), nil)
 	require.NoError(t, err)
-	limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
+	limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1))

 	cfg := defaultConfig()
 	cfg.MaxChunkAge = time.Minute
@@ -617,7 +617,7 @@ func Benchmark_PushStream(b *testing.B) {
 	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
 	require.NoError(b, err)
-	limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
+	limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1))
 	chunkfmt, headfmt := defaultChunkFormat(b)

 	s := newStream(chunkfmt, headfmt, &Config{MaxChunkAge: 24 * time.Hour}, limiter, "fake", model.Fingerprint(0), ls, true, NewStreamRateCalculator(), NilMetrics, nil, nil)
diff --git a/pkg/ingester/streams_map_test.go b/pkg/ingester/streams_map_test.go
index b14b3e07e497..87e8332eddd4 100644
--- a/pkg/ingester/streams_map_test.go
+++ b/pkg/ingester/streams_map_test.go
@@ -13,7 +13,7 @@ import (
 func TestStreamsMap(t *testing.T) {
 	limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
 	require.NoError(t, err)
-	limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
+	limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1))
 	chunkfmt, headfmt := defaultChunkFormat(t)

 	ss := []*stream{
diff --git a/pkg/iter/entry_iterator_test.go b/pkg/iter/entry_iterator_test.go
index fb1548ddc35d..2ef128bf62d8 100644
--- a/pkg/iter/entry_iterator_test.go
+++ b/pkg/iter/entry_iterator_test.go
@@ -178,8 +178,6 @@ func TestMergeIteratorPrefetch(t *testing.T) {
 	}
 	for testName, testFunc := range tests {
-		testFunc := testFunc
-
 		t.Run(testName, func(t *testing.T) {
 			t.Parallel()
diff --git a/pkg/kafka/config.go b/pkg/kafka/config.go
index 7f981b7b5e73..13cfb618cfdb 100644
--- a/pkg/kafka/config.go
+++ b/pkg/kafka/config.go
@@ -36,18 +36,14 @@ const (
 	// in the worst case scenario, which is expected to be way above the actual one.
 	maxProducerRecordDataBytesLimit = producerBatchMaxBytes - 16384
 	minProducerRecordDataBytesLimit = 1024 * 1024
-
-	kafkaConfigFlagPrefix          = "ingest-storage.kafka"
-	targetConsumerLagAtStartupFlag = kafkaConfigFlagPrefix + ".target-consumer-lag-at-startup"
-	maxConsumerLagAtStartupFlag    = kafkaConfigFlagPrefix + ".max-consumer-lag-at-startup"
 )

 var (
 	ErrMissingKafkaAddress               = errors.New("the Kafka address has not been configured")
 	ErrMissingKafkaTopic                 = errors.New("the Kafka topic has not been configured")
+	ErrInconsistentConsumerLagAtStartup  = errors.New("the target and max consumer lag at startup must be either both set to 0 or to a value greater than 0")
+	ErrInvalidMaxConsumerLagAtStartup    = errors.New("the configured max consumer lag at startup must be greater than or equal to the configured target consumer lag")
 	ErrInvalidProducerMaxRecordSizeBytes = fmt.Errorf("the configured producer max record size bytes must be a value between %d and %d", minProducerRecordDataBytesLimit, maxProducerRecordDataBytesLimit)
-
-	consumeFromPositionOptions = []string{consumeFromLastOffset, consumeFromStart, consumeFromEnd, consumeFromTimestamp}
 )

 // Config holds the generic config for the Kafka backend.
@@ -68,6 +64,9 @@ type Config struct { ProducerMaxRecordSizeBytes int `yaml:"producer_max_record_size_bytes"` ProducerMaxBufferedBytes int64 `yaml:"producer_max_buffered_bytes"` + + TargetConsumerLagAtStartup time.Duration `yaml:"target_consumer_lag_at_startup"` + MaxConsumerLagAtStartup time.Duration `yaml:"max_consumer_lag_at_startup"` } func (cfg *Config) RegisterFlags(f *flag.FlagSet) { @@ -91,6 +90,10 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.IntVar(&cfg.ProducerMaxRecordSizeBytes, prefix+".producer-max-record-size-bytes", maxProducerRecordDataBytesLimit, "The maximum size of a Kafka record data that should be generated by the producer. An incoming write request larger than this size is split into multiple Kafka records. We strongly recommend to not change this setting unless for testing purposes.") f.Int64Var(&cfg.ProducerMaxBufferedBytes, prefix+".producer-max-buffered-bytes", 1024*1024*1024, "The maximum size of (uncompressed) buffered and unacknowledged produced records sent to Kafka. The produce request fails once this limit is reached. This limit is per Kafka client. 0 to disable the limit.") + + consumerLagUsage := fmt.Sprintf("Set both -%s and -%s to 0 to disable waiting for maximum consumer lag being honored at startup.", prefix+".target-consumer-lag-at-startup", prefix+".max-consumer-lag-at-startup") + f.DurationVar(&cfg.TargetConsumerLagAtStartup, prefix+".target-consumer-lag-at-startup", 2*time.Second, "The best-effort maximum lag a consumer tries to achieve at startup. "+consumerLagUsage) + f.DurationVar(&cfg.MaxConsumerLagAtStartup, prefix+".max-consumer-lag-at-startup", 15*time.Second, "The guaranteed maximum lag before a consumer is considered to have caught up reading from a partition at startup, becomes ACTIVE in the hash ring and passes the readiness check. "+consumerLagUsage) } func (cfg *Config) Validate() error { @@ -103,6 +106,12 @@ func (cfg *Config) Validate() error { if cfg.ProducerMaxRecordSizeBytes < minProducerRecordDataBytesLimit || cfg.ProducerMaxRecordSizeBytes > maxProducerRecordDataBytesLimit { return ErrInvalidProducerMaxRecordSizeBytes } + if (cfg.TargetConsumerLagAtStartup == 0 && cfg.MaxConsumerLagAtStartup != 0) || (cfg.TargetConsumerLagAtStartup != 0 && cfg.MaxConsumerLagAtStartup == 0) { + return ErrInconsistentConsumerLagAtStartup + } + if cfg.MaxConsumerLagAtStartup < cfg.TargetConsumerLagAtStartup { + return ErrInvalidMaxConsumerLagAtStartup + } return nil } diff --git a/pkg/kafka/partition/reader.go b/pkg/kafka/partition/reader.go index 9972d13307e8..74f18b02057f 100644 --- a/pkg/kafka/partition/reader.go +++ b/pkg/kafka/partition/reader.go @@ -6,20 +6,31 @@ import ( "math" "time" + "github.com/coder/quartz" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/multierror" "github.com/grafana/dskit/services" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kerr" "github.com/twmb/franz-go/pkg/kgo" + "github.com/twmb/franz-go/pkg/kmsg" "github.com/twmb/franz-go/plugin/kprom" "github.com/grafana/loki/v3/pkg/kafka" ) +var errWaitTargetLagDeadlineExceeded = errors.New("waiting for target lag deadline exceeded") + +const ( + kafkaStartOffset = -2 + kafkaEndOffset = -1 +) + // Reader is responsible for reading data from a specific Kafka partition // and passing it to the consumer for processing. 
It is a core component of the // Loki ingester's Kafka-based ingestion pipeline. @@ -32,11 +43,13 @@ type Reader struct { consumerFactory ConsumerFactory committer *partitionCommitter lastProcessedOffset int64 + recordsChan chan []Record client *kgo.Client logger log.Logger metrics readerMetrics reg prometheus.Registerer + clock quartz.Clock } type Record struct { @@ -79,18 +92,45 @@ func NewReader( // start initializes the Kafka client and committer for the PartitionReader. // This method is called when the PartitionReader service starts. -func (p *Reader) start(_ context.Context) error { +func (p *Reader) start(ctx context.Context) error { var err error - p.client, err = kafka.NewReaderClient(p.kafkaCfg, p.metrics.kprom, p.logger, - kgo.ConsumePartitions(map[string]map[int32]kgo.Offset{ - p.kafkaCfg.Topic: {p.partitionID: kgo.NewOffset().AtStart()}, - }), - ) + p.client, err = kafka.NewReaderClient(p.kafkaCfg, p.metrics.kprom, p.logger) if err != nil { return errors.Wrap(err, "creating kafka reader client") } + + // We manage our commits manually, so we must fetch the last offset for our consumer group to find out where to read from. + lastCommittedOffset := p.fetchLastCommittedOffset(ctx) + p.client.AddConsumePartitions(map[string]map[int32]kgo.Offset{ + p.kafkaCfg.Topic: {p.partitionID: kgo.NewOffset().At(lastCommittedOffset)}, + }) + p.committer = newCommitter(p.kafkaCfg, kadm.NewClient(p.client), p.partitionID, p.consumerGroup, p.logger, p.reg) - // todo: attempt to ensure max lag timestamp on startup. + + if targetLag, maxLag := p.kafkaCfg.TargetConsumerLagAtStartup, p.kafkaCfg.MaxConsumerLagAtStartup; targetLag > 0 && maxLag > 0 { + consumer, err := p.consumerFactory(p.committer) + if err != nil { + return fmt.Errorf("creating consumer: %w", err) + } + + cancelCtx, cancel := context.WithCancel(ctx) + // Temporarily start a consumer to do the initial update + recordsChan := make(chan []Record) + wait := consumer.Start(cancelCtx, recordsChan) + // Shutdown the consumer after catching up. We start a new instance in the run method to tie the lifecycle to the run context. + defer func() { + close(recordsChan) + cancel() + wait() + }() + + err = p.processNextFetchesUntilTargetOrMaxLagHonored(ctx, p.kafkaCfg.MaxConsumerLagAtStartup, p.kafkaCfg.TargetConsumerLagAtStartup, recordsChan) + if err != nil { + level.Error(p.logger).Log("msg", "failed to catch up to max lag", "partition", p.partitionID, "consumer_group", p.consumerGroup, "err", err) + return err + } + } + return nil } @@ -114,7 +154,251 @@ func (p *Reader) run(ctx context.Context) error { return nil } -func (p *Reader) startFetchLoop(ctx context.Context) <-chan []Record { +func (p *Reader) fetchLastCommittedOffset(ctx context.Context) int64 { + // We manually create a request so that we can request the offset for a single partition + // only, which is more performant than requesting the offsets for all partitions. + req := kmsg.NewPtrOffsetFetchRequest() + req.Topics = []kmsg.OffsetFetchRequestTopic{{Topic: p.kafkaCfg.Topic, Partitions: []int32{p.partitionID}}} + req.Group = p.consumerGroup + + resps := p.client.RequestSharded(ctx, req) + + // Since we issued a request for only 1 partition, we expect exactly 1 response. + if expected, actual := 1, len(resps); actual != expected { + level.Error(p.logger).Log("msg", fmt.Sprintf("unexpected number of responses (expected: %d, got: %d)", expected, actual), "expected", expected, "actual", len(resps)) + return kafkaStartOffset + } + // Ensure no error occurred. 
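+	// Every validation failure in this function falls back to kafkaStartOffset,
+	// i.e. the reader replays the partition from the start rather than risk
+	// skipping records.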
+ res := resps[0] + if res.Err != nil { + level.Error(p.logger).Log("msg", "error fetching group offset for partition", "err", res.Err) + return kafkaStartOffset + } + + // Parse the response. + fetchRes, ok := res.Resp.(*kmsg.OffsetFetchResponse) + if !ok { + level.Error(p.logger).Log("msg", "unexpected response type") + return kafkaStartOffset + } + if expected, actual := 1, len(fetchRes.Groups); actual != expected { + level.Error(p.logger).Log("msg", fmt.Sprintf("unexpected number of groups in the response (expected: %d, got: %d)", expected, actual)) + return kafkaStartOffset + } + if expected, actual := 1, len(fetchRes.Groups[0].Topics); actual != expected { + level.Error(p.logger).Log("msg", fmt.Sprintf("unexpected number of topics in the response (expected: %d, got: %d)", expected, actual)) + return kafkaStartOffset + } + if expected, actual := p.kafkaCfg.Topic, fetchRes.Groups[0].Topics[0].Topic; expected != actual { + level.Error(p.logger).Log("msg", fmt.Sprintf("unexpected topic in the response (expected: %s, got: %s)", expected, actual)) + return kafkaStartOffset + } + if expected, actual := 1, len(fetchRes.Groups[0].Topics[0].Partitions); actual != expected { + level.Error(p.logger).Log("msg", fmt.Sprintf("unexpected number of partitions in the response (expected: %d, got: %d)", expected, actual)) + return kafkaStartOffset + } + if expected, actual := p.partitionID, fetchRes.Groups[0].Topics[0].Partitions[0].Partition; actual != expected { + level.Error(p.logger).Log("msg", fmt.Sprintf("unexpected partition in the response (expected: %d, got: %d)", expected, actual)) + return kafkaStartOffset + } + if err := kerr.ErrorForCode(fetchRes.Groups[0].Topics[0].Partitions[0].ErrorCode); err != nil { + level.Error(p.logger).Log("msg", "unexpected error in the response", "err", err) + return kafkaStartOffset + } + + return fetchRes.Groups[0].Topics[0].Partitions[0].Offset +} + +func (p *Reader) fetchPartitionOffset(ctx context.Context, position int64) (int64, error) { + // Create a custom request to fetch the latest offset of a specific partition. + // We manually create a request so that we can request the offset for a single partition + // only, which is more performant than requesting the offsets for all partitions. + partitionReq := kmsg.NewListOffsetsRequestTopicPartition() + partitionReq.Partition = p.partitionID + partitionReq.Timestamp = position + + topicReq := kmsg.NewListOffsetsRequestTopic() + topicReq.Topic = p.kafkaCfg.Topic + topicReq.Partitions = []kmsg.ListOffsetsRequestTopicPartition{partitionReq} + + req := kmsg.NewPtrListOffsetsRequest() + req.IsolationLevel = 0 // 0 means READ_UNCOMMITTED. + req.Topics = []kmsg.ListOffsetsRequestTopic{topicReq} + + // Even if we share the same client, other in-flight requests are not canceled once this context is canceled + // (or its deadline is exceeded). We've verified it with a unit test. + resps := p.client.RequestSharded(ctx, req) + + // Since we issued a request for only 1 partition, we expect exactly 1 response. + if expected := 1; len(resps) != 1 { + return 0, fmt.Errorf("unexpected number of responses (expected: %d, got: %d)", expected, len(resps)) + } + + // Ensure no error occurred. + res := resps[0] + if res.Err != nil { + return 0, res.Err + } + + // Parse the response. 
+ listRes, ok := res.Resp.(*kmsg.ListOffsetsResponse) + if !ok { + return 0, errors.New("unexpected response type") + } + if expected, actual := 1, len(listRes.Topics); actual != expected { + return 0, fmt.Errorf("unexpected number of topics in the response (expected: %d, got: %d)", expected, actual) + } + if expected, actual := p.kafkaCfg.Topic, listRes.Topics[0].Topic; expected != actual { + return 0, fmt.Errorf("unexpected topic in the response (expected: %s, got: %s)", expected, actual) + } + if expected, actual := 1, len(listRes.Topics[0].Partitions); actual != expected { + return 0, fmt.Errorf("unexpected number of partitions in the response (expected: %d, got: %d)", expected, actual) + } + if expected, actual := p.partitionID, listRes.Topics[0].Partitions[0].Partition; actual != expected { + return 0, fmt.Errorf("unexpected partition in the response (expected: %d, got: %d)", expected, actual) + } + if err := kerr.ErrorForCode(listRes.Topics[0].Partitions[0].ErrorCode); err != nil { + return 0, err + } + + return listRes.Topics[0].Partitions[0].Offset, nil +} + +// processNextFetchesUntilTargetOrMaxLagHonored processes records from Kafka until at least the maxLag is honored. +// This function makes a best effort to get the lag below targetLag, but it's not guaranteed that it will be +// reached once this function successfully returns (only maxLag is guaranteed). +func (p *Reader) processNextFetchesUntilTargetOrMaxLagHonored(ctx context.Context, targetLag, maxLag time.Duration, recordsChan chan<- []Record) error { + logger := log.With(p.logger, "target_lag", targetLag, "max_lag", maxLag) + level.Info(logger).Log("msg", "partition reader is starting to consume partition until target and max consumer lag is honored") + + attempts := []func() (time.Duration, error){ + // First process fetches until at least the max lag is honored. + func() (time.Duration, error) { + return p.processNextFetchesUntilLagHonored(ctx, maxLag, logger, recordsChan, time.Since) + }, + + // If the target lag hasn't been reached with the first attempt (which stops once at least the max lag + // is honored), then we try to reach the (lower) target lag within a fixed time (best-effort). + // The timeout is equal to the max lag. This is done because we expect at least a 2x replay speed + // from Kafka (which means at most it takes 1s to ingest 2s of data): assuming new data is continuously + // written to the partition, we give the reader maxLag time to replay the backlog and ingest the new data + // written in the meantime. + func() (time.Duration, error) { + timedCtx, cancel := context.WithTimeoutCause(ctx, maxLag, errWaitTargetLagDeadlineExceeded) + defer cancel() + + return p.processNextFetchesUntilLagHonored(timedCtx, targetLag, logger, recordsChan, time.Since) + }, + + // If the target lag hasn't been reached with the previous attempt then we'll move on. However, + // we still need to guarantee that the lag didn't increase in the meantime and that max lag is still honored.
+ func() (time.Duration, error) { + return p.processNextFetchesUntilLagHonored(ctx, maxLag, logger, recordsChan, time.Since) + }, + } + + var currLag time.Duration + for _, attempt := range attempts { + var err error + + currLag, err = attempt() + if errors.Is(err, errWaitTargetLagDeadlineExceeded) { + continue + } + if err != nil { + return err + } + if currLag <= targetLag { + level.Info(logger).Log( + "msg", "partition reader consumed partition and current lag is lower or equal to configured target consumer lag", + "last_consumed_offset", p.committer.lastCommittedOffset, + "current_lag", currLag, + ) + return nil + } + } + + level.Warn(logger).Log( + "msg", "partition reader consumed partition and current lag is lower than configured max consumer lag but higher than target consumer lag", + "last_consumed_offset", p.committer.lastCommittedOffset, + "current_lag", currLag, + ) + return nil +} + +func (p *Reader) processNextFetchesUntilLagHonored(ctx context.Context, maxLag time.Duration, logger log.Logger, recordsChan chan<- []Record, timeSince func(time.Time) time.Duration) (time.Duration, error) { + boff := backoff.New(ctx, backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: time.Second, + MaxRetries: 0, // Retry forever (unless context is canceled / deadline exceeded). + }) + currLag := time.Duration(0) + + for boff.Ongoing() { + // Send a direct request to the Kafka backend to fetch the partition start offset. + partitionStartOffset, err := p.fetchPartitionOffset(ctx, kafkaStartOffset) + if err != nil { + level.Warn(logger).Log("msg", "partition reader failed to fetch partition start offset", "err", err) + boff.Wait() + continue + } + + // Send a direct request to the Kafka backend to fetch the last produced offset. + // We intentionally don't use WaitNextFetchLastProducedOffset() to avoid introducing further + // latency. + lastProducedOffsetRequestedAt := time.Now() + lastProducedOffset, err := p.fetchPartitionOffset(ctx, kafkaEndOffset) + if err != nil { + level.Warn(logger).Log("msg", "partition reader failed to fetch last produced offset", "err", err) + boff.Wait() + continue + } + lastProducedOffset = lastProducedOffset - 1 // Kafka returns the next empty offset, so we must subtract 1 to get the latest written offset. + + // Ensure there are some records to consume. For example, if the partition has been inactive for a long + // time and all its records have been deleted, the partition start offset may be > 0 but there are no + // records to actually consume. + if partitionStartOffset > lastProducedOffset { + level.Info(logger).Log("msg", "partition reader found no records to consume because partition is empty", "partition_start_offset", partitionStartOffset, "last_produced_offset", lastProducedOffset) + return 0, nil + } + + // This message is NOT expected to be logged at a high rate. In this log we display the last measured + // lag; if we don't have it yet (lag is the zero value), it is omitted. + level.Info(loggerWithCurrentLagIfSet(logger, currLag)).Log("msg", "partition reader is consuming records to honor target and max consumer lag", "partition_start_offset", partitionStartOffset, "last_produced_offset", lastProducedOffset) + + for boff.Ongoing() { + // Continue reading until we reach the desired offset.
+ if lastProducedOffset <= p.lastProcessedOffset { + break + } + + records := p.poll(ctx) + recordsChan <- records + } + if boff.Err() != nil { + return 0, boff.ErrCause() + } + + // If it took less than the maximum desired lag to replay the partition, + // then we can stop here; otherwise we'll have to redo it. + if currLag = timeSince(lastProducedOffsetRequestedAt); currLag <= maxLag { + return currLag, nil + } + } + + return 0, boff.ErrCause() +} + +func loggerWithCurrentLagIfSet(logger log.Logger, currLag time.Duration) log.Logger { + if currLag <= 0 { + return logger + } + + return log.With(logger, "current_lag", currLag) +} + +func (p *Reader) startFetchLoop(ctx context.Context) chan []Record { records := make(chan []Record) go func() { for { diff --git a/pkg/kafka/partition/reader_test.go b/pkg/kafka/partition/reader_test.go index addc5779bb6a..8d548c831241 100644 --- a/pkg/kafka/partition/reader_test.go +++ b/pkg/kafka/partition/reader_test.go @@ -2,6 +2,7 @@ package partition import ( "context" + "fmt" "sync" "testing" "time" @@ -13,6 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/twmb/franz-go/pkg/kgo" "github.com/grafana/loki/v3/pkg/kafka" "github.com/grafana/loki/v3/pkg/kafka/testkafka" @@ -39,7 +41,10 @@ func (m *mockConsumer) Start(ctx context.Context, recordsChan <-chan []Record) f select { case <-ctx.Done(): return - case records := <-recordsChan: + case records, ok := <-recordsChan: + if !ok { + return + } m.recordsChan <- records } } @@ -100,3 +105,129 @@ func TestPartitionReader_BasicFunctionality(t *testing.T) { err = services.StopAndAwaitTerminated(context.Background(), partitionReader) require.NoError(t, err) } + +func TestPartitionReader_ProcessCatchUpAtStartup(t *testing.T) { + _, kafkaCfg := testkafka.CreateCluster(t, 1, "test-topic") + var consumerStarting *mockConsumer + + consumerFactory := func(_ Committer) (Consumer, error) { + // Return two consumers to ensure we are processing requests during service `start()` and not during `run()`. + if consumerStarting == nil { + consumerStarting = newMockConsumer() + return consumerStarting, nil + } + return newMockConsumer(), nil + } + + partitionReader, err := NewReader(kafkaCfg, 0, "test-consumer-group", consumerFactory, log.NewNopLogger(), prometheus.NewRegistry()) + require.NoError(t, err) + producer, err := kafka.NewWriterClient(kafkaCfg, 100, log.NewNopLogger(), prometheus.NewRegistry()) + require.NoError(t, err) + + stream := logproto.Stream{ + Labels: labels.FromStrings("foo", "bar").String(), + Entries: []logproto.Entry{{Timestamp: time.Now(), Line: "test"}}, + } + + records, err := kafka.Encode(0, "test-tenant", stream, 10<<20) + require.NoError(t, err) + require.Len(t, records, 1) + + producer.ProduceSync(context.Background(), records...) + producer.ProduceSync(context.Background(), records...) + + // Enable the catch-up logic so starting the reader will read any existing records. + kafkaCfg.TargetConsumerLagAtStartup = time.Second * 1 + kafkaCfg.MaxConsumerLagAtStartup = time.Second * 2 + + err = services.StartAndAwaitRunning(context.Background(), partitionReader) + require.NoError(t, err) + + // This message should not be processed by consumerStarting. + producer.ProduceSync(context.Background(), records...)
+ + // Wait for records to be processed + require.Eventually(t, func() bool { + return len(consumerStarting.recordsChan) == 1 // All pending messages will be received in one batch + }, 10*time.Second, 10*time.Millisecond) + + receivedRecords := <-consumerStarting.recordsChan + require.Len(t, receivedRecords, 2) + assert.Equal(t, "test-tenant", receivedRecords[0].TenantID) + assert.Equal(t, records[0].Value, receivedRecords[0].Content) + assert.Equal(t, "test-tenant", receivedRecords[1].TenantID) + assert.Equal(t, records[0].Value, receivedRecords[1].Content) + + assert.Equal(t, 0, len(consumerStarting.recordsChan)) + + err = services.StopAndAwaitTerminated(context.Background(), partitionReader) + require.NoError(t, err) +} + +func TestPartitionReader_ProcessCommits(t *testing.T) { + _, kafkaCfg := testkafka.CreateCluster(t, 1, "test-topic") + consumer := newMockConsumer() + + consumerFactory := func(_ Committer) (Consumer, error) { + return consumer, nil + } + + partitionID := int32(0) + partitionReader, err := NewReader(kafkaCfg, partitionID, "test-consumer-group", consumerFactory, log.NewNopLogger(), prometheus.NewRegistry()) + require.NoError(t, err) + producer, err := kafka.NewWriterClient(kafkaCfg, 100, log.NewNopLogger(), prometheus.NewRegistry()) + require.NoError(t, err) + + // Init the client: this usually happens in "start" but we want to manage our own lifecycle for this test. + partitionReader.client, err = kafka.NewReaderClient(kafkaCfg, nil, log.NewNopLogger(), + kgo.ConsumePartitions(map[string]map[int32]kgo.Offset{ + kafkaCfg.Topic: {partitionID: kgo.NewOffset().AtStart()}, + }), + ) + require.NoError(t, err) + + stream := logproto.Stream{ + Labels: labels.FromStrings("foo", "bar").String(), + Entries: []logproto.Entry{{Timestamp: time.Now(), Line: "test"}}, + } + + records, err := kafka.Encode(partitionID, "test-tenant", stream, 10<<20) + require.NoError(t, err) + require.Len(t, records, 1) + + ctx, cancel := context.WithDeadlineCause(context.Background(), time.Now().Add(10*time.Second), fmt.Errorf("test unexpectedly deadlocked")) + recordsChan := make(chan []Record) + wait := consumer.Start(ctx, recordsChan) + + targetLag := time.Second + + i := -1 + iterations := 5 + producer.ProduceSync(context.Background(), records...) + // timeSince acts as a hook for when we check whether we've honored the lag. We modify it to respond "no" initially, to force a re-loop, and then "yes" after `iterations`. + // We also inject a new Kafka record each time so there is more to consume. + timeSince := func(time.Time) time.Duration { + i++ + if i < iterations { + producer.ProduceSync(context.Background(), records...) + return targetLag + 1 + } + return targetLag - 1 + } + + _, err = partitionReader.processNextFetchesUntilLagHonored(ctx, targetLag, log.NewNopLogger(), recordsChan, timeSince) + assert.NoError(t, err) + + // Wait for all the records to be processed + cancel() + wait() + + close(recordsChan) + close(consumer.recordsChan) + recordsCount := 0 + for receivedRecords := range consumer.recordsChan { + recordsCount += len(receivedRecords) + } + // We expect to have processed all the records: the initial one plus one per iteration.
+ assert.Equal(t, iterations+1, recordsCount) +} diff --git a/pkg/kafka/testkafka/cluster.go b/pkg/kafka/testkafka/cluster.go index fc00e7272e7a..cc5847c2bfd3 100644 --- a/pkg/kafka/testkafka/cluster.go +++ b/pkg/kafka/testkafka/cluster.go @@ -102,7 +102,7 @@ func addSupportForConsumerGroups(t testing.TB, cluster *kfake.Cluster, topicName partitionID = allPartitions } else { partitionID = req.Groups[0].Topics[0].Partitions[0] - assert.Len(t, req.Groups[0], 1, "test only has support for one partition per request") + assert.Len(t, req.Groups[0].Topics, 1, "test only has support for one topic per request") assert.Len(t, req.Groups[0].Topics[0].Partitions, 1, "test only has support for one partition per request") } diff --git a/pkg/kafka/writer_client.go b/pkg/kafka/writer_client.go index ddd12a646d69..59fefda31d19 100644 --- a/pkg/kafka/writer_client.go +++ b/pkg/kafka/writer_client.go @@ -18,6 +18,8 @@ import ( "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" "go.uber.org/atomic" + + "github.com/grafana/loki/v3/pkg/util/constants" ) // NewWriterClient returns the kgo.Client that should be used by the Writer. @@ -189,6 +191,7 @@ func NewProducer(client *kgo.Client, maxBufferedBytes int64, reg prometheus.Regi // Metrics. bufferedProduceBytes: promauto.With(reg).NewSummary( prometheus.SummaryOpts{ + Namespace: constants.Loki, Name: "buffered_produce_bytes", Help: "The buffered produce records in bytes. Quantile buckets keep track of buffered records size over the last 60s.", Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001, 1: 0.001}, @@ -197,16 +200,19 @@ func NewProducer(client *kgo.Client, maxBufferedBytes int64, reg prometheus.Regi }), bufferedProduceBytesLimit: promauto.With(reg).NewGauge( prometheus.GaugeOpts{ - Name: "buffered_produce_bytes_limit", - Help: "The bytes limit on buffered produce records. Produce requests fail once this limit is reached.", + Namespace: constants.Loki, + Name: "buffered_produce_bytes_limit", + Help: "The bytes limit on buffered produce records.
Produce requests fail once this limit is reached.", }), produceRequestsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "produce_requests_total", - Help: "Total number of produce requests issued to Kafka.", + Namespace: constants.Loki, + Name: "produce_requests_total", + Help: "Total number of produce requests issued to Kafka.", }), produceFailuresTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "produce_failures_total", - Help: "Total number of failed produce requests issued to Kafka.", + Namespace: constants.Loki, + Name: "produce_failures_total", + Help: "Total number of failed produce requests issued to Kafka.", }, []string{"reason"}), } diff --git a/pkg/logcli/output/default_test.go b/pkg/logcli/output/default_test.go index 121b6d481600..e55627f75a20 100644 --- a/pkg/logcli/output/default_test.go +++ b/pkg/logcli/output/default_test.go @@ -79,8 +79,6 @@ func TestDefaultOutput_Format(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() writer := &bytes.Buffer{} @@ -168,8 +166,6 @@ func TestColorForLabels(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() labelsColor := getColor(testData.labels.String()) diff --git a/pkg/logcli/output/jsonl_test.go b/pkg/logcli/output/jsonl_test.go index 22e81fd29ea9..68e52294441f 100644 --- a/pkg/logcli/output/jsonl_test.go +++ b/pkg/logcli/output/jsonl_test.go @@ -63,8 +63,6 @@ func TestJSONLOutput_Format(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() writer := &bytes.Buffer{} diff --git a/pkg/logcli/output/raw_test.go b/pkg/logcli/output/raw_test.go index 844e8e811afc..6e000296a1a2 100644 --- a/pkg/logcli/output/raw_test.go +++ b/pkg/logcli/output/raw_test.go @@ -61,8 +61,6 @@ func TestRawOutput_Format(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/logcli/query/query_test.go b/pkg/logcli/query/query_test.go index 35077968a117..000c96a3ee0a 100644 --- a/pkg/logcli/query/query_test.go +++ b/pkg/logcli/query/query_test.go @@ -887,8 +887,6 @@ func TestParallelJobs(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run( tt.name, func(t *testing.T) { diff --git a/pkg/loghttp/params.go b/pkg/loghttp/params.go index c32161d5bf5e..6b289f445885 100644 --- a/pkg/loghttp/params.go +++ b/pkg/loghttp/params.go @@ -49,7 +49,7 @@ func lineLimit(r *http.Request) (uint32, error) { func detectedFieldsLimit(r *http.Request) (uint32, error) { limit := r.Form.Get("limit") if limit == "" { - // for backwards compatability + // for backwards compatibility limit = r.Form.Get("field_limit") } diff --git a/pkg/loghttp/params_test.go b/pkg/loghttp/params_test.go index 3456fdc2ed80..e1c695167e55 100644 --- a/pkg/loghttp/params_test.go +++ b/pkg/loghttp/params_test.go @@ -39,8 +39,6 @@ func TestHttp_defaultQueryRangeStep(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { assert.Equal(t, testData.expected, defaultQueryRangeStep(testData.start, testData.end)) }) @@ -123,8 +121,6 @@ func TestHttp_ParseRangeQuery_Step(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { req := httptest.NewRequest("GET", testData.reqPath, nil) err := req.ParseForm() @@ 
-176,8 +172,6 @@ func Test_interval(t *testing.T) { }, } for _, testData := range tests { - testData := testData - t.Run(testData.name, func(t *testing.T) { req := httptest.NewRequest("GET", testData.reqPath, nil) err := req.ParseForm() diff --git a/pkg/loghttp/query_test.go b/pkg/loghttp/query_test.go index 889a8900eac7..47b61622e4bd 100644 --- a/pkg/loghttp/query_test.go +++ b/pkg/loghttp/query_test.go @@ -280,7 +280,6 @@ func Test_QueryResponseUnmarshal(t *testing.T) { }, }, } { - tt := tt t.Run("", func(t *testing.T) { b, err := jsoniter.Marshal(tt) require.Nil(t, err) diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go index fd534ac046e8..889d06344ddb 100644 --- a/pkg/logql/engine_test.go +++ b/pkg/logql/engine_test.go @@ -147,7 +147,6 @@ func TestEngine_LogsRateUnwrap(t *testing.T) { promql.Vector{promql.Sample{T: 60 * 1000, F: 0.46666766666666665, Metric: labels.FromStrings("app", "foo")}}, }, } { - test := test t.Run(fmt.Sprintf("%s %s", test.qs, test.direction), func(t *testing.T) { t.Parallel() @@ -954,7 +953,6 @@ func TestEngine_InstantQuery(t *testing.T) { }, }, } { - test := test t.Run(fmt.Sprintf("%s %s", test.qs, test.direction), func(t *testing.T) { eng := NewEngine(EngineOpts{}, newQuerierRecorder(t, test.data, test.params), NoLimits, log.NewNopLogger()) @@ -2256,7 +2254,6 @@ func TestEngine_RangeQuery(t *testing.T) { }, }, } { - test := test t.Run(fmt.Sprintf("%s %s", test.qs, test.direction), func(t *testing.T) { t.Parallel() @@ -2425,7 +2422,6 @@ func TestStepEvaluator_Error(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { eng := NewEngine(EngineOpts{}, tc.querier, NoLimits, log.NewNopLogger()) diff --git a/pkg/logql/log/parser_hints_test.go b/pkg/logql/log/parser_hints_test.go index 96bfc15b3863..d41a597501b0 100644 --- a/pkg/logql/log/parser_hints_test.go +++ b/pkg/logql/log/parser_hints_test.go @@ -225,7 +225,6 @@ func Test_ParserHints(t *testing.T) { `{app="nginx", message_message="foo"}`, }, } { - tt := tt t.Run(tt.expr, func(t *testing.T) { t.Parallel() expr, err := syntax.ParseSampleExpr(tt.expr) diff --git a/pkg/logql/log/parser_test.go b/pkg/logql/log/parser_test.go index 28989b7cb5fe..5ac57b9ef054 100644 --- a/pkg/logql/log/parser_test.go +++ b/pkg/logql/log/parser_test.go @@ -368,6 +368,26 @@ func TestJSONExpressionParser(t *testing.T) { labels.FromStrings("param", "1"), NoParserHints(), }, + { + "object element not present", + testLine, + []LabelExtractionExpr{ + NewLabelExtractionExpr("undefined", `pod[""]`), + }, + labels.EmptyLabels(), + labels.FromStrings("undefined", ""), + NoParserHints(), + }, + { + "accessing invalid array index", + testLine, + []LabelExtractionExpr{ + NewLabelExtractionExpr("param", `pod.deployment.params[""]`), + }, + labels.EmptyLabels(), + labels.FromStrings("param", ""), + NoParserHints(), + }, { "array string element", testLine, @@ -1386,7 +1406,6 @@ func Test_PatternParser(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.pattern, func(t *testing.T) { t.Parallel() b := NewBaseLabelsBuilder().ForLabels(tt.lbs, tt.lbs.Hash()) diff --git a/pkg/logql/log/pattern/lexer_test.go b/pkg/logql/log/pattern/lexer_test.go index 3e6bcf1b12e9..30638f4e8a3c 100644 --- a/pkg/logql/log/pattern/lexer_test.go +++ b/pkg/logql/log/pattern/lexer_test.go @@ -20,7 +20,6 @@ func Test_Lex(t *testing.T) { {`<1foo>`, []int{LITERAL, LITERAL, LITERAL, LITERAL, LITERAL, LITERAL}}, {`▶`, []int{LITERAL}}, } { - tc := tc t.Run(tc.input, func(t *testing.T) { actual := []int{} l := newLexer() 
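Aside on the consumer-lag startup behavior introduced in pkg/kafka/partition/reader.go above: the logic reduces to a three-attempt sequence in which maxLag is a hard guarantee and targetLag is best-effort. A minimal sketch of that control flow, assuming a hypothetical consumeUntilLagBelow helper standing in for processNextFetchesUntilLagHonored (it consumes until the measured lag drops below the given bound and returns the final lag):

package catchup

import (
	"context"
	"errors"
	"time"
)

// catchUp mirrors the attempt sequence of processNextFetchesUntilTargetOrMaxLagHonored.
func catchUp(ctx context.Context, targetLag, maxLag time.Duration, consumeUntilLagBelow func(context.Context, time.Duration) (time.Duration, error)) error {
	attempts := []func() (time.Duration, error){
		// 1. Guarantee: consume until the measured lag is at most maxLag.
		func() (time.Duration, error) { return consumeUntilLagBelow(ctx, maxLag) },
		// 2. Best effort: aim for the lower targetLag, but spend at most maxLag of
		// wall time on it (replay is assumed to be at least 2x faster than ingest).
		func() (time.Duration, error) {
			timedCtx, cancel := context.WithTimeout(ctx, maxLag)
			defer cancel()
			return consumeUntilLagBelow(timedCtx, targetLag)
		},
		// 3. Re-verify the guarantee: the lag may have grown during attempt 2.
		func() (time.Duration, error) { return consumeUntilLagBelow(ctx, maxLag) },
	}
	for _, attempt := range attempts {
		lag, err := attempt()
		if errors.Is(err, context.DeadlineExceeded) {
			continue // attempt 2 ran out of time; fall through to the re-check
		}
		if err != nil {
			return err
		}
		if lag <= targetLag {
			return nil // target reached; skip the remaining attempts
		}
	}
	return nil // maxLag is honored, targetLag possibly not
}

The real implementation signals the timeout through context.WithTimeoutCause and a sentinel error, and logs a warning when only maxLag was met; the shape of the loop is the same.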
diff --git a/pkg/logql/log/pattern/parser_test.go b/pkg/logql/log/pattern/parser_test.go index dbcb418fd382..0f1166aa2681 100644 --- a/pkg/logql/log/pattern/parser_test.go +++ b/pkg/logql/log/pattern/parser_test.go @@ -53,7 +53,6 @@ func Test_Parse(t *testing.T) { nil, }, } { - tc := tc actual, err := parseExpr(tc.input) if tc.err != nil || err != nil { require.Equal(t, tc.err, err) diff --git a/pkg/logql/log/pattern/pattern_test.go b/pkg/logql/log/pattern/pattern_test.go index ca4f3ea47d96..449843a28e07 100644 --- a/pkg/logql/log/pattern/pattern_test.go +++ b/pkg/logql/log/pattern/pattern_test.go @@ -176,7 +176,6 @@ func Test_BytesIndexUnicode(t *testing.T) { func Test_matcher_Matches(t *testing.T) { for _, tt := range fixtures { - tt := tt t.Run(tt.expr, func(t *testing.T) { t.Parallel() m, err := New(tt.expr) diff --git a/pkg/logql/rangemapper_test.go b/pkg/logql/rangemapper_test.go index 5365c7b2b73f..cd6ad68c1bec 100644 --- a/pkg/logql/rangemapper_test.go +++ b/pkg/logql/rangemapper_test.go @@ -75,7 +75,6 @@ func Test_SplitRangeInterval(t *testing.T) { 2, }, } { - tc := tc t.Run(tc.expr, func(t *testing.T) { t.Parallel() @@ -1811,7 +1810,6 @@ func Test_SplitRangeVectorMapping(t *testing.T) { 3, }, } { - tc := tc t.Run(tc.expr, func(t *testing.T) { t.Parallel() @@ -2002,7 +2000,6 @@ func Test_SplitRangeVectorMapping_Noop(t *testing.T) { `vector(0.000000)`, }, } { - tc := tc t.Run(tc.expr, func(t *testing.T) { t.Parallel() diff --git a/pkg/logql/sketch/topk_slow_test.go b/pkg/logql/sketch/topk_slow_test.go index 8f2095d987de..fe5105562b73 100644 --- a/pkg/logql/sketch/topk_slow_test.go +++ b/pkg/logql/sketch/topk_slow_test.go @@ -135,7 +135,6 @@ func TestCMSTopk(t *testing.T) { } for _, tc := range testcases { - tc := tc t.Run(fmt.Sprintf("num_streams/%d_k/%d_iterations/%d", tc.numStreams, tc.k, tc.iterations), func(t *testing.T) { t.Parallel() missing := 0 diff --git a/pkg/logql/syntax/ast_test.go b/pkg/logql/syntax/ast_test.go index 98987e38c4be..88fc0021eb33 100644 --- a/pkg/logql/syntax/ast_test.go +++ b/pkg/logql/syntax/ast_test.go @@ -53,7 +53,6 @@ func Test_logSelectorExpr_String(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.selector, func(t *testing.T) { t.Parallel() expr, err := ParseLogSelector(tt.selector, true) @@ -588,7 +587,6 @@ func Test_FilterMatcher(t *testing.T) { []linecheck{{"counter=1", false}, {"counter=0", false}, {"counter=-1", true}, {"counter=-2", true}}, }, } { - tt := tt t.Run(tt.q, func(t *testing.T) { t.Parallel() expr, err := ParseLogSelector(tt.q, true) diff --git a/pkg/logql/syntax/walk_test.go b/pkg/logql/syntax/walk_test.go index 3350515b9c46..ee536e969471 100644 --- a/pkg/logql/syntax/walk_test.go +++ b/pkg/logql/syntax/walk_test.go @@ -26,7 +26,6 @@ func Test_Walkable(t *testing.T) { }, } for _, test := range tests { - test := test t.Run(test.desc, func(t *testing.T) { expr, err := ParseExpr(test.expr) require.Nil(t, err) @@ -72,7 +71,6 @@ func Test_AppendMatchers(t *testing.T) { }, } for _, test := range tests { - test := test t.Run(test.desc, func(t *testing.T) { expr, err := ParseExpr(test.expr) require.NoError(t, err) diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 84af0a73504f..f59218307e7d 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -736,7 +736,7 @@ func (t *Loki) setupModuleManager() error { PatternRingClient: {Server, MemberlistKV, Analytics}, PatternIngesterTee: {Server, MemberlistKV, Analytics, PatternRingClient}, PatternIngester: {Server, MemberlistKV, Analytics, PatternRingClient, PatternIngesterTee}, 
- IngesterQuerier: {Ring}, + IngesterQuerier: {Ring, PartitionRing, Overrides}, QuerySchedulerRing: {Overrides, MemberlistKV}, IndexGatewayRing: {Overrides, MemberlistKV}, PartitionRing: {MemberlistKV, Server, Ring}, diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 4c718722a509..24e19d7c58e0 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -322,6 +322,10 @@ func (t *Loki) initTenantConfigs() (_ services.Service, err error) { func (t *Loki) initDistributor() (services.Service, error) { t.Cfg.Distributor.KafkaConfig = t.Cfg.KafkaConfig + if t.Cfg.Distributor.KafkaEnabled && !t.Cfg.Ingester.KafkaIngestion.Enabled { + return nil, errors.New("kafka is enabled in distributor but not in ingester") + } + var err error logger := log.With(util_log.Logger, "component", "distributor") t.distributor, err = distributor.New( @@ -971,8 +975,9 @@ func (t *Loki) setupAsyncStore() error { } func (t *Loki) initIngesterQuerier() (_ services.Service, err error) { - logger := log.With(util_log.Logger, "component", "querier") - t.ingesterQuerier, err = querier.NewIngesterQuerier(t.Cfg.IngesterClient, t.ring, t.Cfg.Querier.ExtraQueryDelay, t.Cfg.MetricsNamespace, logger) + logger := log.With(util_log.Logger, "component", "ingester-querier") + + t.ingesterQuerier, err = querier.NewIngesterQuerier(t.Cfg.Querier, t.Cfg.IngesterClient, t.ring, t.partitionRing, t.Overrides.IngestionPartitionsTenantShardSize, t.Cfg.MetricsNamespace, logger) if err != nil { return nil, err } @@ -1480,8 +1485,6 @@ func (t *Loki) initIndexGateway() (services.Service, error) { var indexClients []indexgateway.IndexClientWithRange for i, period := range t.Cfg.SchemaConfig.Configs { - period := period - if period.IndexType != types.BoltDBShipperType { continue } @@ -1754,7 +1757,7 @@ func (t *Loki) initAnalytics() (services.Service, error) { // The Ingest Partition Ring is responsible for watching the available ingesters and assigning partitions to incoming requests. 
func (t *Loki) initPartitionRing() (services.Service, error) { - if !t.Cfg.Ingester.KafkaIngestion.Enabled { + if !t.Cfg.Ingester.KafkaIngestion.Enabled && !t.Cfg.Querier.QueryPartitionIngesters { return nil, nil } diff --git a/pkg/pattern/drain/drain_test.go b/pkg/pattern/drain/drain_test.go index 1aabea548aed..9359feb8dd34 100644 --- a/pkg/pattern/drain/drain_test.go +++ b/pkg/pattern/drain/drain_test.go @@ -426,7 +426,6 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.inputFile, func(t *testing.T) { file, err := os.Open(tt.inputFile) require.NoError(t, err) @@ -529,7 +528,6 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T) }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { for _, line := range tt.inputLines { tt.drain.Train(line, 0) @@ -630,7 +628,6 @@ func TestDrain_PruneTreeClearsOldBranches(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { now := time.Now() for i, line := range tt.inputLines { diff --git a/pkg/pattern/ingester_querier.go b/pkg/pattern/ingester_querier.go index 3a275ffd4644..185a35b0463d 100644 --- a/pkg/pattern/ingester_querier.go +++ b/pkg/pattern/ingester_querier.go @@ -146,8 +146,6 @@ func (q *IngesterQuerier) forGivenIngesters(ctx context.Context, replicationSet responses := make([]ResponseFromIngesters, len(replicationSet.Instances)) for i, ingester := range replicationSet.Instances { - ingester := ingester - i := i g.Go(func() error { client, err := q.ringClient.GetClientFor(ingester.Addr) if err != nil { diff --git a/pkg/pattern/iter/iterator_test.go b/pkg/pattern/iter/iterator_test.go index 3d14b2550c4b..12506dac0b4b 100644 --- a/pkg/pattern/iter/iterator_test.go +++ b/pkg/pattern/iter/iterator_test.go @@ -48,7 +48,6 @@ func TestSliceIterator(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { got := slice(NewSlice(tt.pattern, tt.samples)) require.Equal(t, tt.want, got) diff --git a/pkg/pattern/iter/merge_test.go b/pkg/pattern/iter/merge_test.go index a1d643a5a01c..be9069c9ed72 100644 --- a/pkg/pattern/iter/merge_test.go +++ b/pkg/pattern/iter/merge_test.go @@ -62,7 +62,6 @@ func TestMerge(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { it := NewMerge(tt.iterators...) 
defer it.Close() diff --git a/pkg/querier-rf1/wal/chunks.go b/pkg/querier-rf1/wal/chunks.go index bfe565ff6134..732180b8742f 100644 --- a/pkg/querier-rf1/wal/chunks.go +++ b/pkg/querier-rf1/wal/chunks.go @@ -285,8 +285,6 @@ func downloadChunks(ctx context.Context, storage BlockStorage, chks []ChunkData) g, ctx := errgroup.WithContext(ctx) g.SetLimit(64) for i, chunk := range chks { - chunk := chunk - i := i g.Go(func() error { chunkData, err := readChunkData(ctx, storage, chunk) if err != nil { diff --git a/pkg/querier-rf1/wal/querier.go b/pkg/querier-rf1/wal/querier.go index 0fb2cc23dc52..e43f3fc1041f 100644 --- a/pkg/querier-rf1/wal/querier.go +++ b/pkg/querier-rf1/wal/querier.go @@ -178,8 +178,6 @@ func (q *Querier) forIndices(ctx context.Context, req *metastorepb.ListBlocksFor g, ctx := errgroup.WithContext(ctx) g.SetLimit(32) for _, meta := range metas { - - meta := meta g.Go(func() error { reader, err := q.blockStorage.GetObjectRange(ctx, wal.Dir+meta.Id, meta.IndexRef.Offset, meta.IndexRef.Length) if err != nil { diff --git a/pkg/querier/ingester_querier.go b/pkg/querier/ingester_querier.go index c18ca7793066..cc076be1faef 100644 --- a/pkg/querier/ingester_querier.go +++ b/pkg/querier/ingester_querier.go @@ -8,6 +8,8 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/grafana/dskit/concurrency" + "github.com/grafana/dskit/user" "golang.org/x/exp/slices" "github.com/grafana/loki/v3/pkg/storage/stores/index/seriesvolume" @@ -40,28 +42,32 @@ type responseFromIngesters struct { // IngesterQuerier helps with querying the ingesters. type IngesterQuerier struct { - ring ring.ReadRing - pool *ring_client.Pool - extraQueryDelay time.Duration - logger log.Logger + querierConfig Config + ring ring.ReadRing + partitionRing *ring.PartitionInstanceRing + getShardCountForTenant func(string) int + pool *ring_client.Pool + logger log.Logger } -func NewIngesterQuerier(clientCfg client.Config, ring ring.ReadRing, extraQueryDelay time.Duration, metricsNamespace string, logger log.Logger) (*IngesterQuerier, error) { +func NewIngesterQuerier(querierConfig Config, clientCfg client.Config, ring ring.ReadRing, partitionRing *ring.PartitionInstanceRing, getShardCountForTenant func(string) int, metricsNamespace string, logger log.Logger) (*IngesterQuerier, error) { factory := func(addr string) (ring_client.PoolClient, error) { return client.New(clientCfg, addr) } - return newIngesterQuerier(clientCfg, ring, extraQueryDelay, ring_client.PoolAddrFunc(factory), metricsNamespace, logger) + return newIngesterQuerier(querierConfig, clientCfg, ring, partitionRing, getShardCountForTenant, ring_client.PoolAddrFunc(factory), metricsNamespace, logger) } // newIngesterQuerier creates a new IngesterQuerier and allows to pass a custom ingester client factory // used for testing purposes -func newIngesterQuerier(clientCfg client.Config, ring ring.ReadRing, extraQueryDelay time.Duration, clientFactory ring_client.PoolFactory, metricsNamespace string, logger log.Logger) (*IngesterQuerier, error) { +func newIngesterQuerier(querierConfig Config, clientCfg client.Config, ring ring.ReadRing, partitionRing *ring.PartitionInstanceRing, getShardCountForTenant func(string) int, clientFactory ring_client.PoolFactory, metricsNamespace string, logger log.Logger) (*IngesterQuerier, error) { iq := IngesterQuerier{ - ring: ring, - pool: clientpool.NewPool("ingester", clientCfg.PoolConfig, ring, clientFactory, util_log.Logger, metricsNamespace), - extraQueryDelay: extraQueryDelay, - logger: logger, + 
querierConfig: querierConfig, + ring: ring, + partitionRing: partitionRing, + getShardCountForTenant: getShardCountForTenant, + pool: clientpool.NewPool("ingester", clientCfg.PoolConfig, ring, clientFactory, util_log.Logger, metricsNamespace), + logger: logger, } err := services.StartAndAwaitRunning(context.Background(), iq.pool) @@ -73,22 +79,53 @@ func newIngesterQuerier(clientCfg client.Config, ring ring.ReadRing, extraQueryD } // forAllIngesters runs f, in parallel, for all ingesters -// TODO taken from Cortex, see if we can refactor out an usable interface. func (q *IngesterQuerier) forAllIngesters(ctx context.Context, f func(context.Context, logproto.QuerierClient) (interface{}, error)) ([]responseFromIngesters, error) { + if q.querierConfig.QueryPartitionIngesters { + tenantID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + tenantShards := q.getShardCountForTenant(tenantID) + subring, err := q.partitionRing.ShuffleShardWithLookback(tenantID, tenantShards, q.querierConfig.QueryIngestersWithin, time.Now()) + if err != nil { + return nil, err + } + replicationSets, err := subring.GetReplicationSetsForOperation(ring.Read) + if err != nil { + return nil, err + } + return q.forGivenIngesterSets(ctx, replicationSets, f) + } + replicationSet, err := q.ring.GetReplicationSetForOperation(ring.Read) if err != nil { return nil, err } - return q.forGivenIngesters(ctx, replicationSet, f) + return q.forGivenIngesters(ctx, replicationSet, defaultQuorumConfig(), f) } -// forGivenIngesters runs f, in parallel, for given ingesters -func (q *IngesterQuerier) forGivenIngesters(ctx context.Context, replicationSet ring.ReplicationSet, f func(context.Context, logproto.QuerierClient) (interface{}, error)) ([]responseFromIngesters, error) { - cfg := ring.DoUntilQuorumConfig{ +// forGivenIngesterSets runs f, in parallel, for given ingester sets +func (q *IngesterQuerier) forGivenIngesterSets(ctx context.Context, replicationSet []ring.ReplicationSet, f func(context.Context, logproto.QuerierClient) (interface{}, error)) ([]responseFromIngesters, error) { + // Enable request minimization so we initially query a single ingester per replication set, as each replication set is one partition. + // Ingesters must supply zone information for this to have an effect.
+ config := ring.DoUntilQuorumConfig{ + MinimizeRequests: true, + } + return concurrency.ForEachJobMergeResults[ring.ReplicationSet, responseFromIngesters](ctx, replicationSet, 0, func(ctx context.Context, set ring.ReplicationSet) ([]responseFromIngesters, error) { + return q.forGivenIngesters(ctx, set, config, f) + }) +} + +func defaultQuorumConfig() ring.DoUntilQuorumConfig { + return ring.DoUntilQuorumConfig{ // Nothing here } - results, err := ring.DoUntilQuorum(ctx, replicationSet, cfg, func(ctx context.Context, ingester *ring.InstanceDesc) (responseFromIngesters, error) { +} + +// forGivenIngesters runs f, in parallel, for given ingesters +func (q *IngesterQuerier) forGivenIngesters(ctx context.Context, replicationSet ring.ReplicationSet, quorumConfig ring.DoUntilQuorumConfig, f func(context.Context, logproto.QuerierClient) (interface{}, error)) ([]responseFromIngesters, error) { + results, err := ring.DoUntilQuorum(ctx, replicationSet, quorumConfig, func(ctx context.Context, ingester *ring.InstanceDesc) (responseFromIngesters, error) { client, err := q.pool.GetClientFor(ingester.Addr) if err != nil { return responseFromIngesters{addr: ingester.Addr}, err @@ -212,7 +249,7 @@ func (q *IngesterQuerier) TailDisconnectedIngesters(ctx context.Context, req *lo } // Instance a tail client for each ingester to re(connect) - reconnectClients, err := q.forGivenIngesters(ctx, ring.ReplicationSet{Instances: reconnectIngesters}, func(_ context.Context, client logproto.QuerierClient) (interface{}, error) { + reconnectClients, err := q.forGivenIngesters(ctx, ring.ReplicationSet{Instances: reconnectIngesters}, defaultQuorumConfig(), func(_ context.Context, client logproto.QuerierClient) (interface{}, error) { return client.Tail(ctx, req) }) if err != nil { @@ -260,7 +297,7 @@ func (q *IngesterQuerier) TailersCount(ctx context.Context) ([]uint32, error) { return nil, httpgrpc.Errorf(http.StatusInternalServerError, "no active ingester found") } - responses, err := q.forGivenIngesters(ctx, replicationSet, func(ctx context.Context, querierClient logproto.QuerierClient) (interface{}, error) { + responses, err := q.forGivenIngesters(ctx, replicationSet, defaultQuorumConfig(), func(ctx context.Context, querierClient logproto.QuerierClient) (interface{}, error) { resp, err := querierClient.TailersCount(ctx, &logproto.TailersCountRequest{}) if err != nil { return nil, err diff --git a/pkg/querier/ingester_querier_test.go b/pkg/querier/ingester_querier_test.go index 713c170f7dea..788902c2624e 100644 --- a/pkg/querier/ingester_querier_test.go +++ b/pkg/querier/ingester_querier_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/grafana/dskit/user" "go.uber.org/atomic" "google.golang.org/grpc/codes" @@ -72,7 +73,6 @@ func TestIngesterQuerier_earlyExitOnQuorum(t *testing.T) { for testName, testData := range tests { for _, retErr := range []bool{true, false} { - testName, testData, retErr := testName, testData, retErr if retErr { testName += " call should return early on breaching max errors" } else { @@ -168,7 +168,6 @@ func TestIngesterQuerier_earlyExitOnQuorum(t *testing.T) { for testName, testData := range tests { for _, retErr := range []bool{true, false} { - testName, testData, retErr := testName, testData, retErr if retErr { testName += " call should not return early on breaching max errors" } else { @@ -226,6 +225,129 @@ func TestIngesterQuerier_earlyExitOnQuorum(t *testing.T) { } } +func TestIngesterQuerierFetchesResponsesFromPartitionIngesters(t *testing.T) { + t.Parallel() + 
ctx := user.InjectOrgID(context.Background(), "test-user") + ctx, cancel := context.WithTimeout(ctx, time.Second*10) + defer cancel() + + ingesters := []ring.InstanceDesc{ + mockInstanceDescWithZone("1.1.1.1", ring.ACTIVE, "A"), + mockInstanceDescWithZone("2.2.2.2", ring.ACTIVE, "B"), + mockInstanceDescWithZone("3.3.3.3", ring.ACTIVE, "A"), + mockInstanceDescWithZone("4.4.4.4", ring.ACTIVE, "B"), + mockInstanceDescWithZone("5.5.5.5", ring.ACTIVE, "A"), + mockInstanceDescWithZone("6.6.6.6", ring.ACTIVE, "B"), + } + + tests := map[string]struct { + method string + testFn func(*IngesterQuerier) error + retVal interface{} + shards int + }{ + "label": { + method: "Label", + testFn: func(ingesterQuerier *IngesterQuerier) error { + _, err := ingesterQuerier.Label(ctx, nil) + return err + }, + retVal: new(logproto.LabelResponse), + }, + "series": { + method: "Series", + testFn: func(ingesterQuerier *IngesterQuerier) error { + _, err := ingesterQuerier.Series(ctx, nil) + return err + }, + retVal: new(logproto.SeriesResponse), + }, + "get_chunk_ids": { + method: "GetChunkIDs", + testFn: func(ingesterQuerier *IngesterQuerier) error { + _, err := ingesterQuerier.GetChunkIDs(ctx, model.Time(0), model.Time(0)) + return err + }, + retVal: new(logproto.GetChunkIDsResponse), + }, + "select_logs": { + method: "Query", + testFn: func(ingesterQuerier *IngesterQuerier) error { + _, err := ingesterQuerier.SelectLogs(ctx, logql.SelectLogParams{ + QueryRequest: new(logproto.QueryRequest), + }) + return err + }, + retVal: newQueryClientMock(), + }, + "select_sample": { + method: "QuerySample", + testFn: func(ingesterQuerier *IngesterQuerier) error { + _, err := ingesterQuerier.SelectSample(ctx, logql.SelectSampleParams{ + SampleQueryRequest: new(logproto.SampleQueryRequest), + }) + return err + }, + retVal: newQuerySampleClientMock(), + }, + "select_logs_shuffle_sharded": { + method: "Query", + testFn: func(ingesterQuerier *IngesterQuerier) error { + _, err := ingesterQuerier.SelectLogs(ctx, logql.SelectLogParams{ + QueryRequest: new(logproto.QueryRequest), + }) + return err + }, + retVal: newQueryClientMock(), + shards: 2, // Must be less than number of partitions + }, + } + + for testName, testData := range tests { + cnt := atomic.NewInt32(0) + + t.Run(testName, func(t *testing.T) { + cnt.Store(0) + runFn := func(args mock.Arguments) { + ctx := args[0].(context.Context) + + select { + case <-ctx.Done(): + // should not be cancelled by the tracker + require.NoError(t, ctx.Err()) + default: + cnt.Add(1) + } + } + + instanceRing := newReadRingMock(ingesters, 0) + ingesterClient := newQuerierClientMock() + ingesterClient.On(testData.method, mock.Anything, mock.Anything, mock.Anything).Return(testData.retVal, nil).Run(runFn) + + partitions := 3 + ingestersPerPartition := len(ingesters) / partitions + assert.Greaterf(t, ingestersPerPartition, 1, "must have more than one ingester per partition") + + ingesterQuerier, err := newTestPartitionIngesterQuerier(ingesterClient, instanceRing, newPartitionInstanceRingMock(instanceRing, ingesters, partitions, ingestersPerPartition), testData.shards) + require.NoError(t, err) + + ingesterQuerier.querierConfig.QueryPartitionIngesters = true + + err = testData.testFn(ingesterQuerier) + require.NoError(t, err) + + if testData.shards == 0 { + testData.shards = partitions + } + expectedCalls := min(testData.shards, partitions) + // Wait for responses: We expect one request per queried partition because we have request minimization enabled & ingesters are in multiple zones. 
+ // If shuffle sharding is enabled, we expect one query per shard as we write to a subset of partitions. + require.Eventually(t, func() bool { return cnt.Load() >= int32(expectedCalls) }, time.Millisecond*100, time.Millisecond*1, "expected all ingesters to respond") + ingesterClient.AssertNumberOfCalls(t, testData.method, expectedCalls) + }) + } +} + func TestQuerier_tailDisconnectedIngesters(t *testing.T) { t.Parallel() @@ -277,8 +399,6 @@ func TestQuerier_tailDisconnectedIngesters(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { req := logproto.TailRequest{ Query: "{type=\"test\"}", @@ -404,9 +524,24 @@ func TestIngesterQuerier_DetectedLabels(t *testing.T) { func newTestIngesterQuerier(readRingMock *readRingMock, ingesterClient *querierClientMock) (*IngesterQuerier, error) { return newIngesterQuerier( + mockQuerierConfig(), mockIngesterClientConfig(), readRingMock, - mockQuerierConfig().ExtraQueryDelay, + nil, + func(string) int { return 0 }, + newIngesterClientMockFactory(ingesterClient), + constants.Loki, + log.NewNopLogger(), + ) +} + +func newTestPartitionIngesterQuerier(ingesterClient *querierClientMock, instanceRing *readRingMock, partitionRing *ring.PartitionInstanceRing, tenantShards int) (*IngesterQuerier, error) { + return newIngesterQuerier( + mockQuerierConfig(), + mockIngesterClientConfig(), + instanceRing, + partitionRing, + func(string) int { return tenantShards }, newIngesterClientMockFactory(ingesterClient), constants.Loki, log.NewNopLogger(), diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 7c7f973b8c90..cdc4175f9fb0 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -72,6 +72,7 @@ type Config struct { QueryIngesterOnly bool `yaml:"query_ingester_only"` MultiTenantQueriesEnabled bool `yaml:"multi_tenant_queries_enabled"` PerRequestLimitsEnabled bool `yaml:"per_request_limits_enabled"` + QueryPartitionIngesters bool `yaml:"query_partition_ingesters" category:"experimental"` } // RegisterFlags register flags. @@ -85,6 +86,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.QueryIngesterOnly, "querier.query-ingester-only", false, "When true, queriers only query the ingesters, and not stored data. This is useful when the object store is unavailable.") f.BoolVar(&cfg.MultiTenantQueriesEnabled, "querier.multi-tenant-queries-enabled", false, "When true, allow queries to span multiple tenants.") f.BoolVar(&cfg.PerRequestLimitsEnabled, "querier.per-request-limits-enabled", false, "When true, querier limits sent via a header are enforced.") + f.BoolVar(&cfg.QueryPartitionIngesters, "querier.query-partition-ingesters", false, "When true, querier directs ingester queries to the partition-ingesters instead of the normal ingesters.") } // Validate validates the config. 
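Aside on the QueryPartitionIngesters path added in pkg/querier/ingester_querier.go above: the tenant's partition ring is shuffle-sharded, each partition yields one replication set, and request minimization lets a single zone-aware ingester answer per partition. A condensed sketch built on the same dskit calls the PR uses; queryOne is a hypothetical callback, and the per-set fan-out is sequential here where the PR parallelizes it with concurrency.ForEachJobMergeResults:

package partitionquery

import (
	"context"
	"time"

	"github.com/grafana/dskit/ring"
)

// queryPartitions queries one ingester per partition in the tenant's shard.
func queryPartitions(ctx context.Context, partitions *ring.PartitionInstanceRing, tenantID string, shards int, lookback time.Duration, queryOne func(context.Context, *ring.InstanceDesc) (string, error)) ([]string, error) {
	// Restrict the partition ring to the tenant's shard, keeping partitions
	// that were recently read within the lookback window.
	subring, err := partitions.ShuffleShardWithLookback(tenantID, shards, lookback, time.Now())
	if err != nil {
		return nil, err
	}
	// One replication set per partition: every owner of a partition holds the same data.
	sets, err := subring.GetReplicationSetsForOperation(ring.Read)
	if err != nil {
		return nil, err
	}
	// MinimizeRequests makes DoUntilQuorum start with a single instance per set;
	// ingesters must report zones for this to take effect.
	cfg := ring.DoUntilQuorumConfig{MinimizeRequests: true}
	var out []string
	for _, set := range sets {
		res, err := ring.DoUntilQuorum(ctx, set, cfg, queryOne, func(string) {})
		if err != nil {
			return nil, err
		}
		out = append(out, res...)
	}
	return out, nil
}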
diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go index df89d6b69561..ab70de4baace 100644 --- a/pkg/querier/querier_mock_test.go +++ b/pkg/querier/querier_mock_test.go @@ -17,7 +17,6 @@ import ( "github.com/grafana/dskit/grpcclient" "github.com/grafana/dskit/ring" ring_client "github.com/grafana/dskit/ring/client" - logql_log "github.com/grafana/loki/v3/pkg/logql/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -25,6 +24,8 @@ import ( "google.golang.org/grpc/health/grpc_health_v1" grpc_metadata "google.golang.org/grpc/metadata" + logql_log "github.com/grafana/loki/v3/pkg/logql/log" + "github.com/grafana/loki/v3/pkg/distributor/clientpool" "github.com/grafana/loki/v3/pkg/ingester/client" "github.com/grafana/loki/v3/pkg/iter" @@ -419,6 +420,54 @@ func newReadRingMock(ingesters []ring.InstanceDesc, maxErrors int) *readRingMock } } +func (r *readRingMock) GetInstance(addr string) (ring.InstanceDesc, error) { + for _, ing := range r.replicationSet.Instances { + if ing.Addr == addr { + return ing, nil + } + } + return ring.InstanceDesc{}, errors.New("instance not found") +} + +// partitionRingMock is a mocked version of a ReadRing, used in querier unit tests +// to control the pool of ingesters available +type partitionRingMock struct { + ring *ring.PartitionRing +} + +func (p partitionRingMock) PartitionRing() *ring.PartitionRing { + return p.ring +} + +func newPartitionInstanceRingMock(ingesterRing ring.InstanceRingReader, ingesters []ring.InstanceDesc, numPartitions int, ingestersPerPartition int) *ring.PartitionInstanceRing { + partitions := make(map[int32]ring.PartitionDesc) + owners := make(map[string]ring.OwnerDesc) + for i := 0; i < numPartitions; i++ { + partitions[int32(i)] = ring.PartitionDesc{ + Id: int32(i), + State: ring.PartitionActive, + Tokens: []uint32{uint32(i)}, + } + + for j := 0; j < ingestersPerPartition; j++ { + ingesterIdx := i*ingestersPerPartition + j + if ingesterIdx < len(ingesters) { + owners[ingesters[ingesterIdx].Id] = ring.OwnerDesc{ + OwnedPartition: int32(i), + State: ring.OwnerActive, + } + } + } + } + partitionRing := partitionRingMock{ + ring: ring.NewPartitionRing(ring.PartitionRingDesc{ + Partitions: partitions, + Owners: owners, + }), + } + return ring.NewPartitionInstanceRing(partitionRing, ingesterRing, time.Hour) +} + func (r *readRingMock) Describe(_ chan<- *prometheus.Desc) { } @@ -518,11 +567,17 @@ func mockReadRingWithOneActiveIngester() *readRingMock { } func mockInstanceDesc(addr string, state ring.InstanceState) ring.InstanceDesc { + return mockInstanceDescWithZone(addr, state, "") +} + +func mockInstanceDescWithZone(addr string, state ring.InstanceState, zone string) ring.InstanceDesc { return ring.InstanceDesc{ + Id: addr, Addr: addr, Timestamp: time.Now().UnixNano(), State: state, Tokens: []uint32{1, 2, 3}, + Zone: zone, } } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 9b4928ee34c2..41265e00df59 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -529,8 +529,6 @@ func TestQuerier_concurrentTailLimits(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { // For this test's purpose, whenever a new ingester client needs to // be created, the factory will always return the same mock instance @@ -1362,7 +1360,7 @@ func TestQuerier_SelectSamplesWithDeletes(t *testing.T) { } func newQuerier(cfg Config, 
clientCfg client.Config, clientFactory ring_client.PoolFactory, ring ring.ReadRing, dg *mockDeleteGettter, store storage.Store, limits *validation.Overrides) (*SingleTenantQuerier, error) { - iq, err := newIngesterQuerier(clientCfg, ring, cfg.ExtraQueryDelay, clientFactory, constants.Loki, util_log.Logger) + iq, err := newIngesterQuerier(cfg, clientCfg, ring, nil, nil, clientFactory, constants.Loki, util_log.Logger) if err != nil { return nil, err } diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go index e52bb1ec50a0..a116e0f4f7d7 100644 --- a/pkg/querier/queryrange/codec_test.go +++ b/pkg/querier/queryrange/codec_test.go @@ -837,7 +837,6 @@ func Test_codec_DecodeProtobufResponseParity(t *testing.T) { } codec := RequestProtobufCodec{} for i, queryTest := range queryTests { - i := i t.Run(queryTest.name, func(t *testing.T) { params := url.Values{ "query": []string{`{app="foo"}`}, diff --git a/pkg/querier/queryrange/prometheus_test.go b/pkg/querier/queryrange/prometheus_test.go index 2888fbfdd6ac..e6f00b127d02 100644 --- a/pkg/querier/queryrange/prometheus_test.go +++ b/pkg/querier/queryrange/prometheus_test.go @@ -269,7 +269,6 @@ func Test_encodePromResponse(t *testing.T) { }`, }, } { - tt := tt t.Run(tt.name, func(t *testing.T) { r, err := tt.resp.encode(context.Background()) require.NoError(t, err) diff --git a/pkg/querier/queryrange/queryrangebase/promql_test.go b/pkg/querier/queryrange/queryrangebase/promql_test.go index edc7a0e5829a..ee0309112e14 100644 --- a/pkg/querier/queryrange/queryrangebase/promql_test.go +++ b/pkg/querier/queryrange/queryrangebase/promql_test.go @@ -314,7 +314,6 @@ func Test_PromQL(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.normalQuery, func(t *testing.T) { baseQuery, err := engine.NewRangeQuery(context.Background(), shardAwareQueryable, nil, tt.normalQuery, start, end, step) diff --git a/pkg/querier/queryrange/queryrangebase/queryrange.pb.go b/pkg/querier/queryrange/queryrangebase/queryrange.pb.go index f376455df4c2..2b1f7b951903 100644 --- a/pkg/querier/queryrange/queryrangebase/queryrange.pb.go +++ b/pkg/querier/queryrange/queryrangebase/queryrange.pb.go @@ -9,11 +9,11 @@ import ( proto "github.com/gogo/protobuf/proto" _ "github.com/gogo/protobuf/types" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + _ "github.com/golang/protobuf/ptypes/duration" github_com_grafana_loki_v3_pkg_logproto "github.com/grafana/loki/v3/pkg/logproto" logproto "github.com/grafana/loki/v3/pkg/logproto" definitions "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase/definitions" resultscache "github.com/grafana/loki/v3/pkg/storage/chunk/cache/resultscache" - _ "google.golang.org/protobuf/types/known/durationpb" io "io" math "math" math_bits "math/bits" @@ -1022,7 +1022,7 @@ func (this *PrometheusRequest) String() string { `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Step:` + fmt.Sprintf("%v", this.Step) + `,`, - `Timeout:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`, + `Timeout:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `Query:` + fmt.Sprintf("%v", this.Query) + `,`, `CachingOptions:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CachingOptions), "CachingOptions", "resultscache.CachingOptions", 1), `&`, ``, 1) + `,`, `Headers:` + repeatedStringForHeaders + `,`, diff --git a/pkg/querier/queryrange/split_by_range_test.go b/pkg/querier/queryrange/split_by_range_test.go index e3c30c66cc54..257c04e37795 100644 --- a/pkg/querier/queryrange/split_by_range_test.go +++ b/pkg/querier/queryrange/split_by_range_test.go @@ -256,7 +256,6 @@ func Test_RangeVectorSplitAlign(t *testing.T) { expected: expectedMergedResponseWithTime(1+2+3+4, twelve34), }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { srm := NewSplitByRangeMiddleware(log.NewNopLogger(), testEngineOpts, fakeLimits{ maxSeries: 10000, @@ -408,7 +407,6 @@ func Test_RangeVectorSplit(t *testing.T) { expected: expectedMergedResponse(1 + 2 + 3), }, } { - tc := tc t.Run(tc.in.GetQuery(), func(t *testing.T) { resp, err := srm.Wrap(queryrangebase.HandlerFunc( func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { diff --git a/pkg/querier/stats/stats.pb.go b/pkg/querier/stats/stats.pb.go index f4d7e4cc1d27..bae01dd0eeed 100644 --- a/pkg/querier/stats/stats.pb.go +++ b/pkg/querier/stats/stats.pb.go @@ -8,7 +8,7 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - _ "google.golang.org/protobuf/types/known/durationpb" + _ "github.com/golang/protobuf/ptypes/duration" io "io" math "math" math_bits "math/bits" @@ -251,7 +251,7 @@ func (this *Stats) String() string { return "nil" } s := strings.Join([]string{`&Stats{`, - `WallTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.WallTime), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`, + `WallTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.WallTime), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `FetchedSeriesCount:` + fmt.Sprintf("%v", this.FetchedSeriesCount) + `,`, `FetchedChunkBytes:` + fmt.Sprintf("%v", this.FetchedChunkBytes) + `,`, `}`, diff --git a/pkg/queue/queue_test.go b/pkg/queue/queue_test.go index 9b4aca8481c7..d7f59cb3b4a5 100644 --- a/pkg/queue/queue_test.go +++ b/pkg/queue/queue_test.go @@ -44,8 +44,6 @@ func BenchmarkGetNextRequest(b *testing.B) { } for _, benchCase := range benchCases { - benchCase := benchCase - b.Run(benchCase.name, func(b *testing.B) { queues := make([]*RequestQueue, 0, b.N) diff --git a/pkg/ruler/base/ruler.pb.go b/pkg/ruler/base/ruler.pb.go index 81ef01420b28..5b3b1f1b4d5d 100644 --- a/pkg/ruler/base/ruler.pb.go +++ b/pkg/ruler/base/ruler.pb.go @@ -11,13 +11,13 @@ import ( proto "github.com/gogo/protobuf/proto" _ "github.com/gogo/protobuf/types" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + _ "github.com/golang/protobuf/ptypes/duration" _ "github.com/grafana/loki/v3/pkg/logproto" github_com_grafana_loki_v3_pkg_logproto "github.com/grafana/loki/v3/pkg/logproto" rulespb "github.com/grafana/loki/v3/pkg/ruler/rulespb" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - _ "google.golang.org/protobuf/types/known/durationpb" io "io" math "math" math_bits "math/bits" @@ -1433,7 +1433,7 @@ func (this *GroupStateDesc) String() string { `Group:` + strings.Replace(fmt.Sprintf("%v", this.Group), "RuleGroupDesc", "rulespb.RuleGroupDesc", 1) + `,`, `ActiveRules:` + repeatedStringForActiveRules + `,`, `EvaluationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.EvaluationTimestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `EvaluationDuration:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationDuration), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`, + `EvaluationDuration:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationDuration), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1454,7 +1454,7 @@ func (this *RuleStateDesc) String() string { `LastError:` + fmt.Sprintf("%v", this.LastError) + `,`, `Alerts:` + repeatedStringForAlerts + `,`, `EvaluationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationTimestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `EvaluationDuration:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationDuration), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`, + `EvaluationDuration:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationDuration), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `}`, }, "") return s diff --git a/pkg/ruler/base/ruler_test.go b/pkg/ruler/base/ruler_test.go index b180c559d8d3..bdb437ed9279 100644 --- a/pkg/ruler/base/ruler_test.go +++ b/pkg/ruler/base/ruler_test.go @@ -1763,7 +1763,6 @@ func TestSendAlerts(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { senderFunc := senderFunc(func(alerts ...*notifier.Alert) { if len(tc.in) == 0 { diff --git a/pkg/ruler/rulespb/rules.pb.go b/pkg/ruler/rulespb/rules.pb.go index 3765e9dd88a7..91afa25a655e 100644 --- a/pkg/ruler/rulespb/rules.pb.go +++ b/pkg/ruler/rulespb/rules.pb.go @@ -9,9 +9,9 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" types "github.com/gogo/protobuf/types" + _ "github.com/golang/protobuf/ptypes/duration" _ "github.com/grafana/loki/v3/pkg/logproto" github_com_grafana_loki_v3_pkg_logproto "github.com/grafana/loki/v3/pkg/logproto" - _ "google.golang.org/protobuf/types/known/durationpb" io "io" math "math" math_bits "math/bits" @@ -657,7 +657,7 @@ func (this *RuleGroupDesc) String() string { s := strings.Join([]string{`&RuleGroupDesc{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Interval:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`, + `Interval:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `Rules:` + repeatedStringForRules + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Options:` + repeatedStringForOptions + `,`, @@ -674,7 +674,7 @@ func (this *RuleDesc) String() string { `Expr:` + fmt.Sprintf("%v", this.Expr) + `,`, `Record:` + fmt.Sprintf("%v", this.Record) + `,`, `Alert:` + fmt.Sprintf("%v", this.Alert) + `,`, - `For:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.For), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`, + `For:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.For), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, `}`, diff --git a/pkg/storage/batch_test.go b/pkg/storage/batch_test.go index 34d8e350045a..115e625a28af 100644 --- a/pkg/storage/batch_test.go +++ b/pkg/storage/batch_test.go @@ -100,7 +100,6 @@ func Test_newLogBatchChunkIterator(t 
*testing.T) { var tests map[string]testCase for _, periodConfig := range periodConfigs { - periodConfig := periodConfig chunkfmt, headfmt, err := periodConfig.ChunkFormat() require.NoError(t, err) @@ -1000,7 +999,6 @@ func Test_newLogBatchChunkIterator(t *testing.T) { for _, schemaConfig := range schemaConfigs { s := schemaConfig for name, tt := range tests { - tt := tt t.Run(name, func(t *testing.T) { it, err := newLogBatchIterator(context.Background(), s, NilMetrics, tt.chunks, tt.batchSize, newMatchers(tt.matchers), log.NewNoopPipeline(), tt.direction, tt.start, tt.end, nil) require.NoError(t, err) @@ -1416,7 +1414,6 @@ func Test_newSampleBatchChunkIterator(t *testing.T) { } for name, tt := range tests { - tt := tt t.Run(name, func(t *testing.T) { ex, err := log.NewLineSampleExtractor(log.CountExtractor, nil, nil, false, false) require.NoError(t, err) diff --git a/pkg/storage/bucket/azure/config_test.go b/pkg/storage/bucket/azure/config_test.go index 756ae298b65c..82357faa147e 100644 --- a/pkg/storage/bucket/azure/config_test.go +++ b/pkg/storage/bucket/azure/config_test.go @@ -86,8 +86,6 @@ http: } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { cfg := Config{} flagext.DefaultValues(&cfg) diff --git a/pkg/storage/bucket/client_test.go b/pkg/storage/bucket/client_test.go index 489f7d2f1f26..fb7acec91089 100644 --- a/pkg/storage/bucket/client_test.go +++ b/pkg/storage/bucket/client_test.go @@ -69,8 +69,6 @@ func TestNewClient(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { // Load config cfg := Config{} diff --git a/pkg/storage/bucket/http/config_test.go b/pkg/storage/bucket/http/config_test.go index d7da35e581af..6ac47eb4a257 100644 --- a/pkg/storage/bucket/http/config_test.go +++ b/pkg/storage/bucket/http/config_test.go @@ -65,8 +65,6 @@ max_connections_per_host: 8 } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { cfg := Config{} flagext.DefaultValues(&cfg) diff --git a/pkg/storage/bucket/s3/config_test.go b/pkg/storage/bucket/s3/config_test.go index 214b2f6ff0cd..3f32e8f84793 100644 --- a/pkg/storage/bucket/s3/config_test.go +++ b/pkg/storage/bucket/s3/config_test.go @@ -111,8 +111,6 @@ http: } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { cfg := Config{} flagext.DefaultValues(&cfg) diff --git a/pkg/storage/chunk/client/aws/dynamodb_index_reader.go b/pkg/storage/chunk/client/aws/dynamodb_index_reader.go index 19ea676451a9..73c1cb74c75a 100644 --- a/pkg/storage/chunk/client/aws/dynamodb_index_reader.go +++ b/pkg/storage/chunk/client/aws/dynamodb_index_reader.go @@ -80,8 +80,7 @@ func (r *dynamodbIndexReader) ReadIndexEntries(ctx context.Context, tableName st var readerGroup errgroup.Group // Start a goroutine for each processor - for i, processor := range processors { - segment, processor := i, processor // https://golang.org/doc/faq#closures_and_goroutines + for segment, processor := range processors { readerGroup.Go(func() error { input := &dynamodb.ScanInput{ TableName: aws.String(tableName), diff --git a/pkg/storage/chunk/client/aws/s3_storage_client.go b/pkg/storage/chunk/client/aws/s3_storage_client.go index 7747f2761800..9ab8c9116339 100644 --- a/pkg/storage/chunk/client/aws/s3_storage_client.go +++ b/pkg/storage/chunk/client/aws/s3_storage_client.go @@ -393,7 +393,7 @@ func (a *S3ObjectClient) GetObject(ctx context.Context, objectKey string) 
(io.Re // Map the key into a bucket bucket := a.bucketFromKey(objectKey) - var lastErr error + lastErr := ctx.Err() retries := backoff.New(ctx, a.cfg.BackoffConfig) for retries.Ongoing() { diff --git a/pkg/storage/chunk/client/aws/s3_storage_client_test.go b/pkg/storage/chunk/client/aws/s3_storage_client_test.go index 5647934e1afa..38b0215b7913 100644 --- a/pkg/storage/chunk/client/aws/s3_storage_client_test.go +++ b/pkg/storage/chunk/client/aws/s3_storage_client_test.go @@ -234,6 +234,31 @@ func TestRequestMiddleware(t *testing.T) { } } +func TestS3ObjectClient_GetObject_CanceledContext(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, r.Header.Get("echo-me")) + })) + defer ts.Close() + + cfg := S3Config{ + Endpoint: ts.URL, + BucketNames: "buck-o", + S3ForcePathStyle: true, + Insecure: true, + AccessKeyID: "key", + SecretAccessKey: flagext.SecretWithValue("secret"), + } + + client, err := NewS3ObjectClient(cfg, hedging.Config{}) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + _, _, err = client.GetObject(ctx, "key") + require.Error(t, err, "GetObject should fail when given a canceled context") +} + func Test_Hedging(t *testing.T) { for _, tc := range []struct { name string @@ -272,7 +297,6 @@ func Test_Hedging(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { count := atomic.NewInt32(0) diff --git a/pkg/storage/chunk/client/azure/blob_storage_client_test.go b/pkg/storage/chunk/client/azure/blob_storage_client_test.go index cedc5057e85b..8a3362d4a7c7 100644 --- a/pkg/storage/chunk/client/azure/blob_storage_client_test.go +++ b/pkg/storage/chunk/client/azure/blob_storage_client_test.go @@ -118,7 +118,6 @@ func Test_Hedging(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { count := atomic.NewInt32(0) // hijack the client to count the number of calls diff --git a/pkg/storage/chunk/client/gcp/gcs_object_client_test.go b/pkg/storage/chunk/client/gcp/gcs_object_client_test.go index 5bece14a18db..c885c4c1d780 100644 --- a/pkg/storage/chunk/client/gcp/gcs_object_client_test.go +++ b/pkg/storage/chunk/client/gcp/gcs_object_client_test.go @@ -55,7 +55,6 @@ func Test_Hedging(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { count := atomic.NewInt32(0) server := fakeServer(t, 200*time.Millisecond, count) diff --git a/pkg/storage/chunk/client/grpc/grpc.pb.go b/pkg/storage/chunk/client/grpc/grpc.pb.go index 6468535c2ab1..d76002adfc38 100644 --- a/pkg/storage/chunk/client/grpc/grpc.pb.go +++ b/pkg/storage/chunk/client/grpc/grpc.pb.go @@ -9,10 +9,10 @@ import ( fmt "fmt" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + empty "github.com/golang/protobuf/ptypes/empty" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" io "io" math "math" math_bits "math/bits" @@ -1998,30 +1998,30 @@ const _ = grpc.SupportPackageIsVersion4 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type GrpcStoreClient interface { // / WriteIndex writes batch of indexes to the index tables. 
- WriteIndex(ctx context.Context, in *WriteIndexRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + WriteIndex(ctx context.Context, in *WriteIndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) // / QueryIndex reads the indexes required for given query & sends back the batch of rows // / in rpc streams QueryIndex(ctx context.Context, in *QueryIndexRequest, opts ...grpc.CallOption) (GrpcStore_QueryIndexClient, error) // / DeleteIndex deletes the batch of index entries from the index tables - DeleteIndex(ctx context.Context, in *DeleteIndexRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + DeleteIndex(ctx context.Context, in *DeleteIndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) // / PutChunks saves the batch of chunks into the chunk tables. - PutChunks(ctx context.Context, in *PutChunksRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + PutChunks(ctx context.Context, in *PutChunksRequest, opts ...grpc.CallOption) (*empty.Empty, error) // / GetChunks requests for batch of chunks and the batch of chunks are sent back in rpc streams // / batching needs to be performed at server level as per requirement instead of sending single chunk per stream. // / In GetChunks rpc request send buf as nil GetChunks(ctx context.Context, in *GetChunksRequest, opts ...grpc.CallOption) (GrpcStore_GetChunksClient, error) // / DeleteChunks deletes the chunks based on chunkID. - DeleteChunks(ctx context.Context, in *ChunkID, opts ...grpc.CallOption) (*emptypb.Empty, error) + DeleteChunks(ctx context.Context, in *ChunkID, opts ...grpc.CallOption) (*empty.Empty, error) // / Lists all the tables that exists in the database. - ListTables(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListTablesResponse, error) + ListTables(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ListTablesResponse, error) // / Creates a table with provided name & attributes. - CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) // Deletes a table using table name provided. - DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) // Describes a table information for the provided table. DescribeTable(ctx context.Context, in *DescribeTableRequest, opts ...grpc.CallOption) (*DescribeTableResponse, error) // Update a table with newly provided table information. - UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) } type grpcStoreClient struct { @@ -2032,8 +2032,8 @@ func NewGrpcStoreClient(cc *grpc.ClientConn) GrpcStoreClient { return &grpcStoreClient{cc} } -func (c *grpcStoreClient) WriteIndex(ctx context.Context, in *WriteIndexRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) +func (c *grpcStoreClient) WriteIndex(ctx context.Context, in *WriteIndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) err := c.cc.Invoke(ctx, "/grpc.grpc_store/WriteIndex", in, out, opts...) 
if err != nil { return nil, err @@ -2073,8 +2073,8 @@ func (x *grpcStoreQueryIndexClient) Recv() (*QueryIndexResponse, error) { return m, nil } -func (c *grpcStoreClient) DeleteIndex(ctx context.Context, in *DeleteIndexRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) +func (c *grpcStoreClient) DeleteIndex(ctx context.Context, in *DeleteIndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) err := c.cc.Invoke(ctx, "/grpc.grpc_store/DeleteIndex", in, out, opts...) if err != nil { return nil, err @@ -2082,8 +2082,8 @@ func (c *grpcStoreClient) DeleteIndex(ctx context.Context, in *DeleteIndexReques return out, nil } -func (c *grpcStoreClient) PutChunks(ctx context.Context, in *PutChunksRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) +func (c *grpcStoreClient) PutChunks(ctx context.Context, in *PutChunksRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) err := c.cc.Invoke(ctx, "/grpc.grpc_store/PutChunks", in, out, opts...) if err != nil { return nil, err @@ -2123,8 +2123,8 @@ func (x *grpcStoreGetChunksClient) Recv() (*GetChunksResponse, error) { return m, nil } -func (c *grpcStoreClient) DeleteChunks(ctx context.Context, in *ChunkID, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) +func (c *grpcStoreClient) DeleteChunks(ctx context.Context, in *ChunkID, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) err := c.cc.Invoke(ctx, "/grpc.grpc_store/DeleteChunks", in, out, opts...) if err != nil { return nil, err @@ -2132,7 +2132,7 @@ func (c *grpcStoreClient) DeleteChunks(ctx context.Context, in *ChunkID, opts .. return out, nil } -func (c *grpcStoreClient) ListTables(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListTablesResponse, error) { +func (c *grpcStoreClient) ListTables(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ListTablesResponse, error) { out := new(ListTablesResponse) err := c.cc.Invoke(ctx, "/grpc.grpc_store/ListTables", in, out, opts...) if err != nil { @@ -2141,8 +2141,8 @@ func (c *grpcStoreClient) ListTables(ctx context.Context, in *emptypb.Empty, opt return out, nil } -func (c *grpcStoreClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) +func (c *grpcStoreClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) err := c.cc.Invoke(ctx, "/grpc.grpc_store/CreateTable", in, out, opts...) if err != nil { return nil, err @@ -2150,8 +2150,8 @@ func (c *grpcStoreClient) CreateTable(ctx context.Context, in *CreateTableReques return out, nil } -func (c *grpcStoreClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) +func (c *grpcStoreClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) err := c.cc.Invoke(ctx, "/grpc.grpc_store/DeleteTable", in, out, opts...) 
if err != nil { return nil, err @@ -2168,8 +2168,8 @@ func (c *grpcStoreClient) DescribeTable(ctx context.Context, in *DescribeTableRe return out, nil } -func (c *grpcStoreClient) UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) +func (c *grpcStoreClient) UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) err := c.cc.Invoke(ctx, "/grpc.grpc_store/UpdateTable", in, out, opts...) if err != nil { return nil, err @@ -2180,67 +2180,67 @@ func (c *grpcStoreClient) UpdateTable(ctx context.Context, in *UpdateTableReques // GrpcStoreServer is the server API for GrpcStore service. type GrpcStoreServer interface { // / WriteIndex writes batch of indexes to the index tables. - WriteIndex(context.Context, *WriteIndexRequest) (*emptypb.Empty, error) + WriteIndex(context.Context, *WriteIndexRequest) (*empty.Empty, error) // / QueryIndex reads the indexes required for given query & sends back the batch of rows // / in rpc streams QueryIndex(*QueryIndexRequest, GrpcStore_QueryIndexServer) error // / DeleteIndex deletes the batch of index entries from the index tables - DeleteIndex(context.Context, *DeleteIndexRequest) (*emptypb.Empty, error) + DeleteIndex(context.Context, *DeleteIndexRequest) (*empty.Empty, error) // / PutChunks saves the batch of chunks into the chunk tables. - PutChunks(context.Context, *PutChunksRequest) (*emptypb.Empty, error) + PutChunks(context.Context, *PutChunksRequest) (*empty.Empty, error) // / GetChunks requests for batch of chunks and the batch of chunks are sent back in rpc streams // / batching needs to be performed at server level as per requirement instead of sending single chunk per stream. // / In GetChunks rpc request send buf as nil GetChunks(*GetChunksRequest, GrpcStore_GetChunksServer) error // / DeleteChunks deletes the chunks based on chunkID. - DeleteChunks(context.Context, *ChunkID) (*emptypb.Empty, error) + DeleteChunks(context.Context, *ChunkID) (*empty.Empty, error) // / Lists all the tables that exists in the database. - ListTables(context.Context, *emptypb.Empty) (*ListTablesResponse, error) + ListTables(context.Context, *empty.Empty) (*ListTablesResponse, error) // / Creates a table with provided name & attributes. - CreateTable(context.Context, *CreateTableRequest) (*emptypb.Empty, error) + CreateTable(context.Context, *CreateTableRequest) (*empty.Empty, error) // Deletes a table using table name provided. - DeleteTable(context.Context, *DeleteTableRequest) (*emptypb.Empty, error) + DeleteTable(context.Context, *DeleteTableRequest) (*empty.Empty, error) // Describes a table information for the provided table. DescribeTable(context.Context, *DescribeTableRequest) (*DescribeTableResponse, error) // Update a table with newly provided table information. - UpdateTable(context.Context, *UpdateTableRequest) (*emptypb.Empty, error) + UpdateTable(context.Context, *UpdateTableRequest) (*empty.Empty, error) } // UnimplementedGrpcStoreServer can be embedded to have forward compatible implementations. 
type UnimplementedGrpcStoreServer struct { } -func (*UnimplementedGrpcStoreServer) WriteIndex(ctx context.Context, req *WriteIndexRequest) (*emptypb.Empty, error) { +func (*UnimplementedGrpcStoreServer) WriteIndex(ctx context.Context, req *WriteIndexRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method WriteIndex not implemented") } func (*UnimplementedGrpcStoreServer) QueryIndex(req *QueryIndexRequest, srv GrpcStore_QueryIndexServer) error { return status.Errorf(codes.Unimplemented, "method QueryIndex not implemented") } -func (*UnimplementedGrpcStoreServer) DeleteIndex(ctx context.Context, req *DeleteIndexRequest) (*emptypb.Empty, error) { +func (*UnimplementedGrpcStoreServer) DeleteIndex(ctx context.Context, req *DeleteIndexRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteIndex not implemented") } -func (*UnimplementedGrpcStoreServer) PutChunks(ctx context.Context, req *PutChunksRequest) (*emptypb.Empty, error) { +func (*UnimplementedGrpcStoreServer) PutChunks(ctx context.Context, req *PutChunksRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method PutChunks not implemented") } func (*UnimplementedGrpcStoreServer) GetChunks(req *GetChunksRequest, srv GrpcStore_GetChunksServer) error { return status.Errorf(codes.Unimplemented, "method GetChunks not implemented") } -func (*UnimplementedGrpcStoreServer) DeleteChunks(ctx context.Context, req *ChunkID) (*emptypb.Empty, error) { +func (*UnimplementedGrpcStoreServer) DeleteChunks(ctx context.Context, req *ChunkID) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteChunks not implemented") } -func (*UnimplementedGrpcStoreServer) ListTables(ctx context.Context, req *emptypb.Empty) (*ListTablesResponse, error) { +func (*UnimplementedGrpcStoreServer) ListTables(ctx context.Context, req *empty.Empty) (*ListTablesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListTables not implemented") } -func (*UnimplementedGrpcStoreServer) CreateTable(ctx context.Context, req *CreateTableRequest) (*emptypb.Empty, error) { +func (*UnimplementedGrpcStoreServer) CreateTable(ctx context.Context, req *CreateTableRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateTable not implemented") } -func (*UnimplementedGrpcStoreServer) DeleteTable(ctx context.Context, req *DeleteTableRequest) (*emptypb.Empty, error) { +func (*UnimplementedGrpcStoreServer) DeleteTable(ctx context.Context, req *DeleteTableRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteTable not implemented") } func (*UnimplementedGrpcStoreServer) DescribeTable(ctx context.Context, req *DescribeTableRequest) (*DescribeTableResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DescribeTable not implemented") } -func (*UnimplementedGrpcStoreServer) UpdateTable(ctx context.Context, req *UpdateTableRequest) (*emptypb.Empty, error) { +func (*UnimplementedGrpcStoreServer) UpdateTable(ctx context.Context, req *UpdateTableRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateTable not implemented") } @@ -2363,7 +2363,7 @@ func _GrpcStore_DeleteChunks_Handler(srv interface{}, ctx context.Context, dec f } func _GrpcStore_ListTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) + in := 
new(empty.Empty) if err := dec(in); err != nil { return nil, err } @@ -2375,7 +2375,7 @@ func _GrpcStore_ListTables_Handler(srv interface{}, ctx context.Context, dec fun FullMethod: "/grpc.grpc_store/ListTables", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GrpcStoreServer).ListTables(ctx, req.(*emptypb.Empty)) + return srv.(GrpcStoreServer).ListTables(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } diff --git a/pkg/storage/chunk/client/openstack/swift_object_client_test.go b/pkg/storage/chunk/client/openstack/swift_object_client_test.go index ce2f130f1bfc..efcd2807fdc9 100644 --- a/pkg/storage/chunk/client/openstack/swift_object_client_test.go +++ b/pkg/storage/chunk/client/openstack/swift_object_client_test.go @@ -58,7 +58,6 @@ func Test_Hedging(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { count := atomic.NewInt32(0) // hijack the transport to count the number of calls diff --git a/pkg/storage/config/schema_config_test.go b/pkg/storage/config/schema_config_test.go index 74980ad513aa..8a8de6077e44 100644 --- a/pkg/storage/config/schema_config_test.go +++ b/pkg/storage/config/schema_config_test.go @@ -384,8 +384,6 @@ func TestSchemaConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { actual := testData.config.Validate() assert.ErrorIs(t, actual, testData.err) diff --git a/pkg/storage/lazy_chunk_test.go b/pkg/storage/lazy_chunk_test.go index 6757e94e1e95..986358f6aea7 100644 --- a/pkg/storage/lazy_chunk_test.go +++ b/pkg/storage/lazy_chunk_test.go @@ -38,8 +38,6 @@ func TestLazyChunkIterator(t *testing.T) { } for _, periodConfig := range periodConfigs { - periodConfig := periodConfig - chunkfmt, headfmt, err := periodConfig.ChunkFormat() require.NoError(t, err) diff --git a/pkg/storage/store.go b/pkg/storage/store.go index db4a0a498e17..768708a24f34 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -194,7 +194,6 @@ func NewStore(cfg Config, storeCfg config.ChunkStoreConfig, schemaCfg config.Sch func (s *LokiStore) init() error { for i, p := range s.schemaCfg.Configs { - p := p chunkClient, err := s.chunkClientForPeriod(p) if err != nil { return err diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go index 197fcfa6f1e9..cc17cd0a92ef 100644 --- a/pkg/storage/store_test.go +++ b/pkg/storage/store_test.go @@ -1771,7 +1771,6 @@ func Test_GetSeries(t *testing.T) { []logproto.SeriesIdentifier{}, }, } { - tt := tt t.Run(tt.name, func(t *testing.T) { if tt.req.Selector != "" { tt.req.Plan = &plan.QueryPlan{ diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher.go b/pkg/storage/stores/shipper/bloomshipper/fetcher.go index 053078180547..e49852d777b0 100644 --- a/pkg/storage/stores/shipper/bloomshipper/fetcher.go +++ b/pkg/storage/stores/shipper/bloomshipper/fetcher.go @@ -316,7 +316,10 @@ func (f *Fetcher) FetchBlocks(ctx context.Context, refs []BlockRef, opts ...Fetc } func (f *Fetcher) processTask(ctx context.Context, task downloadRequest[BlockRef, BlockDirectory]) { + errLogger := log.With(f.logger, "task", task.key, "msg", "failed to process download request") + if ctx.Err() != nil { + level.Error(errLogger).Log("err", ctx.Err()) task.errors <- ctx.Err() return } @@ -324,6 +327,7 @@ func (f *Fetcher) processTask(ctx context.Context, task downloadRequest[BlockRef // check if block was fetched while task was waiting in queue result, exists, err := f.fromCache(ctx, task.key) if err != 
nil { + level.Error(errLogger).Log("err", err) task.errors <- err return } @@ -341,6 +345,7 @@ func (f *Fetcher) processTask(ctx context.Context, task downloadRequest[BlockRef // fetch from storage result, err = f.fetchBlock(ctx, task.item) if err != nil { + level.Error(errLogger).Log("err", err) task.errors <- err return } @@ -354,6 +359,7 @@ func (f *Fetcher) processTask(ctx context.Context, task downloadRequest[BlockRef err = f.blocksCache.PutInc(ctx, key, result) } if err != nil { + level.Error(errLogger).Log("err", err) task.errors <- err return } diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go index 6c60c64b5f2d..e330fd41c1fb 100644 --- a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go @@ -176,7 +176,6 @@ func TestFetcher_DownloadQueue(t *testing.T) { size: 1, workers: 0, err: "queue requires at least 1 worker", }, } { - tc := tc t.Run(tc.err, func(t *testing.T) { _, err := newDownloadQueue[bool, bool]( tc.size, diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index_test.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index_test.go index 043d36d00401..433835e0ff7e 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index_test.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/compacted_index_test.go @@ -23,7 +23,6 @@ import ( func TestCompactedIndex_IndexProcessor(t *testing.T) { for _, tt := range allSchemas { - tt := tt t.Run(tt.schema, func(t *testing.T) { cm := storage.NewClientMetrics() defer cm.Unregister() diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator_test.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator_test.go index 26e9aef596bf..c4acfd33b67a 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator_test.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/iterator_test.go @@ -24,7 +24,6 @@ import ( func Test_ChunkIterator(t *testing.T) { for _, tt := range allSchemas { - tt := tt t.Run(tt.schema, func(t *testing.T) { cm := storage.NewClientMetrics() defer cm.Unregister() @@ -108,7 +107,6 @@ func Test_ChunkIteratorContextCancelation(t *testing.T) { func Test_SeriesCleaner(t *testing.T) { for _, tt := range allSchemas { - tt := tt t.Run(tt.schema, func(t *testing.T) { cm := storage.NewClientMetrics() defer cm.Unregister() diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor_test.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor_test.go index 4fa6d598c8e3..9bd76e22c6c9 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor_test.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor_test.go @@ -368,7 +368,6 @@ func TestTable_RecreateCompactedDB(t *testing.T) { shouldRecreateCompactedDB: true, }, } { - tt := tt t.Run(name, func(t *testing.T) { if !tt.compactedDBMtime.IsZero() { require.Equal(t, 1, tt.dbCount) diff --git a/pkg/storage/stores/shipper/indexshipper/downloads/table_manager_test.go b/pkg/storage/stores/shipper/indexshipper/downloads/table_manager_test.go index e8d9e3efcc8d..ea360dbd3cf4 100644 --- a/pkg/storage/stores/shipper/indexshipper/downloads/table_manager_test.go +++ b/pkg/storage/stores/shipper/indexshipper/downloads/table_manager_test.go @@ -319,7 +319,6 @@ func TestTableManager_ensureQueryReadiness(t *testing.T) 
{ }, } { t.Run(tc.name, func(t *testing.T) { - tc := tc // just to make the linter happy resetTables() tableManager.cfg.QueryReadyNumDays = tc.queryReadyNumDaysCfg tableManager.cfg.Limits = &tc.queryReadinessLimits diff --git a/pkg/storage/wal/segment.go b/pkg/storage/wal/segment.go index 83841aab1697..da656888f55f 100644 --- a/pkg/storage/wal/segment.go +++ b/pkg/storage/wal/segment.go @@ -231,7 +231,6 @@ func (b *SegmentWriter) Meta(id string) *metastorepb.BlockMeta { } result := make([]*metastorepb.TenantStreams, 0, len(tenants)) for _, tenant := range tenants { - tenant := tenant result = append(result, tenant) } sort.Slice(result, func(i, j int) bool { @@ -379,7 +378,6 @@ func (b *SegmentWriter) Reset() { b.firstAppend = time.Time{} b.lastAppend = time.Time{} for _, s := range b.streams { - s := s s.Reset() streamSegmentPool.Put(s) } diff --git a/pkg/storage/wal/segment_test.go b/pkg/storage/wal/segment_test.go index cbe42587bf7a..52e50e9e2f88 100644 --- a/pkg/storage/wal/segment_test.go +++ b/pkg/storage/wal/segment_test.go @@ -101,7 +101,6 @@ func TestWalSegmentWriter_Append(t *testing.T) { // Run the test cases for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() // Create a new WalSegmentWriter diff --git a/pkg/util/marshal/marshal_test.go b/pkg/util/marshal/marshal_test.go index 0e08239b20ad..cc581a0cd8c7 100644 --- a/pkg/util/marshal/marshal_test.go +++ b/pkg/util/marshal/marshal_test.go @@ -1125,7 +1125,6 @@ func Test_WriteQueryPatternsResponseJSON(t *testing.T) { `{"status":"success","data":[{"pattern":"foo <*> bar","samples":[]},{"pattern":"foo <*> buzz","samples":[]}]}`, }, } { - tc := tc t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { var b bytes.Buffer err := WriteQueryPatternsResponseJSON(tc.input, &b) diff --git a/pkg/util/query_string_builder_test.go b/pkg/util/query_string_builder_test.go index a97d5971557d..aed48a467d94 100644 --- a/pkg/util/query_string_builder_test.go +++ b/pkg/util/query_string_builder_test.go @@ -33,8 +33,6 @@ func TestQueryStringBuilder(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { params := NewQueryStringBuilder() diff --git a/pkg/util/string_test.go b/pkg/util/string_test.go index b58bc1ee9145..03cc69c7f599 100644 --- a/pkg/util/string_test.go +++ b/pkg/util/string_test.go @@ -27,8 +27,6 @@ func TestStringsContain(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 18d0aca93a3d..55ed8d2ed684 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 6.17.1 + +- [BUGFIX] Added missing `loki.storage.azure.chunkDelimiter` parameter to Helm chart. 
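
  A minimal sketch of how the new value might be set in `values.yaml` (the delimiter shown is illustrative, not a recommendation; per the `_helpers.tpl` change below, `chunkDelimiter` defaults to `null` and is only rendered into the Loki config when set):

  ```yaml
  loki:
    storage:
      type: azure
      azure:
        accountName: <storage-account>  # placeholder, not part of this change
        chunkDelimiter: "-"             # illustrative value; omit to keep the default behavior
  ```

  When set, the template emits a matching `chunk_delimiter` key in the generated `azure` storage config block.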
+ ## 6.17.0 - [CHANGE] Changed version of Grafana Loki to 3.2.0 diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 07941278dfa0..2381bac04810 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. type: application appVersion: 3.2.0 -version: 6.17.0 +version: 6.17.1 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 12ebf95948e2..6f7566c606f9 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 6.17.0](https://img.shields.io/badge/Version-6.17.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.2.0](https://img.shields.io/badge/AppVersion-3.2.0-informational?style=flat-square) +![Version: 6.17.1](https://img.shields.io/badge/Version-6.17.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.2.0](https://img.shields.io/badge/AppVersion-3.2.0-informational?style=flat-square) Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl index 9a4ab135db92..f302bc5a621a 100644 --- a/production/helm/loki/templates/_helpers.tpl +++ b/production/helm/loki/templates/_helpers.tpl @@ -284,6 +284,9 @@ azure: {{- with .endpointSuffix }} endpoint_suffix: {{ . }} {{- end }} + {{- with .chunkDelimiter }} + chunk_delimiter: {{ . 
}} + {{- end }} {{- end -}} {{- else if eq .Values.loki.storage.type "alibabacloud" -}} {{- with .Values.loki.storage.alibabacloud }} diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index 964089148051..3185f780ccd2 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -351,6 +351,7 @@ loki: userAssignedId: null requestTimeout: null endpointSuffix: null + chunkDelimiter: null swift: auth_version: null auth_url: null diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod index 96480efeb823..961038477cdf 100644 --- a/tools/lambda-promtail/go.mod +++ b/tools/lambda-promtail/go.mod @@ -10,7 +10,7 @@ require ( github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/grafana/dskit v0.0.0-20240930165212-f52de24af9bc + github.com/grafana/dskit v0.0.0-20241004175247-687ec485facf github.com/grafana/loki/v3 v3.0.0-20240809103847-9315b3d03d79 github.com/prometheus/common v0.55.0 github.com/stretchr/testify v1.9.0 diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum index 024bef722f7a..f7251a0d3370 100644 --- a/tools/lambda-promtail/go.sum +++ b/tools/lambda-promtail/go.sum @@ -216,8 +216,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grafana/dskit v0.0.0-20240930165212-f52de24af9bc h1:OLRT3mpHjvTjq4Km7L36lipZ+hTOX1U3jct0DFMADdo= -github.com/grafana/dskit v0.0.0-20240930165212-f52de24af9bc/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= +github.com/grafana/dskit v0.0.0-20241004175247-687ec485facf h1:ZafqZwIpdCCMifH9Ok6C98rYaCh5OZeyyHLbU0FPedg= +github.com/grafana/dskit v0.0.0-20241004175247-687ec485facf/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE= github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32 h1:NznuPwItog+rwdVg8hAuGKP29ndRSzJAwhxKldkP8oQ= diff --git a/tools/querytee/proxy_endpoint.go b/tools/querytee/proxy_endpoint.go index 3bcaa3be2440..7a6779b6ad88 100644 --- a/tools/querytee/proxy_endpoint.go +++ b/tools/querytee/proxy_endpoint.go @@ -109,9 +109,6 @@ func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *back wg.Add(len(p.backends)) for i, b := range p.backends { - i := i - b := b - go func() { defer wg.Done() var ( diff --git a/tools/querytee/proxy_endpoint_test.go b/tools/querytee/proxy_endpoint_test.go index 728f8fec415c..5cfee42b504d 100644 --- a/tools/querytee/proxy_endpoint_test.go +++ b/tools/querytee/proxy_endpoint_test.go @@ -95,8 +95,6 @@ func Test_ProxyEndpoint_waitBackendResponseForDownstream(t *testing.T) { } for testName, testData := range tests { - testData := testData - t.Run(testName, func(t *testing.T) { endpoint := NewProxyEndpoint(testData.backends, "test", NewProxyMetrics(nil), log.NewNopLogger(), nil, false) diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der deleted file mode 100644 index 
958f3cfaddf3..000000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der deleted file mode 100644 index d2817641bafb..000000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der deleted file mode 100644 index d8c3710c85f9..000000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der deleted file mode 100644 index dae619c09751..000000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der deleted file mode 100644 index ce7f8d31d680..000000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der deleted file mode 100644 index 04b0d73600b7..000000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der deleted file mode 100644 index d8c3710c85f9..000000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem deleted file mode 100644 index 493a5a264810..000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 
-RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem deleted file mode 100644 index 55a7f10c742d..000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der deleted file mode 100644 index 04b0d73600b7..000000000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem deleted file mode 100644 index 0f98322c7244..000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- 
-MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem deleted file mode 100644 index 81afea783df9..000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git 
a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem deleted file mode 100644 index 493a5a264810..000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem deleted file mode 100644 index 55a7f10c742d..000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm 
-HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem deleted file mode 100644 index 0f98322c7244..000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem deleted file mode 100644 index 81afea783df9..000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 
-lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem deleted file mode 100644 index 493a5a264810..000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem deleted file mode 100644 index 55a7f10c742d..000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq 
-xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem deleted file mode 100644 index 0f98322c7244..000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem deleted file mode 100644 index 81afea783df9..000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl 
-jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/testdata/client_cert.pem deleted file mode 100644 index 493a5a264810..000000000000 --- a/vendor/github.com/google/s2a-go/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/client_key.pem b/vendor/github.com/google/s2a-go/testdata/client_key.pem deleted file mode 100644 index 55a7f10c742d..000000000000 --- a/vendor/github.com/google/s2a-go/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA 
-xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem deleted file mode 100644 index 60c4cf069157..000000000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDCDCCAfACFFlYsYCFit01ZpYmfjxpo7/6wMEbMA0GCSqGSIb3DQEBCwUAMEgx -CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UECgwGR29vZ2xlMRswGQYD -VQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwHhcNMjMwODIyMTY0NTE4WhcNNDMwODIy -MTY0NTE4WjA5MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExHTAbBgNVBAMMFHRl -c3QtczJhLW10bHMtY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAqrQQMyxNtmdCB+uY3szgRsfPrKC+TV9Fusnd8PfaCVuGTGcSBKM018nV2TDn -3IYFQ1HgLpGwGwOFDBb3y0o9i2/l2VJySriX1GSNX6nDmVasQlO1wuOLCP7/LRmO -7b6Kise5W0IFhYaptKyWnekn2pS0tAjimqpfn2w0U6FDGtQUqg/trQQmGtTSJHjb -A+OFd0EFC18KGP8Q+jOMaMkJRmpeEiAPyHPDoMhqQNT26RApv9j2Uzo4SuXzHH6T -cAdm1+zG+EXY/UZKX9oDkSbwIJvN+gCmNyORLalJ12gsGYOCjMd8K0mlXBqrmmbO -VHVbUm9062lhE7x59AA8DK4DoQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCPOvtL -dq2hxFHlIy0YUK8jp/DtwJZPwzx1id5FtWwd0CxBS1StIgmkHMxtkJGz1iyQLplI -je+Msd4sTsb5zZi/8kGKehi8Wj4lghp4oP30cpob41OvM68M9RC/wSOVk9igSww+ -l3zof6wKRIswsi5VHrL16ruIVVoDlyFbKr8yk+cp9OPOV8hNNN7ewY9xC8OgnTt8 -YtdaLe6uTplKBLW+j3GtshigRhyfkGJyPFYL4LAeDJCHlC1qmBnkyP0ijMp6vneM -E8TLavnMTMcpihWTWpyKeRkO6HDRsP4AofQAp7VAiAdSOplga+w2qgrVICV+m8MK -BTq2PBvc59T6OFLq ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem deleted file mode 100644 index 9d112d1e9ff9..000000000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCqtBAzLE22Z0IH -65jezOBGx8+soL5NX0W6yd3w99oJW4ZMZxIEozTXydXZMOfchgVDUeAukbAbA4UM -FvfLSj2Lb+XZUnJKuJfUZI1fqcOZVqxCU7XC44sI/v8tGY7tvoqKx7lbQgWFhqm0 -rJad6SfalLS0COKaql+fbDRToUMa1BSqD+2tBCYa1NIkeNsD44V3QQULXwoY/xD6 -M4xoyQlGal4SIA/Ic8OgyGpA1PbpECm/2PZTOjhK5fMcfpNwB2bX7Mb4Rdj9Rkpf -2gORJvAgm836AKY3I5EtqUnXaCwZg4KMx3wrSaVcGquaZs5UdVtSb3TraWETvHn0 
-ADwMrgOhAgMBAAECggEAUccupZ1ZY4OHTi0PkNk8rpwFwTFGyeFVEf2ofkr24RnA -NnUAXEllxOUUNlcoFOz9s3kTeavg3qgqgpa0QmdAIb9LMXg+ec6CKkW7trMpGho8 -LxBUWNfSoU4sKEqAvyPT0lWJVo9D/up6/avbAi6TIbOw+Djzel4ZrlHTpabxc3WT -EilXzn4q54b3MzxCQeQjcnzTieW4Q5semG2kLiXFToHIY2di01P/O8awUjgrD+uW -/Cb6H49MnHm9VPkqea1iwZeMQd6Gh5FrC7RezsBjdB1JBcfsv6PFt2ySInjB8SF+ -XR5Gr3Cc5sh9s0LfprZ9Dq0rlSWmwasPMI1COK6SswKBgQDczgeWd3erQ1JX9LEI -wollawqC9y7uJhEsw1hrPqA3uqZYiLUc7Nmi4laZ12mcGoXNDS3R3XmD58qGmGaU -lxEVTb8KDVWBgw450VoBKzSMQnCP6zn4nZxTYxeqMKjDGf6TRB6TZc843qsG3eRC -k91yxrCQ/0HV6PT48C+lieDzLwKBgQDF6aNKiyrswr457undBnM1H8q/Y6xC5ZlK -UtiQdhuyBnicvz0U8WPxBY/8gha0OXWuSnBqq/z77iFVNv/zT6p9K7kM7nBGd8cB -8KO6FNbyaHWFrhCI5zNzRTH4oha0hfvUOoti09vqavCtWD4L+D/63ba1wNLKPO9o -4gWbCnUCLwKBgQC/vus372csgrnvR761LLrEJ8BpGt7WUJh5luoht7DKtHvgRleB -Vu1oVcV+s2Iy/ZVUDC3OIdZ0hcWKPK5YOxfKuEk+IXYvke+4peTTPwHTC59UW6Fs -FPK8N0FFuhvT0a8RlAY5WiAp8rPysp6WcnHMSl7qi8BQUozp4Sp/RsziYQKBgBXv -r4mzoy5a53rEYGd/L4XT4EUWZyGDEVqLlDVu4eL5lKTLDZokp08vrqXuRVX0iHap -CYzJQ2EpI8iuL/BoBB2bmwcz5n3pCMXORld5t9lmeqA2it6hwbIlGUTVsm6P6zm6 -w3hQwy9YaxTLkxUAjxbfPEEo/jQsTNzzMGve3NlBAoGAbgJExpDyMDnaD2Vi5eyr -63b54BsqeLHqxJmADifyRCj7G1SJMm3zMKkNNOS0vsXgoiId973STFf1XQiojiv8 -Slbxyv5rczcY0n3LOuQYcM5OzsjzpNFZsT2dDnMfNRUF3rx3Geu/FuJ9scF1b00r -fVMrcL3jSf/W1Xh4TgtyoU8= ------END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem deleted file mode 100644 index 44e436f6ec7c..000000000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDcTCCAlmgAwIBAgIUDUkgI+2FZtuUHyUUi0ZBH7JvN00wDQYJKoZIhvcNAQEL -BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx -GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjEyMTI5MTVaFw00 -MzA4MjEyMTI5MTVaMEgxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UE -CgwGR29vZ2xlMRswGQYDVQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCbFEQfpvla27bATedrN4BAWsI9GSwSnJLW -QWzXcnAk6cKxQBAhnaKHRxHY8ttLhNTtxQeub894CLzJvHE/0xDhuMzjtCCCZ7i2 -r08tKZ1KcEzPJCPNlxlzAXPA45XU3LRlbGvju/PBPhm6n1hCEKTNI/KETJ5DEaYg -Cf2LcXVsl/zW20MwDZ+e2w/9a2a6n6DdpW1ekOR550hXAUOIxvmXRBeYeGLFvp1n -rQgZBhRaxP03UB+PQD2oMi/4mfsS96uGCXdzzX8qV46O8m132HUbnA/wagIwboEe -d7Bx237dERDyHw5GFnll7orgA0FOtoEufXdeQxWVvTjO0+PVPgsvAgMBAAGjUzBR -MB0GA1UdDgQWBBRyMtg/yutV8hw8vOq0i8x0eBQi7DAfBgNVHSMEGDAWgBRyMtg/ -yutV8hw8vOq0i8x0eBQi7DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA -A4IBAQArN/gdqWMxd5Rvq2eJMTp6I4RepJOT7Go4sMsRsy1caJqqcoS2EvREDZMN -XNEBcyQBB5kYd6TCcZGoLnEtWYXQ4jjEiXG1g7/+rWxyqw0ZYuP7FWzuHg3Uor/x -fApbEKwptP5ywVc+33h4qreGcqXkVCCn+sAcstGgrqubdGZW2T5gazUMyammOOuN -9IWL1PbvXmgEKD+80NUIrk09zanYyrElGdU/zw/kUbZ3Jf6WUBtJGhTzRQ1qZeKa -VnpCbLoG3vObEB8mxDUAlIzwAtfvw4U32BVIZA8xrocz6OOoAnSW1bTlo3EOIo/G -MTV7jmY9TBPtfhRuO/cG650+F+cw ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem deleted file mode 100644 index 68c60613458a..000000000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDbjCCAlagAwIBAgIUbexZ5sZl86Al9dsI2PkOgtqKnkgwDQYJKoZIhvcNAQEL -BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx -GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjIwMDMyMDRaFw00 -MzA4MjIwMDMyMDRaMDkxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEdMBsGA1UE -AwwUdGVzdC1zMmEtbXRscy1zZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw 
-ggEKAoIBAQCMEzybsGPqfh92GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvh -HkJVnTz9gwNBF3n5nUalqRzactlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5Qn -H76QlqD15oJreh7nSM8R4qj5KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAK -GYtFrB6buDn3Eg3Hsw6z7uj7CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJA -Ob66AjTmMbD16RGYZR4JsPx6CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFe -LoohtC8K7uTyjm/dROx6nHXdDt5TQYXHAgMBAAGjXzBdMBsGA1UdEQQUMBKHEAAA -AAAAAAAAAAAAAAAAAAAwHQYDVR0OBBYEFI3i2+tIk6YYn0MIxC0q93jk1VsUMB8G -A1UdIwQYMBaAFHIy2D/K61XyHDy86rSLzHR4FCLsMA0GCSqGSIb3DQEBCwUAA4IB -AQAUhk+s/lrIAULBbU7E22C8f93AzTxE1mhyHGNlfPPJP3t1Dl+h4X4WkFpkz5gT -EcNXB//Vvoq99HbEK5/92sxsIPexKdJBdcggeHXIgLDkOrEZEb0Nnh9eaAuU2QDn -JW44hMB+aF6mEaJvOHE6DRkQw3hwFYFisFKKHtlQ3TyOhw5CHGzSExPZusdSFNIe -2E7V/0QzGPJEFnEFUNe9N8nTH2P385Paoi+5+Iizlp/nztVXfzv0Cj/i+qGgtDUs -HB+gBU2wxMw8eYyuNzACH70wqGR1Parj8/JoyYhx0S4+Gjzy3JH3CcAMaxyfH/dI -4Wcvfz/isxgmH1UqIt3oc6ad ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem deleted file mode 100644 index b14ad0f724ee..000000000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCMEzybsGPqfh92 -GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvhHkJVnTz9gwNBF3n5nUalqRza -ctlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5QnH76QlqD15oJreh7nSM8R4qj5 -KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAKGYtFrB6buDn3Eg3Hsw6z7uj7 -CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJAOb66AjTmMbD16RGYZR4JsPx6 -CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFeLoohtC8K7uTyjm/dROx6nHXd -Dt5TQYXHAgMBAAECggEAIB5zGdIG/yh/Z1GBqfuOFaxFGx5iJ5BVlLAVH9P9IXFz -yPnVRXEjbinFlSMSbqEBeIX9EpcVMXxHIPIP1RIGEy2IYr3kiqXyT771ahDDZh6/ -Spqz0UQatSPqyvW3H9uE0Uc12dvQm23JSCUmPRX5m7gbhDQBIChXzdzdcU4Yi59V -4xmJUvbsAcLw5CBM6kwV+1NGVH9+3mUdhrr9M6B6+sVB/xnaqMGEDfQGiwL8U7EY -QOuc46KXu3Pd/qCdVLn60IrdjSzDJKeC5UZZ+ejNAo+DfbtOovBj3qu3OCUg4XVy -0CDBJ1sTdLvUfF4Gb+crjPsd+qBbXcjVfqdadwhsoQKBgQDBF1Pys/NitW8okJwp -2fiDIASP3TiI+MthWHGyuoZGPvmXQ3H6iuLSm8c/iYI2WPTf53Xff1VcFm1GmQms -GCsYM8Ax94zCeO6Ei1sYYxwcBloEZfOeV37MPA4pjJF4Lt+n5nveNxP+lrsjksJz -wToSEgWPDT1b/xcdt4/5j9J85wKBgQC5tiLx+33mwH4DoaFRmSl0+VuSNYFw6DTQ -SQ+kWqWGH4NENc9wf4Dj2VUZQhpXNhXVSxj+aP2d/ck1NrTJAWqYEXCDtFQOGSa2 -cGPRr+Fhy5NIEaEvR7IXcMBZzx3koYmWVBHricyrXs5FvHrT3N14mGDUG8n24U3f -R799bau0IQKBgQC97UM+lHCPJCWNggiJRgSifcje9VtZp1btjoBvq/bNe74nYkjn -htsrC91Fiu1Qpdlfr50K1IXSyaB886VG6JLjAGxI+dUzqJ38M9LLvxj0G+9JKjsi -AbAQFfZcOg8QZxLJZPVsE0MQhZTXndC06VhEVAOxvPUg214Sde8hK61/+wKBgCRw -O10VhnePT2pw/VEgZ0T/ZFtEylgYB7zSiRIrgwzVBBGPKVueePC8BPmGwdpYz2Hh -cU8B1Ll6QU+Co2hJMdwSl+wPpup5PuJPHRbYlrV0lzpt0x2OyL/WrLcyb2Ab3f40 -EqwPhqwdVwXR3JvTW1U9OMqFhVQ+kuP7lPQMX8NhAoGBAJOgZ7Tokipc4Mi68Olw -SCaOPvjjy4sW2rTRuKyjc1wTAzy7SJ3vXHfGkkN99nTLJFwAyJhWUpnRdwAXGi+x -gyOa95ImsEfRSwEjbluWfF8/P0IU8GR+ZTqT4NnNCOsi8T/xst4Szd1ECJNnnZDe -1ChfPP1AH+/75MJCvu6wQBQv ------END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem deleted file mode 100644 index ad1bad598459..000000000000 --- a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDITCCAgkCFBS8mLoytMpMWBwpAtnRaq3eIKnsMA0GCSqGSIb3DQEBCwUAME0x -CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UECgwEVGVzdDEiMCAGA1UE -AwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDAeFw0yMzA4MjIyMTE2MDFaFw00 -MzA4MjIyMTE2MDFaME0xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UE 
-CgwEVGVzdDEiMCAGA1UEAwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDCCASIw -DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKFFPsYasKZeCFLEXl3RpE/ZOXFe -2lhutIalSpZvCmso+mQGoZ4cHK7At+kDjBi5CrnXkYcw7quQAhHgU0frhWdj7tsW -HUUtq7T8eaGWKBnVD9fl+MjtAl1BmhXwV9qRBbj4EesSKGDSGpKf66dOtzw83JbB -cU7XlPAH1c1zo2GXC1himcZ+SVGHVrOjn4NmeFs8g94/Dke8dWkHwv5YTMVugFK4 -5KxKgSOKkr4ka7PCBzgxCnW4wYSZNRHcxrqkiArO2HAQq0ACr7u+fVDYH//9mP2Z -ADo/zch7O5yhkiNbjXJIRrptDWEuVYMRloYDhT773h7bV/Q0Wo0NQGtasJ8CAwEA -ATANBgkqhkiG9w0BAQsFAAOCAQEAPjbH0TMyegF/MDvglkc0sXr6DqlmTxDCZZmG -lYPZ5Xy062+rxIHghMARbvO4BxepiG37KsP2agvOldm4TtU8nQ8LyswmSIFm4BQ+ -XQWwdsWyYyd8l0d5sXAdaN6AXwy50fvqCepmEqyreMY6dtLzlwo9gVCBFB7QuAPt -Nc14phpEUZt/KPNuY6cUlB7bz3tmnFbwxUrWj1p0KBEYsr7+KEVZxR+z0wtlU7S9 -ZBrmUvx0fq5Ef7JWtHW0w4ofg1op742sdYl+53C26GZ76ts4MmqVz2/94DScgRaU -gT0GLVuuCZXRDVeTXqTb4mditRCfzFPe9cCegYhGhSqBs8yh5A== ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem deleted file mode 100644 index bcf08e4f12f4..000000000000 --- a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChRT7GGrCmXghS -xF5d0aRP2TlxXtpYbrSGpUqWbwprKPpkBqGeHByuwLfpA4wYuQq515GHMO6rkAIR -4FNH64VnY+7bFh1FLau0/HmhligZ1Q/X5fjI7QJdQZoV8FfakQW4+BHrEihg0hqS -n+unTrc8PNyWwXFO15TwB9XNc6NhlwtYYpnGfklRh1azo5+DZnhbPIPePw5HvHVp -B8L+WEzFboBSuOSsSoEjipK+JGuzwgc4MQp1uMGEmTUR3Ma6pIgKzthwEKtAAq+7 -vn1Q2B///Zj9mQA6P83IezucoZIjW41ySEa6bQ1hLlWDEZaGA4U++94e21f0NFqN -DUBrWrCfAgMBAAECggEAR8e8YwyqJ8KezcgdgIC5M9kp2i4v3UCZFX0or8CI0J2S -pUbWVLuKgLXCpfIwPyjNf15Vpei/spkMcsx4BQDthdFTFSzIpmvni0z9DlD5VFYj -ESOJElV7wepbHPy2/c+izmuL/ic81aturGiFyRgeMq+cN3WuaztFTXkPTrzzsZGF -p/Mx3gqm7Hoc3d2xlv+8L5GjCtEJPlQgZJV+s3ennBjOAd8CC7d9qJetE3Er46pn -r5jedV3bQRZYBzmooYNHjbAs26++wYac/jTE0/U6nKS17eWq4BQZUtlMXUw5N81B -7LKn7C03rj2KCn+Nf5uin9ALmoy888LXCDdvL/NZkQKBgQDduv1Heu+tOZuNYUdQ -Hswmd8sVNAAWGZxdxixHMv58zrgbLFXSX6K89X2l5Sj9XON8TH46MuSFdjSwwWw5 -fBrhVEhA5srcqpvVWIBE05yqPpt0s1NQktMWJKELWlG8jOhVKwM5OYDpdxtwehpz -1g70XJz+nF/LTV8RdTK+OWDDpQKBgQC6MhdbGHUz/56dY3gZpE5TXnN2hkNbZCgk -emr6z85VHhQflZbedhCzB9PUnZnCKWOGQHQdxRTtRfd46LVboZqCdYO1ZNQv6toP -ysS7dTpZZFy7CpQaW0Y6/jS65jW6xIDKR1W40vgltZ3sfpG37JaowpzWdw2WuOnw -Bg0rcJAf8wKBgQCqE+p/z97UwuF8eufWnyj9QNo382E1koOMspv4KTdnyLETtthF -vDH6O1wbykG8xmmASLRyM+NyNA+KnXNETNvZh2q8zctBpGRQK8iIAsGjHM7ln0AD -B/x+ea5GJQuZU4RK/+lDFca6TjBwAFkWDVX/PqL18kDQkxKfM4SuwRhmOQKBgDGh -eoJIsa0LnP787Z2AI3Srf4F/ZmLs/ppCm1OBotEjdF+64v0nYWonUvqgi8SqfaHi -elEZIGvis4ViGj1zhRjzNAlc+AZRxpBhDzGcnNIJI4Kj3jhsTfsZmXqcNIQ1LtM8 -Uogyi/yZPaA1WKg7Aym2vlGYaGHdplXZdxc2KOSrAoGABRkD9l2OVcwK7RyNgFxo -mjxx0tfUdDBhHIi2igih1FiHpeP9E+4/kE/K7PnU9DoDrL1jW1MTpXaYV4seOylk -k9z/9QfcRa9ePD2N4FqbHWSYp5n3aLoIcGq/9jyjTwayZbbIhWO+vNuHE9wIvecZ -8x3gNkxJRb4NaLIoNzAhCoo= ------END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/testdata/server_cert.pem deleted file mode 100644 index 0f98322c7244..000000000000 --- a/vendor/github.com/google/s2a-go/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ 
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/server_key.pem b/vendor/github.com/google/s2a-go/testdata/server_key.pem deleted file mode 100644 index 81afea783df9..000000000000 --- a/vendor/github.com/google/s2a-go/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/grafana/dskit/ring/partition_instance_ring.go b/vendor/github.com/grafana/dskit/ring/partition_instance_ring.go index 2fb15d8af98d..cffa4b2fcc5d 100644 --- a/vendor/github.com/grafana/dskit/ring/partition_instance_ring.go +++ b/vendor/github.com/grafana/dskit/ring/partition_instance_ring.go @@ -13,15 +13,25 @@ type PartitionRingReader interface { PartitionRing() *PartitionRing } +type InstanceRingReader interface { + // GetInstance return the 
InstanceDesc for the given instanceID or an error + // if the instance doesn't exist in the ring. The returned InstanceDesc is NOT a + // deep copy, so the caller should never modify it. + GetInstance(string) (InstanceDesc, error) + + // InstancesCount returns the number of instances in the ring. + InstancesCount() int +} + // PartitionInstanceRing holds a partitions ring and a instances ring, and provide functions // to look up the intersection of the two (e.g. healthy instances by partition). type PartitionInstanceRing struct { partitionsRingReader PartitionRingReader - instancesRing *Ring + instancesRing InstanceRingReader heartbeatTimeout time.Duration } -func NewPartitionInstanceRing(partitionsRingWatcher PartitionRingReader, instancesRing *Ring, heartbeatTimeout time.Duration) *PartitionInstanceRing { +func NewPartitionInstanceRing(partitionsRingWatcher PartitionRingReader, instancesRing InstanceRingReader, heartbeatTimeout time.Duration) *PartitionInstanceRing { return &PartitionInstanceRing{ partitionsRingReader: partitionsRingWatcher, instancesRing: instancesRing, @@ -33,7 +43,7 @@ func (r *PartitionInstanceRing) PartitionRing() *PartitionRing { return r.partitionsRingReader.PartitionRing() } -func (r *PartitionInstanceRing) InstanceRing() *Ring { +func (r *PartitionInstanceRing) InstanceRing() InstanceRingReader { return r.instancesRing } diff --git a/vendor/github.com/grafana/jsonparser/parser.go b/vendor/github.com/grafana/jsonparser/parser.go index 5df2a463dcee..9958ab4ee531 100644 --- a/vendor/github.com/grafana/jsonparser/parser.go +++ b/vendor/github.com/grafana/jsonparser/parser.go @@ -512,10 +512,10 @@ func EachKey(data []byte, cb func(int, []byte, ValueType, error), paths ...[]str } for pi, p := range paths { - if len(p) < level+1 || pathFlags[pi] || p[level][0] != '[' || !sameTree(p, pathsBuf[:level]) { + if len(p) < level+1 || pathFlags[pi] || p[level] == "" || p[level][0] != '[' || !sameTree(p, pathsBuf[:level]) { continue } - if len(p[level]) >= 2 { + if len(p[level]) > 2 { aIdx, _ := strconv.Atoi(p[level][1 : len(p[level])-1]) arrIdxFlags[aIdx] = x pIdxFlags[pi] = true @@ -712,12 +712,10 @@ func WriteToBuffer(buffer []byte, str string) int { } /* - Del - Receives existing data structure, path to delete. Returns: `data` - return modified data - */ func Delete(data []byte, keys ...string) []byte { lk := len(keys) @@ -798,13 +796,11 @@ func Delete(data []byte, keys ...string) []byte { } /* - Set - Receives existing data structure, path to set, and data to set at that key. 
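For illustration, the new InstanceRingReader interface added to dskit above can be satisfied by something much lighter than a full *ring.Ring. A minimal sketch under that assumption — the staticRing type, its field, and the package name are hypothetical, not part of this diff:

package ringtest

import (
	"fmt"

	"github.com/grafana/dskit/ring"
)

// staticRing is a hypothetical fixed view of instances, e.g. for tests that
// previously had to construct a full *ring.Ring just to build a
// PartitionInstanceRing.
type staticRing struct {
	instances map[string]ring.InstanceDesc
}

// GetInstance returns the InstanceDesc for instanceID, or an error if the
// instance is not in the view (mirroring the interface contract above).
func (r staticRing) GetInstance(instanceID string) (ring.InstanceDesc, error) {
	desc, ok := r.instances[instanceID]
	if !ok {
		return ring.InstanceDesc{}, fmt.Errorf("instance %s not found", instanceID)
	}
	return desc, nil
}

// InstancesCount returns the number of instances in the view.
func (r staticRing) InstancesCount() int { return len(r.instances) }

// Compile-time check that staticRing satisfies the new interface.
var _ ring.InstanceRingReader = staticRing{}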
Returns: `value` - modified byte array `err` - On any parsing error - */ func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error) { // ensure keys are set diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go index 51e295a2bccb..329ef8336669 100644 --- a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go @@ -12,8 +12,57 @@ import ( ) func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + var ret []TimesStat if percpu { - return []TimesStat{}, common.ErrNotImplementedError + per_out, err := invoke.CommandWithContext(ctx, "sar", "-u", "-P", "ALL", "10", "1") + if err != nil { + return nil, err + } + lines := strings.Split(string(per_out), "\n") + if len(lines) < 6 { + return []TimesStat{}, common.ErrNotImplementedError + } + + hp := strings.Fields(lines[5]) // headers + for l := 6; l < len(lines)-1; l++ { + ct := &TimesStat{} + v := strings.Fields(lines[l]) // values + for i, header := range hp { + // We're done in any of these use cases + if i >= len(v) || v[0] == "-" { + break + } + + // Position variable for v + pos := i + // There is a missing field at the beginning of all but the first line + // so adjust the position + if l > 6 { + pos = i - 1 + } + // We don't want invalid positions + if pos < 0 { + continue + } + + if t, err := strconv.ParseFloat(v[pos], 64); err == nil { + switch header { + case `cpu`: + ct.CPU = strconv.FormatFloat(t, 'f', -1, 64) + case `%usr`: + ct.User = t + case `%sys`: + ct.System = t + case `%wio`: + ct.Iowait = t + case `%idle`: + ct.Idle = t + } + } + } + // Valid CPU data, so append it + ret = append(ret, *ct) + } } else { out, err := invoke.CommandWithContext(ctx, "sar", "-u", "10", "1") if err != nil { @@ -24,26 +73,28 @@ func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { return []TimesStat{}, common.ErrNotImplementedError } - ret := TimesStat{CPU: "cpu-total"} + ct := &TimesStat{CPU: "cpu-total"} h := strings.Fields(lines[len(lines)-3]) // headers v := strings.Fields(lines[len(lines)-2]) // values for i, header := range h { if t, err := strconv.ParseFloat(v[i], 64); err == nil { switch header { case `%usr`: - ret.User = t + ct.User = t case `%sys`: - ret.System = t + ct.System = t case `%wio`: - ret.Iowait = t + ct.Iowait = t case `%idle`: - ret.Idle = t + ct.Idle = t } } } - return []TimesStat{ret}, nil + ret = append(ret, *ct) } + + return ret, nil } func InfoWithContext(ctx context.Context) ([]InfoStat, error) { @@ -78,6 +129,20 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { } } break + } else if strings.HasPrefix(line, "System Model:") { + p := strings.Split(string(line), ":") + if p != nil { + ret.VendorID = strings.TrimSpace(p[1]) + } + } else if strings.HasPrefix(line, "Processor Type:") { + p := strings.Split(string(line), ":") + if p != nil { + c := strings.Split(string(p[1]), "_") + if c != nil { + ret.Family = strings.TrimSpace(c[0]) + ret.Model = strings.TrimSpace(c[1]) + } + } } } return []InfoStat{ret}, nil diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go index c68d6bff0f64..5d17c7e977e2 100644 --- a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go @@ -11,9 +11,10 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v4/internal/common" 
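// For reference, the AIX per-CPU parser above expects `sar -u -P ALL 10 1`
// output with headers on the sixth line and data from the seventh, shaped
// roughly like this (inferred from the parsing code, not captured from a
// real system):
//
//	17:32:45 cpu  %usr  %sys  %wio  %idle  physc
//	17:32:55   0     2     1     0     97   0.25
//	              1     1     0     98   0.25
//
// Rows after the first omit the leading timestamp field, which is why the
// parser shifts the value index with pos = i - 1 once l > 6.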
"github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -136,7 +137,7 @@ func parseDmesgBoot(fileName string) (InfoStat, int, error) { c.Model = matches[4] t, err := strconv.ParseInt(matches[5], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %w", line, err) } c.Stepping = int32(t) } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil { @@ -150,12 +151,12 @@ func parseDmesgBoot(fileName string) (InfoStat, int, error) { } else if matches := cpuCores.FindStringSubmatch(line); matches != nil { t, err := strconv.ParseInt(matches[1], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %w", line, err) } cpuNum = int(t) t2, err := strconv.ParseInt(matches[2], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %w", line, err) } c.Cores = int32(t2) } diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go index f78c61a25b62..5f595e7b3eff 100644 --- a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go @@ -395,7 +395,7 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) { for _, line := range lines { line = strings.ToLower(line) if strings.HasPrefix(line, "processor") { - _, err = strconv.Atoi(strings.TrimSpace(line[strings.IndexByte(line, ':')+1:])) + _, err = strconv.ParseInt(strings.TrimSpace(line[strings.IndexByte(line, ':')+1:]), 10, 32) if err == nil { ret++ } @@ -464,11 +464,11 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) { } fields[0] = strings.TrimSpace(fields[0]) if fields[0] == "physical id" || fields[0] == "cpu cores" { - val, err := strconv.Atoi(strings.TrimSpace(fields[1])) + val, err := strconv.ParseInt(strings.TrimSpace(fields[1]), 10, 32) if err != nil { continue } - currentInfo[fields[0]] = val + currentInfo[fields[0]] = int(val) } } ret := 0 diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go index 2cda5cd24375..198be5e644db 100644 --- a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go @@ -9,9 +9,10 @@ import ( "runtime" "unsafe" - "github.com/shirou/gopsutil/v4/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) const ( diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go index 642aabc55832..868ea4daeed2 100644 --- a/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go @@ -15,6 +15,7 @@ import ( "errors" "fmt" "io" + "math" "net/url" "os" "os/exec" @@ -153,7 +154,7 @@ func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) { var ret []string r := bufio.NewReader(f) - for i := 0; i < n+int(offset) || n < 0; i++ { + for i := uint(0); i < uint(n)+offset || n < 0; i++ { line, err := r.ReadString('\n') if err != nil { 
if err == io.EOF && len(line) > 0 { @@ -161,7 +162,7 @@ func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) { } break } - if i < int(offset) { + if i < offset { continue } ret = append(ret, strings.Trim(line, "\n")) @@ -463,3 +464,11 @@ func getSysctrlEnv(env []string) []string { } return env } + +// Round places rounds the number 'val' to 'n' decimal places +func Round(val float64, n int) float64 { + // Calculate the power of 10 to the n + pow10 := math.Pow(10, float64(n)) + // Multiply the value by pow10, round it, then divide it by pow10 + return math.Round(val*pow10) / pow10 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go index 85802dcb097a..541de93d3573 100644 --- a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go @@ -90,6 +90,8 @@ func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) if enableCache { atomic.StoreUint64(&cachedBootTime, t) } + + return t, nil } filename := HostProcWithContext(ctx, "uptime") @@ -97,6 +99,8 @@ func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) if err != nil { return handleBootTimeFileReadErr(err) } + currentTime := float64(time.Now().UnixNano()) / float64(time.Second) + if len(lines) != 1 { return 0, fmt.Errorf("wrong uptime format") } @@ -105,7 +109,6 @@ func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) if err != nil { return 0, err } - currentTime := float64(time.Now().UnixNano()) / float64(time.Second) t := currentTime - b if enableCache { diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go index 8108a1caca7b..504f13ffd980 100644 --- a/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go @@ -7,7 +7,7 @@ import ( ) // Sleep awaits for provided interval. -// Can be interrupted by context cancelation. +// Can be interrupted by context cancellation. 
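// For illustration (not part of the diff), typical usage of the helper below:
//
//	if err := common.Sleep(ctx, 5*time.Second); err != nil {
//		// The context was cancelled before the interval elapsed;
//		// the error is the context's ctx.Err(), not a timer failure.
//		return err
//	}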
func Sleep(ctx context.Context, interval time.Duration) error { timer := time.NewTimer(interval) select { diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go index 916bff30df3e..ac2c39dd3825 100644 --- a/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go @@ -5,6 +5,8 @@ package mem import ( "context" + + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { @@ -14,3 +16,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { func SwapMemory() (*SwapMemoryStat, error) { return SwapMemoryWithContext(context.Background()) } + +func SwapDevices() ([]*SwapDevice, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go index cfcc4f90f16e..bc3c0ed3b4a8 100644 --- a/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go @@ -12,7 +12,7 @@ import ( ) func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { - vmem, swap, err := callSVMon(ctx) + vmem, swap, err := callSVMon(ctx, true) if err != nil { return nil, err } @@ -25,7 +25,7 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { } func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { - _, swap, err := callSVMon(ctx) + _, swap, err := callSVMon(ctx, false) if err != nil { return nil, err } @@ -35,7 +35,7 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { return swap, nil } -func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) { +func callSVMon(ctx context.Context, virt bool) (*VirtualMemoryStat, *SwapMemoryStat, error) { out, err := invoke.CommandWithContext(ctx, "svmon", "-G") if err != nil { return nil, nil, err @@ -45,7 +45,7 @@ func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) vmem := &VirtualMemoryStat{} swap := &SwapMemoryStat{} for _, line := range strings.Split(string(out), "\n") { - if strings.HasPrefix(line, "memory") { + if virt && strings.HasPrefix(line, "memory") { p := strings.Fields(line) if len(p) > 2 { if t, err := strconv.ParseUint(p[1], 10, 64); err == nil { diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go index d9cae7116b24..a6deddebdd83 100644 --- a/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go @@ -8,8 +8,9 @@ import ( "errors" "unsafe" - "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { @@ -85,7 +86,6 @@ func SwapMemory() (*SwapMemoryStat, error) { } // Constants from vm/vm_param.h -// nolint: golint const ( XSWDEV_VERSION11 = 1 XSWDEV_VERSION = 2 diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go index 4666cbd01e8b..522cfd1b38df 100644 --- a/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go @@ -77,26 +77,40 @@ func SwapMemory() (*SwapMemoryStat, error) { } func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + // Use the performance counter to get the swap usage 
percentage + counter, err := common.NewWin32PerformanceCounter("swap_percentage", `\Paging File(_Total)\% Usage`) + if err != nil { + return nil, err + } + usedPercent, err := counter.GetValue() + if err != nil { + return nil, err + } + + // Get total memory from performance information var perfInfo performanceInformation perfInfo.cb = uint32(unsafe.Sizeof(perfInfo)) mem, _, _ := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) if mem == 0 { return nil, windows.GetLastError() } - tot := perfInfo.commitLimit * perfInfo.pageSize - used := perfInfo.commitTotal * perfInfo.pageSize - free := tot - used - var usedPercent float64 - if tot == 0 { - usedPercent = 0 + totalPhys := perfInfo.physicalTotal * perfInfo.pageSize + totalSys := perfInfo.commitLimit * perfInfo.pageSize + total := totalSys - totalPhys + + var used uint64 + if total > 0 { + used = uint64(0.01 * usedPercent * float64(total)) } else { - usedPercent = float64(used) / float64(tot) * 100 + usedPercent = 0.0 + used = 0 } + ret := &SwapMemoryStat{ - Total: tot, + Total: total, Used: used, - Free: free, - UsedPercent: usedPercent, + Free: total - used, + UsedPercent: common.Round(usedPercent, 1), } return ret, nil diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go index df59abecbe1a..08a100d811ae 100644 --- a/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go @@ -117,7 +117,7 @@ func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, r return Addr{}, fmt.Errorf("unknown family, %d", family) } } - lport, err := strconv.Atoi(port) + lport, err := strconv.ParseInt(port, 10, 32) if err != nil { return Addr{}, err } @@ -286,11 +286,11 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, return ret, nil } -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) +func ConnectionsMax(kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) } -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } @@ -305,8 +305,8 @@ func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]Conn return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { @@ -317,14 +317,14 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return 
ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go index f86b7bf9e3b8..8f3f4d386ddf 100644 --- a/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go @@ -143,8 +143,8 @@ func newMapInterfaceNameUsage(ifaces []netstatInterface) mapInterfaceNameUsage { return output } -func (min mapInterfaceNameUsage) isTruncated() bool { - for _, usage := range min { +func (mapi mapInterfaceNameUsage) isTruncated() bool { + for _, usage := range mapi { if usage > 1 { return true } @@ -152,9 +152,9 @@ func (min mapInterfaceNameUsage) isTruncated() bool { return false } -func (min mapInterfaceNameUsage) notTruncated() []string { +func (mapi mapInterfaceNameUsage) notTruncated() []string { output := make([]string, 0) - for ifaceName, usage := range min { + for ifaceName, usage := range mapi { if usage == 1 { output = append(output, ifaceName) } @@ -247,7 +247,7 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, } } - if pernic == false { + if !pernic { return getIOCountersAll(ret) } return ret, nil diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go index e62deeeed361..a765e216b880 100644 --- a/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go @@ -49,11 +49,11 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, return []ConnectionStat{}, common.ErrNotImplementedError } -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) +func ConnectionsMax(kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) } -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } @@ -68,8 +68,8 @@ func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]Conn return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUids(kind 
string, pid int32) ([]ConnectionStat, error) { @@ -80,14 +80,14 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go index 155a49c40452..ccaab73e0b3e 100644 --- a/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go @@ -83,7 +83,7 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, ret = append(ret, n) } - if pernic == false { + if !pernic { return getIOCountersAll(ret) } @@ -96,7 +96,7 @@ func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { } func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { - return IOCounters(pernic) + return IOCountersWithContext(ctx, pernic) } func FilterCounters() ([]FilterStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go index 6db04b6279bf..2c79facb0573 100644 --- a/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go @@ -238,14 +238,14 @@ func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { } stats := make([]FilterStat, 0, 1) - max, err := common.ReadInts(maxfile) + maxConn, err := common.ReadInts(maxfile) if err != nil { return nil, err } payload := FilterStat{ ConnTrackCount: count[0], - ConnTrackMax: max[0], + ConnTrackMax: maxConn[0], } stats = append(stats, payload) @@ -396,12 +396,12 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, // Return a list of network connections opened returning at most `max` // connections for each running process. 
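The max → maxConn renames here and in the other net_*.go files are worth a note: since Go 1.21, max (and min) are predeclared functions, and a parameter named max shadows the builtin for the whole function body, which also trips lint rules about redefining builtins. A hypothetical sketch of the failure mode and the fix:

package main

import "fmt"

// func firstN(max int, conns []string) []string {
//	n := max(0, max) // does not compile: max is an int parameter here,
//	                 // not the predeclared function
//	...
// }

// With the parameter renamed, the builtin stays usable:
func firstN(maxConn int, conns []string) []string {
	n := min(maxConn, len(conns)) // predeclared min (Go 1.21+)
	return conns[:n]
}

func main() {
	fmt.Println(firstN(2, []string{"tcp", "udp", "unix"})) // [tcp udp]
}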
-func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) +func ConnectionsMax(kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) } -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } // Return a list of network connections opened, omitting `Uids`. @@ -415,8 +415,8 @@ func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]Conn return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } // Return a list of network connections opened by a process. @@ -437,23 +437,23 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid } // Return up to `max` network connections opened by a process. -func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMax(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, false) +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, true) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int, skipUids bool) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int, skipUids bool) ([]ConnectionStat, error) { tmap, ok := netConnectionKindMap[kind] if !ok { return nil, fmt.Errorf("invalid kind, %s", kind) @@ -462,16 +462,16 @@ func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, p var err error var inodes map[string][]inodeMap if pid == 0 { - inodes, err = 
getProcInodesAllWithContext(ctx, root, max) + inodes, err = getProcInodesAllWithContext(ctx, root, maxConn) } else { - inodes, err = getProcInodes(root, pid, max) + inodes, err = getProcInodes(root, pid, maxConn) if len(inodes) == 0 { // no connection for the pid return []ConnectionStat{}, nil } } if err != nil { - return nil, fmt.Errorf("cound not get pid(s), %d: %w", pid, err) + return nil, fmt.Errorf("could not get pid(s), %d: %w", pid, err) } return statsFromInodesWithContext(ctx, root, pid, tmap, inodes, skipUids) } @@ -543,7 +543,7 @@ func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tma } // getProcInodes returns fd of the pid. -func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, error) { +func getProcInodes(root string, pid int32, maxConn int) (map[string][]inodeMap, error) { ret := make(map[string][]inodeMap) dir := fmt.Sprintf("%s/%d/fd", root, pid) @@ -552,7 +552,7 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro return ret, err } defer f.Close() - dirEntries, err := f.ReadDir(max) + dirEntries, err := f.ReadDir(maxConn) if err != nil { return ret, err } @@ -573,7 +573,7 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro if !ok { ret[inode] = make([]inodeMap, 0) } - fd, err := strconv.Atoi(dirEntry.Name()) + fd, err := strconv.ParseInt(dirEntry.Name(), 10, 32) if err != nil { continue } @@ -668,11 +668,11 @@ func (p *process) fillFromStatus(ctx context.Context) error { return nil } -func getProcInodesAll(root string, max int) (map[string][]inodeMap, error) { - return getProcInodesAllWithContext(context.Background(), root, max) +func getProcInodesAll(root string, maxConn int) (map[string][]inodeMap, error) { + return getProcInodesAllWithContext(context.Background(), root, maxConn) } -func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map[string][]inodeMap, error) { +func getProcInodesAllWithContext(ctx context.Context, root string, maxConn int) (map[string][]inodeMap, error) { pids, err := PidsWithContext(ctx) if err != nil { return nil, err @@ -680,7 +680,7 @@ func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map ret := make(map[string][]inodeMap) for _, pid := range pids { - t, err := getProcInodes(root, pid, max) + t, err := getProcInodes(root, pid, maxConn) if err != nil { // skip if permission error or no longer exists if os.IsPermission(err) || os.IsNotExist(err) || errors.Is(err, io.EOF) { @@ -858,7 +858,7 @@ func processUnix(file string, kind netConnectionKindType, inodes map[string][]in if len(tokens) < 6 { continue } - st, err := strconv.Atoi(tokens[4]) + st, err := strconv.ParseInt(tokens[4], 10, 32) if err != nil { return nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go index 50e37fe4029c..7fae18b936cf 100644 --- a/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go @@ -24,9 +24,9 @@ func ParseNetstat(output string, mode string, exists := make([]string, 0, len(lines)-1) - columns := 6 - if mode == "ind" { - columns = 10 + columns := 9 + if mode == "inb" { + columns = 6 } for _, line := range lines { values := strings.Fields(line) @@ -49,18 +49,23 @@ func ParseNetstat(output string, mode string, parsed := make([]uint64, 0, 8) var vv []string - if mode == "inb" { + switch mode { + case "inb": vv = []string{ values[base+3], // BytesRecv 
values[base+4], // BytesSent } - } else { + case "ind": vv = []string{ values[base+3], // Ipkts - values[base+4], // Ierrs + values[base+4], // Idrop values[base+5], // Opkts + values[base+6], // Odrops + } + case "ine": + vv = []string{ + values[base+4], // Ierrs values[base+6], // Oerrs - values[base+8], // Drops } } for _, target := range vv { @@ -81,16 +86,19 @@ func ParseNetstat(output string, mode string, if !present { n = IOCountersStat{Name: values[0]} } - if mode == "inb" { + + switch mode { + case "inb": n.BytesRecv = parsed[0] n.BytesSent = parsed[1] - } else { + case "ind": n.PacketsRecv = parsed[0] - n.Errin = parsed[1] + n.Dropin = parsed[1] n.PacketsSent = parsed[2] - n.Errout = parsed[3] - n.Dropin = parsed[4] - n.Dropout = parsed[4] + n.Dropout = parsed[3] + case "ine": + n.Errin = parsed[0] + n.Errout = parsed[1] } iocs[n.Name] = n @@ -115,6 +123,10 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, if err != nil { return nil, err } + out3, err := invoke.CommandWithContext(ctx, netstat, "-ine") + if err != nil { + return nil, err + } iocs := make(map[string]IOCountersStat) lines := strings.Split(string(out), "\n") @@ -128,6 +140,10 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, if err != nil { return nil, err } + err = ParseNetstat(string(out3), "ine", iocs) + if err != nil { + return nil, err + } for _, ioc := range iocs { ret = append(ret, ioc) @@ -239,7 +255,7 @@ func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, r return Addr{}, fmt.Errorf("unknown family, %d", family) } } - lport, err := strconv.Atoi(port) + lport, err := strconv.ParseInt(port, 10, 32) if err != nil { return Addr{}, err } diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go index 71fc3b972a29..62f8907abf53 100644 --- a/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go @@ -25,11 +25,11 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, // Return a list of network connections opened returning at most `max` // connections for each running process. 
-func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) +func ConnectionsMax(kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) } -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } @@ -109,11 +109,11 @@ func parseNetLine(line string) (ConnectionStat, error) { f[7] = "unix" } - pid, err := strconv.Atoi(f[1]) + pid, err := strconv.ParseInt(f[1], 10, 32) if err != nil { return ConnectionStat{}, err } - fd, err := strconv.Atoi(strings.Trim(f[3], "u")) + fd, err := strconv.ParseInt(strings.Trim(f[3], "u"), 10, 32) if err != nil { return ConnectionStat{}, fmt.Errorf("unknown fd, %s", f[3]) } @@ -157,7 +157,7 @@ func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) { if err != nil { return Addr{}, fmt.Errorf("wrong addr, %s", l) } - lport, err := strconv.Atoi(port) + lport, err := strconv.ParseInt(port, 10, 32) if err != nil { return Addr{}, err } @@ -180,11 +180,11 @@ func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) { } // Return up to `max` network connections opened by a process. -func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMax(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } @@ -199,8 +199,8 @@ func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]Conn return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { @@ -211,14 +211,14 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return 
connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go index 12f62cda05bc..f1145feab291 100644 --- a/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go @@ -279,11 +279,11 @@ func getNetStatWithKind(kindType netConnectionKindType) ([]ConnectionStat, error // Return a list of network connections opened returning at most `max` // connections for each running process. -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) +func ConnectionsMax(kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) } -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } @@ -298,8 +298,8 @@ func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]Conn return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { @@ -310,15 +310,15 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process.go b/vendor/github.com/shirou/gopsutil/v4/process/process.go index 4082fc95a2c6..d73f1f972fa9 100644 --- 
a/vendor/github.com/shirou/gopsutil/v4/process/process.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process.go @@ -103,10 +103,18 @@ type RlimitStat struct { } type IOCountersStat struct { - ReadCount uint64 `json:"readCount"` + // ReadCount is a number of read I/O operations such as syscalls. + ReadCount uint64 `json:"readCount"` + // WriteCount is a number of write I/O operations such as syscalls. WriteCount uint64 `json:"writeCount"` - ReadBytes uint64 `json:"readBytes"` + // ReadBytes is a number of all I/O read in bytes. This includes disk I/O on Linux and Windows. + ReadBytes uint64 `json:"readBytes"` + // WriteBytes is a number of all I/O write in bytes. This includes disk I/O on Linux and Windows. WriteBytes uint64 `json:"writeBytes"` + // DiskReadBytes is a number of disk I/O read in bytes. Currently only Linux has this value. + DiskReadBytes uint64 `json:"diskReadBytes"` + // DiskWriteBytes is a number of disk I/O write in bytes. Currently only Linux has this value. + DiskWriteBytes uint64 `json:"diskWriteBytes"` } type NumCtxSwitchesStat struct { @@ -317,7 +325,11 @@ func calculatePercent(t1, t2 *cpu.TimesStat, delta float64, numcpu int) float64 if delta == 0 { return 0 } - delta_proc := t2.Total() - t1.Total() + // https://github.com/giampaolo/psutil/blob/c034e6692cf736b5e87d14418a8153bb03f6cf42/psutil/__init__.py#L1064 + delta_proc := (t2.User - t1.User) + (t2.System - t1.System) + if delta_proc <= 0 { + return 0 + } overall_percent := ((delta_proc / delta) * 100) * float64(numcpu) return overall_percent } @@ -539,8 +551,8 @@ func (p *Process) Connections() ([]net.ConnectionStat, error) { } // ConnectionsMax returns a slice of net.ConnectionStat used by the process at most `max`. -func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) { - return p.ConnectionsMaxWithContext(context.Background(), max) +func (p *Process) ConnectionsMax(maxConn int) ([]net.ConnectionStat, error) { + return p.ConnectionsMaxWithContext(context.Background(), maxConn) } // MemoryMaps get memory maps from /proc/(pid)/smaps diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go index 5231007c3c37..66b3684eae4a 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go @@ -193,24 +193,24 @@ func convertCPUTimes(s string) (ret float64, err error) { _t := strings.Split(s, ":") switch len(_t) { case 3: - hour, err := strconv.Atoi(_t[0]) + hour, err := strconv.ParseInt(_t[0], 10, 32) if err != nil { return ret, err } - t += hour * 60 * 60 * clockTicks + t += int(hour) * 60 * 60 * clockTicks - mins, err := strconv.Atoi(_t[1]) + mins, err := strconv.ParseInt(_t[1], 10, 32) if err != nil { return ret, err } - t += mins * 60 * clockTicks + t += int(mins) * 60 * clockTicks _tmp = _t[2] case 2: - mins, err := strconv.Atoi(_t[0]) + mins, err := strconv.ParseInt(_t[0], 10, 32) if err != nil { return ret, err } - t += mins * 60 * clockTicks + t += int(mins) * 60 * clockTicks _tmp = _t[1] case 1, 0: _tmp = s @@ -225,10 +225,10 @@ func convertCPUTimes(s string) (ret float64, err error) { if err != nil { return ret, err } - h, err := strconv.Atoi(_t[0]) - t += h * clockTicks - h, err = strconv.Atoi(_t[1]) - t += h + h, err := strconv.ParseInt(_t[0], 10, 32) + t += int(h) * clockTicks + h, err = strconv.ParseInt(_t[1], 10, 32) + t += int(h) return float64(t) / float64(clockTicks), nil } @@ -252,8 +252,8 @@ func (p *Process) 
ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) } func ProcessesWithContext(ctx context.Context) ([]*Process, error) { diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go index 090e21e0c76c..d498c9377a06 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go @@ -20,18 +20,25 @@ func (p *Process) CwdWithContext(ctx context.Context) (string, error) { func (p *Process) ExeWithContext(ctx context.Context) (string, error) { out, err := invoke.CommandWithContext(ctx, "lsof", "-p", strconv.Itoa(int(p.Pid)), "-Fpfn") if err != nil { - return "", fmt.Errorf("bad call to lsof: %s", err) + return "", fmt.Errorf("bad call to lsof: %w", err) } txtFound := 0 lines := strings.Split(string(out), "\n") + fallback := "" for i := 1; i < len(lines); i++ { if lines[i] == "ftxt" { txtFound++ + if txtFound == 1 { + fallback = lines[i-1][1:] + } if txtFound == 2 { return lines[i-1][1:], nil } } } + if fallback != "" { + return fallback, nil + } return "", fmt.Errorf("missing txt data returned by lsof") } @@ -104,15 +111,15 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e if err != nil { return nil, err } - rss, err := strconv.Atoi(r[0][0]) + rss, err := strconv.ParseInt(r[0][0], 10, 64) if err != nil { return nil, err } - vms, err := strconv.Atoi(r[0][1]) + vms, err := strconv.ParseInt(r[0][1], 10, 64) if err != nil { return nil, err } - pagein, err := strconv.Atoi(r[0][2]) + pagein, err := strconv.ParseInt(r[0][2], 10, 64) if err != nil { return nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go index 23793e92c506..e5410ea049c5 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go @@ -166,7 +166,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go index 3d21183d6561..436dcf030053 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go @@ -6,14 +6,16 @@ package process import ( "bytes" "context" + "errors" "path/filepath" "strconv" "strings" + "golang.org/x/sys/unix" + cpu "github.com/shirou/gopsutil/v4/cpu" "github.com/shirou/gopsutil/v4/internal/common" net "github.com/shirou/gopsutil/v4/net" - "golang.org/x/sys/unix" ) func pidsWithContext(ctx context.Context) ([]int32, error) { @@ -83,10 +85,7 @@ 
func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { return "", err } ret := strings.FieldsFunc(string(buf), func(r rune) bool { - if r == '\u0000' { - return true - } - return false + return r == '\u0000' }) return strings.Join(ret, " "), nil @@ -289,8 +288,8 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) } func ProcessesWithContext(ctx context.Context) ([]*Process, error) { @@ -331,7 +330,7 @@ func (p *Process) getKProc() (*KinfoProc, error) { return nil, err } if length != sizeOfKinfoProc { - return nil, err + return nil, errors.New("unexpected size of KinfoProc") } k, err := parseKinfoProc(buf) diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go index dbb3baa3e756..3dc301c027ff 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go @@ -2,7 +2,7 @@ //go:build freebsd && arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs process/types_freebsd.go +// cgo -godefs types_freebsd.go package process @@ -82,14 +82,14 @@ type Rlimit struct { type KinfoProc struct { Structsize int32 Layout int32 - Args *int64 /* pargs */ - Paddr *int64 /* proc */ - Addr *int64 /* user */ - Tracep *int64 /* vnode */ - Textvp *int64 /* vnode */ - Fd *int64 /* filedesc */ - Vmspace *int64 /* vmspace */ - Wchan *byte + Args int64 /* pargs */ + Paddr int64 /* proc */ + Addr int64 /* user */ + Tracep int64 /* vnode */ + Textvp int64 /* vnode */ + Fd int64 /* filedesc */ + Vmspace int64 /* vmspace */ + Wchan int64 Pid int32 Ppid int32 Pgid int32 @@ -140,7 +140,7 @@ type KinfoProc struct { Wmesg [9]uint8 Login [18]uint8 Lockname [9]uint8 - Comm [20]int8 + Comm [20]int8 // changed from uint8 by hand Emul [17]uint8 Loginclass [18]uint8 Moretdname [4]uint8 @@ -159,11 +159,12 @@ type KinfoProc struct { Pri Priority Rusage Rusage Rusage_ch Rusage - Pcb *int64 /* pcb */ - Kstack *byte - Udata *byte - Tdaddr *int64 /* thread */ - Spareptrs [6]*byte + Pcb int64 /* pcb */ + Kstack int64 + Udata int64 + Tdaddr int64 /* thread */ + Pd int64 /* pwddesc, not accurate */ + Spareptrs [5]int64 Sparelongs [12]int64 Sflag int64 Tdflags int64 @@ -195,7 +196,7 @@ type KinfoVmentry struct { Vn_rdev_freebsd11 uint32 Vn_mode uint16 Status uint16 - Vn_fsid uint64 + Type_spec [8]byte Vn_rdev uint64 X_kve_ispare [8]int32 Path [1024]uint8 diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go index 2151ed5c8462..7aff0448dfed 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go @@ -373,8 +373,8 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, 
"all", p.Pid, max) +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) } func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { @@ -399,7 +399,9 @@ func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]M // function of parsing a block getBlock := func(firstLine []string, block []string) (MemoryMapsStat, error) { m := MemoryMapsStat{} - m.Path = firstLine[len(firstLine)-1] + if len(firstLine) >= 6 { + m.Path = strings.Join(firstLine[5:], " ") + } for _, line := range block { if strings.Contains(line, "VmFlags") { @@ -727,8 +729,12 @@ func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, e case "syscw": ret.WriteCount = t case "read_bytes": - ret.ReadBytes = t + ret.DiskReadBytes = t case "write_bytes": + ret.DiskWriteBytes = t + case "rchar": + ret.ReadBytes = t + case "wchar": ret.WriteBytes = t } } diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go index 7cd8ca736429..e2d0ab462263 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "encoding/binary" + "errors" "fmt" "io" "path/filepath" @@ -68,7 +69,12 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { } func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError + mib := []int32{CTLKern, KernProcCwd, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + return common.ByteToString(buf), nil } func (p *Process) ExeWithContext(ctx context.Context) (string, error) { @@ -299,7 +305,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } @@ -338,7 +344,7 @@ func (p *Process) getKProc() (*KinfoProc, error) { return nil, err } if length != sizeOfKinfoProc { - return nil, err + return nil, errors.New("unexpected size of KinfoProc") } k, err := parseKinfoProc(buf) diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go index e3c5c2b5a1df..5b84706a7cfe 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go index beb7c9b0b4da..3229bb32c287 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go @@ -12,6 +12,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git 
a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go index ff082f43f87d..6f74ce756373 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go index e180ba359959..910454562581 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go index c53924b6f824..e3e0d36a09e8 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go index 726758cae958..c82e54a75bcc 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go @@ -166,7 +166,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go index 04f86f16b5e1..5c8d4d3b1efa 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go @@ -181,7 +181,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go index f3111649a6c7..52e1086f7805 100644 --- a/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go @@ -744,7 +744,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, 
maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version index f124bfa15544..013173af5e9b 100644 --- a/vendor/go.etcd.io/bbolt/.go-version +++ b/vendor/go.etcd.io/bbolt/.go-version @@ -1 +1 @@ -1.21.9 +1.22.6 diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile index 18154c638823..21407797416e 100644 --- a/vendor/go.etcd.io/bbolt/Makefile +++ b/vendor/go.etcd.io/bbolt/Makefile @@ -41,6 +41,15 @@ coverage: TEST_FREELIST_TYPE=array go test -v -timeout 30m \ -coverprofile cover-freelist-array.out -covermode atomic +BOLT_CMD=bbolt + +build: + go build -o bin/${BOLT_CMD} ./cmd/${BOLT_CMD} + +.PHONY: clean +clean: # Clean binaries + rm -f ./bin/${BOLT_CMD} + .PHONY: gofail-enable gofail-enable: install-gofail gofail enable . @@ -61,3 +70,7 @@ test-failpoint: @echo "[failpoint] array freelist test" TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint +.PHONY: test-robustness # Running robustness tests requires root permission +test-robustness: + go test -v ${TESTFLAGS} ./tests/dmflakey -test.root + go test -v ${TESTFLAGS} ./tests/robustness -test.root diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go index 4175bdf3dde9..822798e41a5b 100644 --- a/vendor/go.etcd.io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -524,7 +524,7 @@ func (db *DB) munmap() error { // gofail: var unmapError string // return errors.New(unmapError) if err := munmap(db); err != nil { - return fmt.Errorf("unmap error: " + err.Error()) + return fmt.Errorf("unmap error: %v", err.Error()) } return nil @@ -571,7 +571,7 @@ func (db *DB) munlock(fileSize int) error { // gofail: var munlockError string // return errors.New(munlockError) if err := munlock(db, fileSize); err != nil { - return fmt.Errorf("munlock error: " + err.Error()) + return fmt.Errorf("munlock error: %v", err.Error()) } return nil } @@ -580,7 +580,7 @@ func (db *DB) mlock(fileSize int) error { // gofail: var mlockError string // return errors.New(mlockError) if err := mlock(db, fileSize); err != nil { - return fmt.Errorf("mlock error: " + err.Error()) + return fmt.Errorf("mlock error: %v", err.Error()) } return nil } @@ -1159,6 +1159,8 @@ func (db *DB) grow(sz int) error { // https://github.com/boltdb/bolt/issues/284 if !db.NoGrowSync && !db.readOnly { if runtime.GOOS != "windows" { + // gofail: var resizeFileError string + // return errors.New(resizeFileError) if err := db.file.Truncate(int64(sz)); err != nil { return fmt.Errorf("file resize error: %s", err) } diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go index 61d43f81b46d..dffc7bc749b5 100644 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ b/vendor/go.etcd.io/bbolt/freelist.go @@ -252,6 +252,14 @@ func (f *freelist) rollback(txid txid) { } // Remove pages from pending list and mark as free if allocated by txid. delete(f.pending, txid) + + // Remove pgids which are allocated by this txid + for pgid, tid := range f.allocs { + if tid == txid { + delete(f.allocs, pgid) + } + } + f.mergeSpans(m) } diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go index 2fac8c0a7820..766395de3be7 100644 --- a/vendor/go.etcd.io/bbolt/tx.go +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -1,6 +1,7 @@ package bbolt import ( + "errors" "fmt" "io" "os" @@ -185,6 +186,10 @@ func (tx *Tx) Commit() error { // If the high water mark has moved up then attempt to grow the database. 
if tx.meta.pgid > opgid { + _ = errors.New("") + // gofail: var lackOfDiskSpace string + // tx.rollback() + // return errors.New(lackOfDiskSpace) if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { tx.rollback() return err @@ -470,6 +475,7 @@ func (tx *Tx) write() error { // Ignore file sync if flag is set on DB. if !tx.db.NoSync || IgnoreNoSync { + // gofail: var beforeSyncDataPages struct{} if err := fdatasync(tx.db); err != nil { return err } @@ -507,6 +513,7 @@ func (tx *Tx) writeMeta() error { return err } if !tx.db.NoSync || IgnoreNoSync { + // gofail: var beforeSyncMetaPage struct{} if err := fdatasync(tx.db); err != nil { return err } diff --git a/vendor/modules.txt b/vendor/modules.txt index ff9365f2c4c9..49e7bb611899 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -980,7 +980,7 @@ github.com/gorilla/websocket # github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 ## explicit; go 1.17 github.com/grafana/cloudflare-go -# github.com/grafana/dskit v0.0.0-20240930165212-f52de24af9bc +# github.com/grafana/dskit v0.0.0-20241007172036-53283a0f6b41 ## explicit; go 1.21 github.com/grafana/dskit/aws github.com/grafana/dskit/backoff @@ -1031,7 +1031,7 @@ github.com/grafana/go-gelf/v2/gelf # github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 ## explicit; go 1.18 github.com/grafana/gomemcache/memcache -# github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32 +# github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 ## explicit; go 1.13 github.com/grafana/jsonparser # github.com/grafana/loki/pkg/push v0.0.0-20240924133635-758364c7775f => ./pkg/push @@ -1527,8 +1527,8 @@ github.com/segmentio/fasthash/fnv1a # github.com/sercand/kuberesolver/v5 v5.1.1 ## explicit; go 1.18 github.com/sercand/kuberesolver/v5 -# github.com/shirou/gopsutil/v4 v4.24.0-alpha.1 -## explicit; go 1.20 +# github.com/shirou/gopsutil/v4 v4.24.8 +## explicit; go 1.18 github.com/shirou/gopsutil/v4/common github.com/shirou/gopsutil/v4/cpu github.com/shirou/gopsutil/v4/internal/common @@ -1669,8 +1669,8 @@ github.com/yuin/gopher-lua/pm # github.com/yusufpapurcu/wmi v1.2.4 ## explicit; go 1.16 github.com/yusufpapurcu/wmi -# go.etcd.io/bbolt v1.3.10 -## explicit; go 1.21 +# go.etcd.io/bbolt v1.3.11 +## explicit; go 1.22 go.etcd.io/bbolt # go.etcd.io/etcd/api/v3 v3.5.4 ## explicit; go 1.16