diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 2b6fb69076ca..b4c223478cc6 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,5 +1,5 @@ { - "image": "grafana/loki-build-image:0.33.6", + "image": "grafana/loki-build-image:0.34.0", "containerEnv": { "BUILD_IN_CONTAINER": "false" }, diff --git a/.github/jsonnetfile.json b/.github/jsonnetfile.json index 130eaeb4e984..bb6fd2eb41c9 100644 --- a/.github/jsonnetfile.json +++ b/.github/jsonnetfile.json @@ -8,7 +8,7 @@ "subdir": "workflows" } }, - "version": "98ce96e408db867d64fb95b59a99c24440ddf441" + "version": "d900569c04b53e02de6ef208fa77cba41ec5f709" } ], "legacyImports": true diff --git a/.github/jsonnetfile.lock.json b/.github/jsonnetfile.lock.json index 172082408f8c..7c45536e4f49 100644 --- a/.github/jsonnetfile.lock.json +++ b/.github/jsonnetfile.lock.json @@ -8,8 +8,8 @@ "subdir": "workflows" } }, - "version": "98ce96e408db867d64fb95b59a99c24440ddf441", - "sum": "pqEiutdl50ghtCY0wReq+Xa3AymHEyMa1OJQvRQXINI=" + "version": "d900569c04b53e02de6ef208fa77cba41ec5f709", + "sum": "+uAzU+b+aJtp3k+JX5mDxuh8LNY23+cHvUOwzCQ8CS8=" } ], "legacyImports": false diff --git a/.github/release-workflows.jsonnet b/.github/release-workflows.jsonnet index 9cf613dfc042..6c16af50ad74 100644 --- a/.github/release-workflows.jsonnet +++ b/.github/release-workflows.jsonnet @@ -31,7 +31,7 @@ local weeklyImageJobs = { local buildImageVersion = std.extVar('BUILD_IMAGE_VERSION'); local buildImage = 'grafana/loki-build-image:%s' % buildImageVersion; -local golangCiLintVersion = 'v1.55.1'; +local golangCiLintVersion = 'v1.60.3'; local imageBuildTimeoutMin = 60; local imagePrefix = 'grafana'; diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/main.jsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/main.jsonnet index d274d21a0571..b4d7b24246cf 100644 --- a/.github/vendor/github.com/grafana/loki-release/workflows/main.jsonnet +++ b/.github/vendor/github.com/grafana/loki-release/workflows/main.jsonnet @@ -9,13 +9,13 @@ releasePRWorkflow: function( branches=['release-[0-9]+.[0-9]+.x', 'k[0-9]+'], buildArtifactsBucket='loki-build-artifacts', - buildImage='grafana/loki-build-image:0.33.0', + buildImage='grafana/loki-build-image:0.34.0', changelogPath='CHANGELOG.md', checkTemplate='./.github/workflows/check.yml', distMakeTargets=['dist', 'packages'], dryRun=false, dockerUsername='grafana', - golangCiLintVersion='v1.55.1', + golangCiLintVersion='v1.60.3', imageBuildTimeoutMin=25, imageJobs={}, imagePrefix='grafana', @@ -139,7 +139,7 @@ type: 'boolean', }, golang_ci_lint_version: { - default: 'v1.55.1', + default: 'v1.60.3', description: 'version of golangci-lint to use', required: false, type: 'string', @@ -190,7 +190,7 @@ type: 'boolean', }, golang_ci_lint_version: { - default: 'v1.55.1', + default: 'v1.60.3', description: 'version of golangci-lint to use', required: false, type: 'string', diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 8e63a1e5d9ba..b9f7b83d5f97 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -2,8 +2,8 @@ "check": "uses": "grafana/loki-release/.github/workflows/check.yml@main" "with": - "build_image": "grafana/loki-build-image:0.33.6" - "golang_ci_lint_version": "v1.55.1" + "build_image": "grafana/loki-build-image:0.34.0" + "golang_ci_lint_version": "v1.60.3" "release_lib_ref": "main" "skip_validation": false "use_github_app_token": true @@ -12,4 +12,4 @@ "pull_request": {} "push": "branches": - - 
"main" \ No newline at end of file + - "main" diff --git a/.github/workflows/images.yml b/.github/workflows/images.yml index f6d8ca6e08aa..97b40cb2e05b 100644 --- a/.github/workflows/images.yml +++ b/.github/workflows/images.yml @@ -2,8 +2,8 @@ "check": "uses": "grafana/loki-release/.github/workflows/check.yml@main" "with": - "build_image": "grafana/loki-build-image:0.33.6" - "golang_ci_lint_version": "v1.55.1" + "build_image": "grafana/loki-build-image:0.34.0" + "golang_ci_lint_version": "v1.60.3" "release_lib_ref": "main" "skip_validation": false "use_github_app_token": true @@ -430,4 +430,4 @@ "permissions": "contents": "write" "id-token": "write" - "pull-requests": "write" \ No newline at end of file + "pull-requests": "write" diff --git a/.github/workflows/lint-jsonnet.yml b/.github/workflows/lint-jsonnet.yml index 37016f255ca5..e1fbf786f616 100644 --- a/.github/workflows/lint-jsonnet.yml +++ b/.github/workflows/lint-jsonnet.yml @@ -14,7 +14,7 @@ jobs: - name: setup go uses: actions/setup-go@v5 with: - go-version: '1.22.2' + go-version: '1.23.1' - name: setup jsonnet run: | go install github.com/google/go-jsonnet/cmd/jsonnet@v0.20.0 diff --git a/.github/workflows/minor-release-pr.yml b/.github/workflows/minor-release-pr.yml index 765744e35eb5..a5c52d0fb2ee 100644 --- a/.github/workflows/minor-release-pr.yml +++ b/.github/workflows/minor-release-pr.yml @@ -16,8 +16,8 @@ jobs: check: uses: "grafana/loki-release/.github/workflows/check.yml@main" with: - build_image: "grafana/loki-build-image:0.33.6" - golang_ci_lint_version: "v1.55.1" + build_image: "grafana/loki-build-image:0.34.0" + golang_ci_lint_version: "v1.60.3" release_lib_ref: "main" skip_validation: false use_github_app_token: true @@ -143,7 +143,7 @@ jobs: --env SKIP_ARM \ --volume .:/src/loki \ --workdir /src/loki \ - --entrypoint /bin/sh "grafana/loki-build-image:0.33.6" + --entrypoint /bin/sh "grafana/loki-build-image:0.34.0" git config --global --add safe.directory /src/loki echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE make dist packages @@ -828,4 +828,4 @@ name: "Prepare Minor Release PR from Weekly" permissions: contents: "write" id-token: "write" - pull-requests: "write" \ No newline at end of file + pull-requests: "write" diff --git a/.github/workflows/patch-release-pr.yml b/.github/workflows/patch-release-pr.yml index 96f6d86e2775..800f9afd7106 100644 --- a/.github/workflows/patch-release-pr.yml +++ b/.github/workflows/patch-release-pr.yml @@ -16,8 +16,8 @@ jobs: check: uses: "grafana/loki-release/.github/workflows/check.yml@main" with: - build_image: "grafana/loki-build-image:0.33.6" - golang_ci_lint_version: "v1.55.1" + build_image: "grafana/loki-build-image:0.34.0" + golang_ci_lint_version: "v1.60.3" release_lib_ref: "main" skip_validation: false use_github_app_token: true @@ -143,7 +143,7 @@ jobs: --env SKIP_ARM \ --volume .:/src/loki \ --workdir /src/loki \ - --entrypoint /bin/sh "grafana/loki-build-image:0.33.6" + --entrypoint /bin/sh "grafana/loki-build-image:0.34.0" git config --global --add safe.directory /src/loki echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE make dist packages @@ -828,4 +828,4 @@ name: "Prepare Patch Release PR" permissions: contents: "write" id-token: "write" - pull-requests: "write" \ No newline at end of file + pull-requests: "write" diff --git a/.github/workflows/promtail-windows-test.yml b/.github/workflows/promtail-windows-test.yml index cb47ae283161..90ccf72ad1be 100644 --- a/.github/workflows/promtail-windows-test.yml +++ b/.github/workflows/promtail-windows-test.yml @@ 
-10,7 +10,7 @@ jobs: runs-on: windows-latest strategy: matrix: - go-version: [ '1.21.9', '1.22.2' ] + go-version: [ '1.22.2', '1.23.1' ] steps: - uses: actions/checkout@v4 - name: Setup Go ${{ matrix.go-version }} @@ -21,4 +21,4 @@ jobs: - name: Display Go version run: go version - name: Run promtail tests - run: go test .\clients\pkg\promtail\targets\windows\... -v \ No newline at end of file + run: go test .\clients\pkg\promtail\targets\windows\... -v diff --git a/.github/workflows/verify-release-workflow.yaml b/.github/workflows/verify-release-workflow.yaml index 818269c6f0ea..dbd693897eb0 100644 --- a/.github/workflows/verify-release-workflow.yaml +++ b/.github/workflows/verify-release-workflow.yaml @@ -8,11 +8,11 @@ jobs: - name: setup go uses: actions/setup-go@v5 with: - go-version: '1.22.2' + go-version: '1.23.1' - name: setup jsonnet run: | go install github.com/google/go-jsonnet/cmd/jsonnet@v0.20.0 go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@v0.5.1 - name: Check that the release workflows have been updated properly run: | - make BUILD_IN_CONTAINER=false release-workflows-check \ No newline at end of file + make BUILD_IN_CONTAINER=false release-workflows-check diff --git a/.golangci.yml b/.golangci.yml index e6475895ad94..ae10a6ba210b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,13 +4,13 @@ # options for analysis running run: # define go version - go: "1.20" + go: "1.23" # default concurrency is a available CPU number concurrency: 16 # timeout for analysis, e.g. 30s, 5m, default is 1m - timeout: 5m + timeout: 10m # exit code when at least one issue was found, default is 1 issues-exit-code: 1 @@ -24,28 +24,12 @@ run: - cgo - promtail_journal_enabled - integration - - # which dirs to skip: they won't be analyzed; - # can use regexp here: generated.*, regexp is applied on full path; - # default value is empty list, but next dirs are always skipped independently - # from this option's value: - # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ - skip-dirs: - - win_eventlog$ - - operator - # which files to skip: they will be analyzed, but issues from them - # won't be reported. Default value is empty list, but there is - # no need to include all autogenerated files, we confidently recognize - # autogenerated files. If it's not please let us know. - skip-files: - - .*.pb.go - - .*.y.go - - .*.rl.go - - .*.deepcopy.go + # output configuration options output: - # colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number" - format: colored-line-number + formats: + # colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number" + - format: colored-line-number # print lines of code with issue, default is true print-issued-lines: true @@ -101,3 +85,20 @@ issues: linters: - goconst fix: true + # which dirs to skip: they won't be analyzed; + # can use regexp here: generated.*, regexp is applied on full path; + # default value is empty list, but next dirs are always skipped independently + # from this option's value: + # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + exclude-dirs: + - win_eventlog$ + - operator + # which files to skip: they will be analyzed, but issues from them + # won't be reported. Default value is empty list, but there is + # no need to include all autogenerated files, we confidently recognize + # autogenerated files. If it's not please let us know. 
+ exclude-files: + - .*.pb.go + - .*.y.go + - .*.rl.go + - .*.deepcopy.go diff --git a/Makefile b/Makefile index 476b08b29c1f..b75b4f5d5ced 100644 --- a/Makefile +++ b/Makefile @@ -37,8 +37,8 @@ DOCKER_IMAGE_DIRS := $(patsubst %/Dockerfile,%,$(DOCKERFILES)) BUILD_IN_CONTAINER ?= true # ensure you run `make release-workflows` after changing this -BUILD_IMAGE_VERSION ?= 0.33.6 -GO_VERSION := 1.22.6 +BUILD_IMAGE_VERSION ?= 0.34.0 +GO_VERSION := 1.23.1 # Docker image info IMAGE_PREFIX ?= grafana diff --git a/clients/cmd/docker-driver/Dockerfile b/clients/cmd/docker-driver/Dockerfile index 9de291514c9a..672556240f19 100644 --- a/clients/cmd/docker-driver/Dockerfile +++ b/clients/cmd/docker-driver/Dockerfile @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.6 +ARG BUILD_IMAGE=grafana/loki-build-image:0.34.0 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile . @@ -9,7 +9,7 @@ COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false clients/cmd/docker-driver/docker-driver -FROM alpine:3.20.2 +FROM alpine:3.20.3 RUN apk add --update --no-cache ca-certificates tzdata COPY --from=build /src/loki/clients/cmd/docker-driver/docker-driver /bin/docker-driver WORKDIR /bin/ diff --git a/clients/cmd/fluent-bit/Dockerfile b/clients/cmd/fluent-bit/Dockerfile index aadd28ce83b6..ae361b864429 100644 --- a/clients/cmd/fluent-bit/Dockerfile +++ b/clients/cmd/fluent-bit/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.6-bullseye AS builder +FROM golang:1.23-bullseye AS builder COPY . /src diff --git a/clients/cmd/promtail/Dockerfile b/clients/cmd/promtail/Dockerfile index 3c9088bb83ba..58e05719ac1c 100644 --- a/clients/cmd/promtail/Dockerfile +++ b/clients/cmd/promtail/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 FROM golang:${GO_VERSION}-bookworm as build COPY . /src/loki diff --git a/clients/cmd/promtail/Dockerfile.arm32 b/clients/cmd/promtail/Dockerfile.arm32 index b9f4a26dc8f0..cf517308c531 100644 --- a/clients/cmd/promtail/Dockerfile.arm32 +++ b/clients/cmd/promtail/Dockerfile.arm32 @@ -1,4 +1,4 @@ -FROM golang:1.22.6-bookworm as build +FROM golang:1.23-bookworm as build COPY . /src/loki WORKDIR /src/loki diff --git a/clients/cmd/promtail/Dockerfile.cross b/clients/cmd/promtail/Dockerfile.cross index 5bf89e71fa16..8459b7affb58 100644 --- a/clients/cmd/promtail/Dockerfile.cross +++ b/clients/cmd/promtail/Dockerfile.cross @@ -1,5 +1,5 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.6 -ARG GO_VERSION=1.22 +ARG BUILD_IMAGE=grafana/loki-build-image:0.34.0 +ARG GO_VERSION=1.23 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f clients/cmd/promtail/Dockerfile . diff --git a/clients/cmd/promtail/Dockerfile.debug b/clients/cmd/promtail/Dockerfile.debug index 24b6060241f4..2d48eb77a035 100644 --- a/clients/cmd/promtail/Dockerfile.debug +++ b/clients/cmd/promtail/Dockerfile.debug @@ -2,14 +2,14 @@ # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f clients/cmd/promtail/Dockerfile.debug . -FROM grafana/loki-build-image:0.33.6 as build +FROM grafana/loki-build-image:0.34.0 AS build ARG GOARCH="amd64" COPY . 
/src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false PROMTAIL_JOURNAL_ENABLED=true promtail-debug -FROM alpine:3.20.2 +FROM alpine:3.20.3 RUN apk add --update --no-cache ca-certificates tzdata COPY --from=build /src/loki/clients/cmd/promtail/promtail-debug /usr/bin/promtail-debug COPY --from=build /usr/bin/dlv /usr/bin/dlv diff --git a/cmd/logcli/Dockerfile b/cmd/logcli/Dockerfile index 999434d075a8..52a66fea9a0c 100644 --- a/cmd/logcli/Dockerfile +++ b/cmd/logcli/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 FROM golang:${GO_VERSION} as build COPY . /src/loki diff --git a/cmd/logql-analyzer/Dockerfile b/cmd/logql-analyzer/Dockerfile index 53ba7bee94b4..6cfb7ad795e4 100644 --- a/cmd/logql-analyzer/Dockerfile +++ b/cmd/logql-analyzer/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 FROM golang:${GO_VERSION} as build COPY . /src/loki diff --git a/cmd/loki-canary-boringcrypto/Dockerfile b/cmd/loki-canary-boringcrypto/Dockerfile index e69be2c0aeb6..48a10e92814b 100644 --- a/cmd/loki-canary-boringcrypto/Dockerfile +++ b/cmd/loki-canary-boringcrypto/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 FROM golang:${GO_VERSION} as build COPY . /src/loki diff --git a/cmd/loki-canary/Dockerfile b/cmd/loki-canary/Dockerfile index f0dcf02d5d81..2833d60590ca 100644 --- a/cmd/loki-canary/Dockerfile +++ b/cmd/loki-canary/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 FROM golang:${GO_VERSION} as build COPY . /src/loki diff --git a/cmd/loki-canary/Dockerfile.cross b/cmd/loki-canary/Dockerfile.cross index 078cb62a3726..deaafce0ddba 100644 --- a/cmd/loki-canary/Dockerfile.cross +++ b/cmd/loki-canary/Dockerfile.cross @@ -1,5 +1,5 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.6 -ARG GO_VERSION=1.22 +ARG BUILD_IMAGE=grafana/loki-build-image:0.34.0 +ARG GO_VERSION=1.23 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f cmd/promtail/Dockerfile . diff --git a/cmd/loki/Dockerfile b/cmd/loki/Dockerfile index 521c59159573..521a897d645d 100644 --- a/cmd/loki/Dockerfile +++ b/cmd/loki/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 FROM golang:${GO_VERSION} as build COPY . /src/loki diff --git a/cmd/loki/Dockerfile.cross b/cmd/loki/Dockerfile.cross index da9d358d28c5..97fba445ef2f 100644 --- a/cmd/loki/Dockerfile.cross +++ b/cmd/loki/Dockerfile.cross @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile . diff --git a/cmd/loki/Dockerfile.debug b/cmd/loki/Dockerfile.debug index d2a3d7c4dbbe..30edf6416ec3 100644 --- a/cmd/loki/Dockerfile.debug +++ b/cmd/loki/Dockerfile.debug @@ -1,5 +1,5 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.6 -ARG GO_VERSION=1.22 +ARG BUILD_IMAGE=grafana/loki-build-image:0.34.0 +ARG GO_VERSION=1.23 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile.debug . 
diff --git a/cmd/migrate/Dockerfile b/cmd/migrate/Dockerfile index a24697a719f2..82a78a4782d1 100644 --- a/cmd/migrate/Dockerfile +++ b/cmd/migrate/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 FROM golang:${GO_VERSION} as build COPY . /src/loki WORKDIR /src/loki diff --git a/cmd/querytee/Dockerfile b/cmd/querytee/Dockerfile index ea86fe0249ee..f2403d8df0e7 100644 --- a/cmd/querytee/Dockerfile +++ b/cmd/querytee/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 FROM golang:${GO_VERSION} as build COPY . /src/loki diff --git a/cmd/querytee/Dockerfile.cross b/cmd/querytee/Dockerfile.cross index 478f69a67e3b..83795cd3dc28 100644 --- a/cmd/querytee/Dockerfile.cross +++ b/cmd/querytee/Dockerfile.cross @@ -1,8 +1,8 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.6 +ARG BUILD_IMAGE=grafana/loki-build-image:0.34.0 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f cmd/promtail/Dockerfile . -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 FROM golang:${GO_VERSION} as goenv RUN go env GOARCH > /goarch && \ go env GOARM > /goarm diff --git a/loki-build-image/Dockerfile b/loki-build-image/Dockerfile index 05dc02244160..ec35fbb249f4 100644 --- a/loki-build-image/Dockerfile +++ b/loki-build-image/Dockerfile @@ -4,7 +4,7 @@ # tag of the Docker image in `../.drone/drone.jsonnet` and run `make drone`. # See ../docs/sources/community/maintaining/release-loki-build-image.md for instructions # on how to publish a new build image. -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 # Install helm (https://helm.sh/) and helm-docs (https://github.com/norwoodj/helm-docs) for generating Helm Chart reference. FROM golang:${GO_VERSION}-bookworm AS helm ARG TARGETARCH @@ -15,7 +15,7 @@ RUN BIN=$([ "$TARGETARCH" = "arm64" ] && echo "helm-docs_Linux_arm64" || echo "h curl -L "https://github.com/norwoodj/helm-docs/releases/download/v1.11.2/$BIN.tar.gz" | tar zx && \ install -t /usr/local/bin helm-docs -FROM alpine:3.20.2 AS lychee +FROM alpine:3.20.3 AS lychee ARG TARGETARCH ARG LYCHEE_VER="0.7.0" RUN apk add --no-cache curl && \ @@ -24,18 +24,18 @@ RUN apk add --no-cache curl && \ mv /tmp/lychee /usr/bin/lychee && \ rm -rf "/tmp/linux-$TARGETARCH" /tmp/lychee-$LYCHEE_VER.tgz -FROM alpine:3.20.2 AS golangci +FROM alpine:3.20.3 AS golangci RUN apk add --no-cache curl && \ cd / && \ - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.55.1 + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.60.3 -FROM alpine:3.20.2 AS buf +FROM alpine:3.20.3 AS buf ARG TARGETOS RUN apk add --no-cache curl && \ curl -sSL "https://github.com/bufbuild/buf/releases/download/v1.4.0/buf-$TARGETOS-$(uname -m)" -o "/usr/bin/buf" && \ chmod +x "/usr/bin/buf" -FROM alpine:3.20.2 AS docker +FROM alpine:3.20.3 AS docker RUN apk add --no-cache docker-cli docker-cli-buildx FROM golang:${GO_VERSION}-bookworm AS drone diff --git a/loki-build-image/README.md b/loki-build-image/README.md index 81bb708871f1..48106cb28c42 100644 --- a/loki-build-image/README.md +++ b/loki-build-image/README.md @@ -2,6 +2,11 @@ ## Versions +### 0.34.0 + +- Update to Go 1.23.1 +- Update to Alpine 3.20.3 + ### 0.33.6 - Update to go 1.22.6 diff --git a/pkg/canary/reader/reader.go b/pkg/canary/reader/reader.go index 88af34ce8e75..c98a7cab8fd7 100644 --- a/pkg/canary/reader/reader.go +++ b/pkg/canary/reader/reader.go @@ -390,6 +390,14 
@@ func (r *Reader) run() { // or times out based on the above SetReadDeadline call. err := unmarshal.ReadTailResponseJSON(tailResponse, r.conn) if err != nil { + var e *websocket.CloseError + if errors.As(err, &e) && e.Text == "reached tail max duration limit" { + fmt.Fprintf(r.w, "tail max duration limit exceeded, will retry immediately: %s\n", err) + + r.closeAndReconnect() + continue + } + reason := "error reading websocket" if e, ok := err.(net.Error); ok && e.Timeout() { reason = fmt.Sprintf("timeout tailing new logs (timeout period: %.2fs)", timeoutInterval.Seconds()) diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go index cbc6ff44b044..16d25c1ff5e8 100644 --- a/pkg/loki/config_wrapper.go +++ b/pkg/loki/config_wrapper.go @@ -540,6 +540,26 @@ func applyStorageConfig(cfg, defaults *ConfigWrapper) error { } } + if !reflect.DeepEqual(cfg.Common.Storage.AlibabaCloud, defaults.StorageConfig.AlibabaStorageConfig) { + configsFound++ + + applyConfig = func(r *ConfigWrapper) { + r.Ruler.StoreConfig.Type = "alibaba" + r.Ruler.StoreConfig.AlibabaCloud = r.Common.Storage.AlibabaCloud + r.StorageConfig.AlibabaStorageConfig = r.Common.Storage.AlibabaCloud + } + } + + if !reflect.DeepEqual(cfg.Common.Storage.COS, defaults.StorageConfig.COSConfig) { + configsFound++ + + applyConfig = func(r *ConfigWrapper) { + r.Ruler.StoreConfig.Type = "cos" + r.Ruler.StoreConfig.COS = r.Common.Storage.COS + r.StorageConfig.COSConfig = r.Common.Storage.COS + } + } + if !reflect.DeepEqual(cfg.Common.Storage.CongestionControl, defaults.StorageConfig.CongestionControl) { applyConfig = func(r *ConfigWrapper) { r.StorageConfig.CongestionControl = r.Common.Storage.CongestionControl diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go index e8894d6329b7..5e1ad00bec50 100644 --- a/pkg/loki/config_wrapper_test.go +++ b/pkg/loki/config_wrapper_test.go @@ -219,12 +219,16 @@ memberlist: assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) + assert.EqualValues(t, defaults.Ruler.StoreConfig.AlibabaCloud, config.Ruler.StoreConfig.AlibabaCloud) + assert.EqualValues(t, defaults.Ruler.StoreConfig.COS, config.Ruler.StoreConfig.COS) assert.EqualValues(t, defaults.StorageConfig.AWSStorageConfig, config.StorageConfig.AWSStorageConfig) assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.AlibabaStorageConfig, config.StorageConfig.AlibabaStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.COSConfig, config.StorageConfig.COSConfig) }) t.Run("when multiple configs are provided, an error is returned", func(t *testing.T) { @@ -296,12 +300,17 @@ memberlist: assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) + assert.EqualValues(t, 
defaults.Ruler.StoreConfig.AlibabaCloud, config.Ruler.StoreConfig.AlibabaCloud) + assert.EqualValues(t, defaults.Ruler.StoreConfig.COS, config.Ruler.StoreConfig.COS) + // should remain empty assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.AlibabaStorageConfig, config.StorageConfig.AlibabaStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.COSConfig, config.StorageConfig.COSConfig) }) t.Run("when common s3 storage config is provided (with session token), ruler and storage config are defaulted to use it", func(t *testing.T) { @@ -356,12 +365,17 @@ memberlist: assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) + assert.EqualValues(t, defaults.Ruler.StoreConfig.AlibabaCloud, config.Ruler.StoreConfig.AlibabaCloud) + assert.EqualValues(t, defaults.Ruler.StoreConfig.COS, config.Ruler.StoreConfig.COS) + // should remain empty assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.AlibabaStorageConfig, config.StorageConfig.AlibabaStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.COSConfig, config.StorageConfig.COSConfig) }) t.Run("when common gcs storage config is provided, ruler and storage config are defaulted to use it", func(t *testing.T) { @@ -392,12 +406,17 @@ memberlist: assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) + assert.EqualValues(t, defaults.Ruler.StoreConfig.AlibabaCloud, config.Ruler.StoreConfig.AlibabaCloud) + assert.EqualValues(t, defaults.Ruler.StoreConfig.COS, config.Ruler.StoreConfig.COS) + // should remain empty assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) assert.EqualValues(t, defaults.StorageConfig.AWSStorageConfig.S3Config, config.StorageConfig.AWSStorageConfig.S3Config) assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.AlibabaStorageConfig, config.StorageConfig.AlibabaStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.COSConfig, config.StorageConfig.COSConfig) }) t.Run("when common azure storage config is 
provided, ruler and storage config are defaulted to use it", func(t *testing.T) { @@ -444,6 +463,8 @@ memberlist: assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) + assert.EqualValues(t, defaults.Ruler.StoreConfig.AlibabaCloud, config.Ruler.StoreConfig.AlibabaCloud) + assert.EqualValues(t, defaults.Ruler.StoreConfig.COS, config.Ruler.StoreConfig.COS) // should remain empty assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) @@ -451,6 +472,8 @@ memberlist: assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.AlibabaStorageConfig, config.StorageConfig.AlibabaStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.COSConfig, config.StorageConfig.COSConfig) }) t.Run("when common bos storage config is provided, ruler and storage config are defaulted to use it", func(t *testing.T) { @@ -482,6 +505,8 @@ memberlist: assert.EqualValues(t, defaults.Ruler.StoreConfig.S3, config.Ruler.StoreConfig.S3) assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) + assert.EqualValues(t, defaults.Ruler.StoreConfig.AlibabaCloud, config.Ruler.StoreConfig.AlibabaCloud) + assert.EqualValues(t, defaults.Ruler.StoreConfig.COS, config.Ruler.StoreConfig.COS) // should remain empty assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) @@ -489,6 +514,8 @@ memberlist: assert.EqualValues(t, defaults.StorageConfig.AWSStorageConfig.S3Config, config.StorageConfig.AWSStorageConfig.S3Config) assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) + assert.EqualValues(t, defaults.StorageConfig.AlibabaStorageConfig, config.StorageConfig.AlibabaStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.COSConfig, config.StorageConfig.COSConfig) }) t.Run("when common swift storage config is provided, ruler and storage config are defaulted to use it", func(t *testing.T) { @@ -549,12 +576,103 @@ memberlist: assert.EqualValues(t, defaults.Ruler.StoreConfig.Azure, config.Ruler.StoreConfig.Azure) assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) + assert.EqualValues(t, defaults.Ruler.StoreConfig.AlibabaCloud, config.Ruler.StoreConfig.AlibabaCloud) + assert.EqualValues(t, defaults.Ruler.StoreConfig.COS, config.Ruler.StoreConfig.COS) + + // should remain empty + assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) + assert.EqualValues(t, defaults.StorageConfig.AWSStorageConfig.S3Config, config.StorageConfig.AWSStorageConfig.S3Config) + assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) + assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, 
config.StorageConfig.BOSStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.AlibabaStorageConfig, config.StorageConfig.AlibabaStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.COSConfig, config.StorageConfig.COSConfig) + }) + + t.Run("when common alibaba storage config is provided, ruler and storage config are defaulted to use it", func(t *testing.T) { + configInput := `common: + storage: + alibabacloud: + bucket: testbucket + endpoint: https://example.com + access_key_id: abc123 + secret_access_key: def789` + + config, defaults := testContext(configInput, nil) + + assert.Equal(t, "alibaba", config.Ruler.StoreConfig.Type) + + for _, actual := range []alibaba.OssConfig{ + config.Ruler.StoreConfig.AlibabaCloud, + config.StorageConfig.AlibabaStorageConfig, + } { + assert.Equal(t, "testbucket", actual.Bucket) + assert.Equal(t, "https://example.com", actual.Endpoint) + assert.Equal(t, "abc123", actual.AccessKeyID) + assert.Equal(t, "def789", actual.SecretAccessKey) + } + + // should remain empty + assert.EqualValues(t, defaults.Ruler.StoreConfig.GCS, config.Ruler.StoreConfig.GCS) + assert.EqualValues(t, defaults.Ruler.StoreConfig.S3, config.Ruler.StoreConfig.S3) + assert.EqualValues(t, defaults.Ruler.StoreConfig.Azure, config.Ruler.StoreConfig.Azure) + assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) + assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) + assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) + assert.EqualValues(t, defaults.Ruler.StoreConfig.COS, config.Ruler.StoreConfig.COS) + + // should remain empty + assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) + assert.EqualValues(t, defaults.StorageConfig.AWSStorageConfig.S3Config, config.StorageConfig.AWSStorageConfig.S3Config) + assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) + assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) + assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.COSConfig, config.StorageConfig.COSConfig) + }) + + t.Run("when common cos storage config is provided, ruler and storage config are defaulted to use it", func(t *testing.T) { + configInput := `common: + storage: + cos: + bucketnames: testbucket + endpoint: https://example.com + region: test-region + access_key_id: abc123 + secret_access_key: def789` + + config, defaults := testContext(configInput, nil) + + assert.Equal(t, "cos", config.Ruler.StoreConfig.Type) + + for _, actual := range []ibmcloud.COSConfig{ + config.Ruler.StoreConfig.COS, + config.StorageConfig.COSConfig, + } { + assert.Equal(t, "testbucket", actual.BucketNames) + assert.Equal(t, "https://example.com", actual.Endpoint) + assert.Equal(t, "test-region", actual.Region) + assert.Equal(t, "abc123", actual.AccessKeyID) + assert.Equal(t, flagext.SecretWithValue("def789"), actual.SecretAccessKey) + } + + // should remain empty + assert.EqualValues(t, defaults.Ruler.StoreConfig.GCS, config.Ruler.StoreConfig.GCS) + assert.EqualValues(t, defaults.Ruler.StoreConfig.S3, config.Ruler.StoreConfig.S3) + assert.EqualValues(t, defaults.Ruler.StoreConfig.Azure, config.Ruler.StoreConfig.Azure) + assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, 
config.Ruler.StoreConfig.Swift) + assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) + assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) + assert.EqualValues(t, defaults.Ruler.StoreConfig.AlibabaCloud, config.Ruler.StoreConfig.AlibabaCloud) + // should remain empty assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) assert.EqualValues(t, defaults.StorageConfig.AWSStorageConfig.S3Config, config.StorageConfig.AWSStorageConfig.S3Config) assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.AlibabaStorageConfig, config.StorageConfig.AlibabaStorageConfig) }) t.Run("when common filesystem/local config is provided, ruler and storage config are defaulted to use it", func(t *testing.T) { diff --git a/pkg/querier/queryrange/detected_fields.go b/pkg/querier/queryrange/detected_fields.go index 9c1ecd0c8a8a..115ba9601573 100644 --- a/pkg/querier/queryrange/detected_fields.go +++ b/pkg/querier/queryrange/detected_fields.go @@ -27,55 +27,59 @@ func NewDetectedFieldsHandler( limitedHandler base.Handler, logHandler base.Handler, limits Limits, -) base.Middleware { - return base.MiddlewareFunc(func(next base.Handler) base.Handler { - return base.HandlerFunc( - func(ctx context.Context, req base.Request) (base.Response, error) { - r, ok := req.(*DetectedFieldsRequest) - if !ok { - return nil, httpgrpc.Errorf( - http.StatusBadRequest, - "invalid request type, expected *DetectedFieldsRequest", - ) - } +) base.Handler { + return base.HandlerFunc( + func(ctx context.Context, req base.Request) (base.Response, error) { + r, ok := req.(*DetectedFieldsRequest) + if !ok { + return nil, httpgrpc.Errorf( + http.StatusBadRequest, + "invalid request type, expected *DetectedFieldsRequest", + ) + } - resp, err := makeDownstreamRequest(ctx, limits, limitedHandler, logHandler, r) - if err != nil { - return nil, err - } + resp, err := makeDownstreamRequest(ctx, limits, limitedHandler, logHandler, r) + if err != nil { + return nil, err + } - re, ok := resp.(*LokiResponse) - if !ok || re.Status != "success" { - return resp, nil + re, ok := resp.(*LokiResponse) + if !ok || re.Status != "success" { + return resp, nil + } + + detectedFields := parseDetectedFields(r.FieldLimit, re.Data.Result) + fields := make([]*logproto.DetectedField, len(detectedFields)) + fieldCount := 0 + for k, v := range detectedFields { + p := v.parsers + if len(p) == 0 { + p = nil + } + fields[fieldCount] = &logproto.DetectedField{ + Label: k, + Type: v.fieldType, + Cardinality: v.Estimate(), + Parsers: p, } - detectedFields := parseDetectedFields(r.FieldLimit, re.Data.Result) - fields := make([]*logproto.DetectedField, len(detectedFields)) - fieldCount := 0 - for k, v := range detectedFields { - p := v.parsers - if len(p) == 0 { - p = nil - } - fields[fieldCount] = &logproto.DetectedField{ - Label: k, - Type: v.fieldType, - Cardinality: v.Estimate(), - Parsers: p, - } + fieldCount++ + } - fieldCount++ - } + dfResp := DetectedFieldsResponse{ + Response: &logproto.DetectedFieldsResponse{ + Fields: fields, + }, + Headers: re.Headers, + } + + // Otherwise all they get is the field limit, 
which is a bit confusing + if len(fields) > 0 { + dfResp.Response.FieldLimit = r.GetFieldLimit() + } - return &DetectedFieldsResponse{ - Response: &logproto.DetectedFieldsResponse{ - Fields: fields, - FieldLimit: r.GetFieldLimit(), - }, - Headers: re.Headers, - }, nil - }) - }) + return &dfResp, nil + }) } func makeDownstreamRequest( diff --git a/pkg/querier/queryrange/detected_fields_test.go b/pkg/querier/queryrange/detected_fields_test.go index b82f3a4a70de..654a42ac8d00 100644 --- a/pkg/querier/queryrange/detected_fields_test.go +++ b/pkg/querier/queryrange/detected_fields_test.go @@ -1028,10 +1028,7 @@ func TestQuerier_DetectedFields(t *testing.T) { limitedHandler(mockLogfmtStreamWithLabels(1, 5, `{type="test", name="foo"}`)), logHandler(mockLogfmtStreamWithLabels(1, 5, `{type="test", name="foo"}`)), limits, - ).Wrap(base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { - t.Fatal("should not be called") - return nil, nil - })) + ) detectedFields := handleRequest(handler, request) // log lines come from querier_mock_test.go @@ -1058,10 +1055,7 @@ func TestQuerier_DetectedFields(t *testing.T) { limitedHandler(mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 5, `{type="test", name="bob"}`)), logHandler(mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 5, `{type="test", name="bob"}`)), limits, - ).Wrap(base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { - t.Fatal("should not be called") - return nil, nil - })) + ) detectedFields := handleRequest(handler, request) // log lines come from querier_mock_test.go @@ -1090,10 +1084,7 @@ func TestQuerier_DetectedFields(t *testing.T) { limitedHandler(mockLogfmtStreamWithLabels(1, 2, `{type="test", name="foo"}`)), logHandler(mockLogfmtStreamWithLabels(1, 2, `{type="test", name="foo"}`)), limits, - ).Wrap(base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { - t.Fatal("should not be called") - return nil, nil - })) + ) detectedFields := handleRequest(handler, request) // log lines come from querier_mock_test.go @@ -1136,10 +1127,7 @@ func TestQuerier_DetectedFields(t *testing.T) { ), logHandler(mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 2, `{type="test"}`)), limits, - ).Wrap(base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { - t.Fatal("should not be called") - return nil, nil - })) + ) detectedFields := handleRequest(handler, request) // log lines come from querier_mock_test.go @@ -1188,10 +1176,7 @@ func TestQuerier_DetectedFields(t *testing.T) { ), logHandler(mockLogfmtStreamWithLabelsAndStructuredMetadata(1, 2, `{type="test", name="bob"}`)), limits, - ).Wrap(base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { - t.Fatal("should not be called") - return nil, nil - })) + ) detectedFields := handleRequest(handler, request) // log lines come from querier_mock_test.go diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 8e1c6a04948d..4b9f3dbca9da 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -1214,7 +1214,7 @@ func sharedIndexTripperware( // NewDetectedFieldsTripperware creates a new frontend tripperware responsible for handling detected field requests, which are basically log filter requests with a bit more processing. 
func NewDetectedFieldsTripperware( limits Limits, - schema config.SchemaConfig, + _ config.SchemaConfig, limitedTripperware base.Middleware, logTripperware base.Middleware, ) (base.Middleware, error) { @@ -1222,7 +1222,6 @@ func NewDetectedFieldsTripperware( limitedHandler := limitedTripperware.Wrap(next) logHandler := logTripperware.Wrap(next) - detectedFieldsHandler := NewDetectedFieldsHandler(limitedHandler, logHandler, limits) - return NewLimitedRoundTripper(next, limits, schema.Configs, detectedFieldsHandler) + return NewDetectedFieldsHandler(limitedHandler, logHandler, limits) }), nil } diff --git a/pkg/querier/queryrange/shard_resolver.go b/pkg/querier/queryrange/shard_resolver.go index 31366d0a0dd7..4fe444c3bc59 100644 --- a/pkg/querier/queryrange/shard_resolver.go +++ b/pkg/querier/queryrange/shard_resolver.go @@ -225,7 +225,10 @@ func (r *dynamicShardResolver) ShardingRanges(expr syntax.Expr, targetBytesPerSh ) { log := spanlogger.FromContext(r.ctx) - adjustedFrom := r.from + var ( + adjustedFrom = r.from + adjustedThrough model.Time + ) // NB(owen-d): there should only ever be 1 matcher group passed // to this call as we call it separately for different legs @@ -236,18 +239,30 @@ func (r *dynamicShardResolver) ShardingRanges(expr syntax.Expr, targetBytesPerSh } for _, grp := range grps { - diff := grp.Interval + grp.Offset + diff := grp.Interval // For instant queries, when start == end, // we have a default lookback which we add here - if grp.Interval == 0 { - diff = diff + r.defaultLookback + if diff == 0 { + diff = r.defaultLookback } + diff += grp.Offset + // use the oldest adjustedFrom if r.from.Add(-diff).Before(adjustedFrom) { adjustedFrom = r.from.Add(-diff) } + + // use the latest adjustedThrough + if r.through.Add(-grp.Offset).After(adjustedThrough) { + adjustedThrough = r.through.Add(-grp.Offset) + } + } + + // handle the case where there are no matchers + if adjustedThrough == 0 { + adjustedThrough = r.through } exprStr := expr.String() @@ -256,7 +271,7 @@ func (r *dynamicShardResolver) ShardingRanges(expr syntax.Expr, targetBytesPerSh // use the retry handler here to retry transient errors resp, err := r.retryNextHandler.Do(r.ctx, &logproto.ShardsRequest{ From: adjustedFrom, - Through: r.through, + Through: adjustedThrough, Query: expr.String(), TargetBytesPerShard: targetBytesPerShard, }) diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go index c509783d8661..b1493089750a 100644 --- a/pkg/storage/store_test.go +++ b/pkg/storage/store_test.go @@ -1894,6 +1894,7 @@ func TestStore_BoltdbTsdbSameIndexPrefix(t *testing.T) { // recreate the store because boltdb-shipper now runs queriers on snapshots which are created every 1 min and during startup. 
store.Stop() + ResetBoltDBIndexClientsWithShipper() // there should be 2 index tables in the object storage indexTables, err := os.ReadDir(filepath.Join(cfg.FSConfig.Directory, "index")) diff --git a/pkg/storage/stores/shipper/indexshipper/downloads/index_set.go b/pkg/storage/stores/shipper/indexshipper/downloads/index_set.go index 8edd121071c5..971dcb0fb65b 100644 --- a/pkg/storage/stores/shipper/indexshipper/downloads/index_set.go +++ b/pkg/storage/stores/shipper/indexshipper/downloads/index_set.go @@ -283,6 +283,11 @@ func (t *indexSet) cleanupDB(fileName string) error { } func (t *indexSet) Sync(ctx context.Context) (err error) { + if !t.indexMtx.isReady() { + level.Info(t.logger).Log("msg", "skip sync since the index set is not ready") + return nil + } + return t.syncWithRetry(ctx, true, false) } diff --git a/pkg/storage/stores/shipper/indexshipper/downloads/table.go b/pkg/storage/stores/shipper/indexshipper/downloads/table.go index 1bae83c51e0e..5b9f29c3a0c1 100644 --- a/pkg/storage/stores/shipper/indexshipper/downloads/table.go +++ b/pkg/storage/stores/shipper/indexshipper/downloads/table.go @@ -13,6 +13,7 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/concurrency" "github.com/pkg/errors" + "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" "github.com/grafana/loki/v3/pkg/storage/chunk/client/util" @@ -271,9 +272,22 @@ func (t *table) Sync(ctx context.Context) error { level.Debug(t.logger).Log("msg", fmt.Sprintf("syncing files for table %s", t.name)) t.indexSetsMtx.RLock() - defer t.indexSetsMtx.RUnlock() + users := maps.Keys(t.indexSets) + t.indexSetsMtx.RUnlock() + + for _, userID := range users { + if err := ctx.Err(); err != nil { + return err + } + + t.indexSetsMtx.RLock() + indexSet, ok := t.indexSets[userID] + t.indexSetsMtx.RUnlock() + + if !ok { + continue + } - for userID, indexSet := range t.indexSets { if err := indexSet.Sync(ctx); err != nil { return errors.Wrap(err, fmt.Sprintf("failed to sync index set %s for table %s", userID, t.name)) } diff --git a/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go b/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go index 6b6927259378..3b4bc4bfb3fc 100644 --- a/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go +++ b/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go @@ -14,6 +14,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "golang.org/x/exp/maps" "github.com/grafana/loki/v3/pkg/compactor/deletion" "github.com/grafana/loki/v3/pkg/storage/chunk/client/util" @@ -180,6 +181,10 @@ func (tm *tableManager) ForEach(ctx context.Context, tableName, userID string, c } func (tm *tableManager) getOrCreateTable(tableName string) (Table, error) { + if tm.ctx.Err() != nil { + return nil, errors.New("table manager is stopping") + } + // if table is already there, use it. 
start := time.Now() tm.tablesMtx.RLock() @@ -214,7 +219,8 @@ func (tm *tableManager) getOrCreateTable(tableName string) (Table, error) { func (tm *tableManager) syncTables(ctx context.Context) error { tm.tablesMtx.RLock() - defer tm.tablesMtx.RUnlock() + tables := maps.Keys(tm.tables) + tm.tablesMtx.RUnlock() start := time.Now() var err error @@ -231,11 +237,24 @@ func (tm *tableManager) syncTables(ctx context.Context) error { level.Info(tm.logger).Log("msg", "syncing tables") - for name, table := range tm.tables { + for _, name := range tables { + if err := ctx.Err(); err != nil { + return err + } + level.Debug(tm.logger).Log("msg", "syncing table", "table", name) start := time.Now() + + tm.tablesMtx.RLock() + table, ok := tm.tables[name] + tm.tablesMtx.RUnlock() + + if !ok { + continue + } + err := table.Sync(ctx) - duration := float64(time.Since(start)) + duration := time.Since(start).Seconds() if err != nil { tm.metrics.tableSyncLatency.WithLabelValues(name, statusFailure).Observe(duration) return errors.Wrapf(err, "failed to sync table '%s'", name) diff --git a/pkg/storage/stores/shipper/indexshipper/downloads/util.go b/pkg/storage/stores/shipper/indexshipper/downloads/util.go index 457f76b3433d..4c5fcfee1674 100644 --- a/pkg/storage/stores/shipper/indexshipper/downloads/util.go +++ b/pkg/storage/stores/shipper/indexshipper/downloads/util.go @@ -23,6 +23,15 @@ func (m *mtxWithReadiness) markReady() { close(m.ready) } +func (m *mtxWithReadiness) isReady() bool { + select { + case <-m.ready: + return true + default: + return false + } +} + func (m *mtxWithReadiness) awaitReady(ctx context.Context) error { ctx, cancel := context.WithTimeoutCause(ctx, 30*time.Second, errors.New("exceeded 30 seconds in awaitReady")) defer cancel() diff --git a/production/helm/loki/src/helm-test/Dockerfile b/production/helm/loki/src/helm-test/Dockerfile index bb71f28b98ed..9645b206b105 100644 --- a/production/helm/loki/src/helm-test/Dockerfile +++ b/production/helm/loki/src/helm-test/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 FROM golang:${GO_VERSION} as build # build via Makefile target helm-test-image in root diff --git a/tools/dev/loki-tsdb-storage-s3/dev.dockerfile b/tools/dev/loki-tsdb-storage-s3/dev.dockerfile index d9cc7d0dab5f..d8526a3d9385 100644 --- a/tools/dev/loki-tsdb-storage-s3/dev.dockerfile +++ b/tools/dev/loki-tsdb-storage-s3/dev.dockerfile @@ -1,8 +1,8 @@ -FROM golang:1.22.6 +FROM golang:1.23 ENV CGO_ENABLED=0 RUN go install github.com/go-delve/delve/cmd/dlv@v1.22.1 -FROM alpine:3.20.2 +FROM alpine:3.20.3 RUN mkdir /loki WORKDIR /loki
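
The indexshipper `downloads` changes above (table.go, table_manager.go, util.go) replace iteration under a long-held read lock with a snapshot-and-revalidate loop, add a context check between entries, and introduce a non-blocking readiness probe built on a closed channel. Below is a minimal, self-contained Go sketch of that pattern, not the actual Loki code: the names (`registry`, `syncer`, `noopSyncer`) are illustrative, and plain stdlib key collection stands in for `golang.org/x/exp/maps.Keys`.

// Sketch of the sync pattern: snapshot map keys under a short read lock,
// release the lock before the (potentially slow) per-entry work, and
// re-fetch each entry under its own brief lock, skipping ones that have
// since been removed.
package main

import (
	"context"
	"fmt"
	"sync"
)

type syncer interface {
	Sync(ctx context.Context) error
}

type registry struct {
	mtx     sync.RWMutex
	entries map[string]syncer
}

func (r *registry) syncAll(ctx context.Context) error {
	// Snapshot the keys so the lock is not held across Sync calls.
	r.mtx.RLock()
	keys := make([]string, 0, len(r.entries))
	for k := range r.entries {
		keys = append(keys, k)
	}
	r.mtx.RUnlock()

	for _, k := range keys {
		// Stop early if the caller cancelled the context (e.g. during shutdown).
		if err := ctx.Err(); err != nil {
			return err
		}

		// Re-fetch under a brief lock; the entry may have been removed
		// since the snapshot was taken, in which case it is skipped.
		r.mtx.RLock()
		e, ok := r.entries[k]
		r.mtx.RUnlock()
		if !ok {
			continue
		}

		if err := e.Sync(ctx); err != nil {
			return fmt.Errorf("sync %s: %w", k, err)
		}
	}
	return nil
}

// readiness mirrors the non-blocking isReady() helper: a channel closed once
// initialisation finishes, polled with a select/default.
type readiness struct{ ready chan struct{} }

func newReadiness() *readiness { return &readiness{ready: make(chan struct{})} }
func (m *readiness) markReady() { close(m.ready) }
func (m *readiness) isReady() bool {
	select {
	case <-m.ready:
		return true
	default:
		return false
	}
}

type noopSyncer struct{ name string }

func (n noopSyncer) Sync(context.Context) error { fmt.Println("synced", n.name); return nil }

func main() {
	r := &registry{entries: map[string]syncer{"a": noopSyncer{"a"}, "b": noopSyncer{"b"}}}
	_ = r.syncAll(context.Background())

	m := newReadiness()
	fmt.Println(m.isReady()) // false
	m.markReady()
	fmt.Println(m.isReady()) // true
}

The trade-off in this pattern is that an entry added after the snapshot is simply picked up on the next sync cycle, which is acceptable for periodic background syncing and avoids blocking writers for the full duration of the loop.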