diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet index d9bfee681f2a..49f67f06861a 100644 --- a/.drone/drone.jsonnet +++ b/.drone/drone.jsonnet @@ -640,6 +640,7 @@ local build_image_tag = '0.33.0'; 'GIT_TARGET_BRANCH="$DRONE_TARGET_BRANCH"', ]) { depends_on: ['loki'], when: onPRs }, make('validate-example-configs', container=false) { depends_on: ['loki'] }, + make('validate-dev-cluster-config', container=false) { depends_on: ['validate-example-configs'] }, make('check-example-config-doc', container=false) { depends_on: ['clone'] }, { name: 'build-docs-website', diff --git a/.drone/drone.yml b/.drone/drone.yml index a4b2b44299ce..7a62b621262a 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -303,6 +303,13 @@ steps: environment: {} image: grafana/loki-build-image:0.33.0 name: validate-example-configs +- commands: + - make BUILD_IN_CONTAINER=false validate-dev-cluster-config + depends_on: + - validate-example-configs + environment: {} + image: grafana/loki-build-image:0.33.0 + name: validate-dev-cluster-config - commands: - make BUILD_IN_CONTAINER=false check-example-config-doc depends_on: @@ -2106,6 +2113,6 @@ kind: secret name: gpg_private_key --- kind: signature -hmac: 30f2fb121d8271e00dc2ae8fe83a32e0e22fd2bd268609d0c3f295033fcd4fb6 +hmac: 457592d17208477ceb480f81dbdb88f7b95a5ad015c88d9d6fed06c2422a52f9 ... diff --git a/CHANGELOG.md b/CHANGELOG.md index 348e1dc86ba9..04b692efcb00 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,8 @@ ##### Enhancements -* [11571](https://github.com/grafana/loki/pull/11571) **MichelHollands**: Add a metrics.go log line for requests from querier to ingester +* [11633](https://github.com/grafana/loki/pull/11633) **cyriltovena**: Add profiling integrations to tracing instrumentation. +* [11571](https://github.com/grafana/loki/pull/11571) **MichelHollands**: Add a metrics.go log line for requests from querier to ingester * [11477](https://github.com/grafana/loki/pull/11477) **MichelHollands**: support GET for /ingester/shutdown * [11363](https://github.com/grafana/loki/pull/11363) **kavirajk**: bugfix(memcached): Make memcached batch fetch truly context aware. * [11319](https://github.com/grafana/loki/pull/11319) **someStrangerFromTheAbyss**: Helm: Add extraContainers to the write pods. @@ -46,14 +47,22 @@ * [11539](https://github.com/grafana/loki/pull/11539) **kaviraj,ashwanthgoli** Support caching /series and /labels query results * [11545](https://github.com/grafana/loki/pull/11545) **dannykopping** Force correct memcached timeout when fetching chunks. * [11589](https://github.com/grafana/loki/pull/11589) **ashwanthgoli** Results Cache: Adds `query_length_served` cache stat to measure the length of the query served from cache. +* [11535](https://github.com/grafana/loki/pull/11535) **dannykopping** Query Frontend: Allow customisable splitting of queries which overlap the `query_ingesters_within` window to reduce query pressure on ingesters. +* [11654](https://github.com/grafana/loki/pull/11654) **dannykopping** Cache: atomically check background cache size limit correctly. +* [11682](https://github.com/grafana/loki/pull/11682) **ashwanthgoli** Metadata cache: Adds `frontend.max-metadata-cache-freshness` to configure the time window for which metadata results are not cached. This helps avoid returning inaccurate results by not caching recent results. +* [11679](https://github.com/grafana/loki/pull/11679) **dannykopping** Cache: extending #11535 to align custom ingester query split with cache keys for correct caching of results.
+* [11499](https://github.com/grafana/loki/pull/11284) **jmichalek132** Config: Adds `frontend.log-query-request-headers` to enable logging of request headers in query logs. ##### Fixes * [11074](https://github.com/grafana/loki/pull/11074) **hainenber** Fix panic in lambda-promtail due to mishandling of empty DROP_LABELS env var. * [11195](https://github.com/grafana/loki/pull/11195) **canuteson** Generate tsdb_shipper storage_config even if using_boltdb_shipper is false * [9831](https://github.com/grafana/loki/pull/9831) **sijmenhuizenga**: Fix Promtail excludepath not evaluated on newly added files. * [11551](https://github.com/grafana/loki/pull/11551) **dannykopping** Do not reflect label names in request metrics' "route" label. +* [11563](https://github.com/grafana/loki/pull/11563) **ptqa** Fix duplicate logs from docker containers. * [11601](https://github.com/grafana/loki/pull/11601) **dannykopping** Ruler: Fixed a panic that can be caused by concurrent read-write access of tenant configs when there are a large amount of rules. * [11606](https://github.com/grafana/loki/pull/11606) **dannykopping** Fixed regression adding newlines to HTTP error response bodies which may break client integrations. +* [11657](https://github.com/grafana/loki/pull/11657) **ashwanthgoli** Log results cache: compose empty response based on the request being served to avoid returning incorrect limit or direction. +* [11587](https://github.com/grafana/loki/pull/11587) **trevorwhitney** Fix semantics of label parsing logic of metrics and logs queries. Both only parse the first label if multiple extractions into the same label are requested. ##### Changes @@ -64,10 +73,12 @@ * [10959](https://github.com/grafana/loki/pull/10959) **slim-bean** introduce a backoff wait on subquery retries. * [11121](https://github.com/grafana/loki/pull/11121) **periklis** Ensure all lifecycler cfgs ref a valid IPv6 addr and port combination * [10650](https://github.com/grafana/loki/pull/10650) **matthewpi** Ensure the frontend uses a valid IPv6 addr and port combination +* [11665](https://github.com/grafana/loki/pull/11665) **salvacorts** Deprecate and flip `-legacy-read-mode` flag to `false` by default. #### Promtail * [10752](https://github.com/grafana/loki/pull/10752) **gonzalesraul**: structured_metadata: enable structured_metadata convert labels +* [11511](https://github.com/grafana/loki/pull/11511) **kavirajk**: chore(promtail): Improve default configuration that is shipped with rpm/deb packages to avoid possible high CPU utilisation if there are lots of files inside `/var/log`. ##### Enhancements @@ -92,6 +103,7 @@ #### Mixins * [11087](https://github.com/grafana/loki/pull/11087) **JoaoBraveCoding**: Adds structured metadata panels for ingested data +* [11637](https://github.com/grafana/loki/pull/11637) **JoaoBraveCoding**: Add route to write Distributor Latency dashboard #### Fixes diff --git a/MAINTAINERS.md b/MAINTAINERS.md index ba11c534b1b6..072cc863392a 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -1,4 +1,4 @@ @slim-bean is the main/default maintainer. 
Some parts of the codebase have other maintainers: -- `@grafana/docs-logs`, which includes [@osg-grafana](https://github.com/osg-grafana) ([Grafana Labs](https://grafana.com/)) and [@knylander-grafana](https://github.com/knylander-grafana) ([Grafana Labs](https://grafana.com/)) +- `@grafana/docs-logs`, which includes [@knylander-grafana](https://github.com/knylander-grafana) ([Grafana Labs](https://grafana.com/)) diff --git a/Makefile b/Makefile index 50938cac56d9..d311ed1c4f3c 100644 --- a/Makefile +++ b/Makefile @@ -801,6 +801,9 @@ EXAMPLES_SKIP_VALIDATION_FLAG := "doc-example:skip-validation=true" validate-example-configs: loki for f in $$(grep -rL $(EXAMPLES_SKIP_VALIDATION_FLAG) $(EXAMPLES_YAML_PATH)/*.yaml); do echo "Validating provided example config: $$f" && ./cmd/loki/loki -config.file=$$f -verify-config || exit 1; done +validate-dev-cluster-config: loki + ./cmd/loki/loki -config.file=./tools/dev/loki-boltdb-storage-s3/config/loki.yaml -verify-config + # Dynamically generate ./docs/sources/configure/examples.md using the example configs that we provide. # This target should be run if any of our example configs change. generate-example-config-doc: diff --git a/clients/cmd/fluent-bit/config.go b/clients/cmd/fluent-bit/config.go index 768ba845a330..469e18d495d7 100644 --- a/clients/cmd/fluent-bit/config.go +++ b/clients/cmd/fluent-bit/config.go @@ -2,7 +2,6 @@ package main import ( "encoding/json" - "errors" "fmt" "os" "strconv" @@ -67,7 +66,7 @@ func parseConfig(cfg ConfigGetter) (*config, error) { } err := clientURL.Set(url) if err != nil { - return nil, errors.New("failed to parse client URL") + return nil, fmt.Errorf("failed to parse client URL: %w", err) } res.clientConfig.URL = clientURL @@ -83,7 +82,7 @@ func parseConfig(cfg ConfigGetter) (*config, error) { } else { batchWaitValue, err := time.ParseDuration(batchWait) if err != nil { - return nil, fmt.Errorf("failed to parse BatchWait: %s", batchWait) + return nil, fmt.Errorf("failed to parse BatchWait %s: %w", batchWait, err) } res.clientConfig.BatchWait = batchWaitValue } @@ -93,7 +92,7 @@ func parseConfig(cfg ConfigGetter) (*config, error) { if batchSize != "" { batchSizeValue, err := strconv.Atoi(batchSize) if err != nil { - return nil, fmt.Errorf("failed to parse BatchSize: %s", batchSize) + return nil, fmt.Errorf("failed to parse BatchSize %s: %w", batchSize, err) } res.clientConfig.BatchSize = batchSizeValue } @@ -102,7 +101,7 @@ func parseConfig(cfg ConfigGetter) (*config, error) { if timeout != "" { timeoutValue, err := time.ParseDuration(timeout) if err != nil { - return nil, fmt.Errorf("failed to parse Timeout: %s", timeout) + return nil, fmt.Errorf("failed to parse Timeout %s: %w", timeout, err) } res.clientConfig.Timeout = timeoutValue } @@ -111,7 +110,7 @@ func parseConfig(cfg ConfigGetter) (*config, error) { if minBackoff != "" { minBackoffValue, err := time.ParseDuration(minBackoff) if err != nil { - return nil, fmt.Errorf("failed to parse MinBackoff: %s", minBackoff) + return nil, fmt.Errorf("failed to parse MinBackoff %s: %w", minBackoff, err) } res.clientConfig.BackoffConfig.MinBackoff = minBackoffValue } @@ -120,7 +119,7 @@ func parseConfig(cfg ConfigGetter) (*config, error) { if maxBackoff != "" { maxBackoffValue, err := time.ParseDuration(maxBackoff) if err != nil { - return nil, fmt.Errorf("failed to parse MaxBackoff: %s", maxBackoff) + return nil, fmt.Errorf("failed to parse MaxBackoff %s: %w", maxBackoff, err) } res.clientConfig.BackoffConfig.MaxBackoff = maxBackoffValue } @@ -129,7 +128,7 @@ func 
parseConfig(cfg ConfigGetter) (*config, error) { if maxRetries != "" { maxRetriesValue, err := strconv.Atoi(maxRetries) if err != nil { - return nil, fmt.Errorf("failed to parse MaxRetries: %s", maxRetries) + return nil, fmt.Errorf("failed to parse MaxRetries %s: %w", maxRetries, err) } res.clientConfig.BackoffConfig.MaxRetries = maxRetriesValue } @@ -154,7 +153,7 @@ func parseConfig(cfg ConfigGetter) (*config, error) { } var level log.Level if err := level.Set(logLevel); err != nil { - return nil, fmt.Errorf("invalid log level: %v", logLevel) + return nil, fmt.Errorf("invalid log level %v: %w", logLevel, err) } res.logLevel = level @@ -238,7 +237,7 @@ func parseConfig(cfg ConfigGetter) (*config, error) { if queueSegmentSize != "" { res.bufferConfig.dqueConfig.queueSegmentSize, err = strconv.Atoi(queueSegmentSize) if err != nil { - return nil, fmt.Errorf("impossible to convert string to integer DqueSegmentSize: %v", queueSegmentSize) + return nil, fmt.Errorf("impossible to convert string to integer DqueSegmentSize %v: %w", queueSegmentSize, err) } } diff --git a/clients/pkg/promtail/targets/docker/target.go b/clients/pkg/promtail/targets/docker/target.go index 39b91ce21a5b..bb26391ab199 100644 --- a/clients/pkg/promtail/targets/docker/target.go +++ b/clients/pkg/promtail/targets/docker/target.go @@ -222,6 +222,7 @@ func (t *Target) process(r io.Reader, logStream string) { } t.metrics.dockerEntries.Inc() t.positions.Put(positions.CursorKey(t.containerName), ts.Unix()) + t.since = ts.Unix() } } diff --git a/clients/pkg/promtail/targets/docker/target_test.go b/clients/pkg/promtail/targets/docker/target_test.go index d2d2e58b3caa..e9bbf15b55bb 100644 --- a/clients/pkg/promtail/targets/docker/target_test.go +++ b/clients/pkg/promtail/targets/docker/target_test.go @@ -27,7 +27,13 @@ func Test_DockerTarget(t *testing.T) { h := func(w http.ResponseWriter, r *http.Request) { switch path := r.URL.Path; { case strings.HasSuffix(path, "/logs"): - dat, err := os.ReadFile("testdata/flog.log") + var filePath string + if strings.Contains(r.URL.RawQuery, "since=0") { + filePath = "testdata/flog.log" + } else { + filePath = "testdata/flog_after_restart.log" + } + dat, err := os.ReadFile(filePath) require.NoError(t, err) _, err = w.Write(dat) require.NoError(t, err) @@ -59,7 +65,7 @@ func Test_DockerTarget(t *testing.T) { }) require.NoError(t, err) - _, err = NewTarget( + target, err := NewTarget( NewMetrics(prometheus.NewRegistry()), logger, entryHandler, @@ -92,4 +98,28 @@ func Test_DockerTarget(t *testing.T) { actualLines = append(actualLines, entry.Line) } require.ElementsMatch(t, actualLines, expectedLines) + + // restart target to simulate container restart + target.startIfNotRunning() + entryHandler.Clear() + require.Eventually(t, func() bool { + return len(entryHandler.Received()) >= 5 + }, 5*time.Second, 100*time.Millisecond) + + receivedAfterRestart := entryHandler.Received() + sort.Slice(receivedAfterRestart, func(i, j int) bool { + return receivedAfterRestart[i].Timestamp.Before(receivedAfterRestart[j].Timestamp) + }) + actualLinesAfterRestart := make([]string, 0, 5) + for _, entry := range receivedAfterRestart[:5] { + actualLinesAfterRestart = append(actualLinesAfterRestart, entry.Line) + } + expectedLinesAfterRestart := []string{ + "243.115.12.215 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /morph/exploit/granular HTTP/1.0\" 500 26468", + "221.41.123.237 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /user-centric/whiteboard HTTP/2.0\" 205 22487", + "89.111.144.144 - - [09/Dec/2023:09:16:57 +0000] 
\"DELETE /open-source/e-commerce HTTP/1.0\" 401 11092", + "62.180.191.187 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /cultivate/integrate/technologies HTTP/2.0\" 302 12979", + "156.249.2.192 - - [09/Dec/2023:09:16:57 +0000] \"POST /revolutionize/mesh/metrics HTTP/2.0\" 401 5297", + } + require.ElementsMatch(t, actualLinesAfterRestart, expectedLinesAfterRestart) } diff --git a/clients/pkg/promtail/targets/docker/testdata/flog_after_restart.log b/clients/pkg/promtail/targets/docker/testdata/flog_after_restart.log new file mode 100644 index 000000000000..59afb576805e Binary files /dev/null and b/clients/pkg/promtail/targets/docker/testdata/flog_after_restart.log differ diff --git a/cmd/loki/main.go b/cmd/loki/main.go index 845104eee8de..937a5c16fab8 100644 --- a/cmd/loki/main.go +++ b/cmd/loki/main.go @@ -10,7 +10,9 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/log" + "github.com/grafana/dskit/spanprofiler" "github.com/grafana/dskit/tracing" + "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/version" @@ -84,7 +86,9 @@ func main() { if err != nil { level.Error(util_log.Logger).Log("msg", "error in initializing tracing. tracing will not be enabled", "err", err) } - + if config.Tracing.ProfilingEnabled { + opentracing.SetGlobalTracer(spanprofiler.NewTracer(opentracing.GlobalTracer())) + } defer func() { if trace != nil { if err := trace.Close(); err != nil { diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md index e2185c19474f..edb394733a3c 100644 --- a/docs/sources/configure/_index.md +++ b/docs/sources/configure/_index.md @@ -731,6 +731,11 @@ The `frontend` block configures the Loki query-frontend. # CLI flag: -frontend.log-queries-longer-than [log_queries_longer_than: | default = 0s] +# Comma-separated list of request header names to include in query logs. Applies +# to both query stats and slow queries logs. +# CLI flag: -frontend.log-query-request-headers +[log_query_request_headers: | default = ""] + # Max body size for downstream prometheus. # CLI flag: -frontend.max-body-size [max_body_size: | default = 10485760] @@ -2830,6 +2835,12 @@ The `limits_config` block configures global and per-tenant limits in Loki. # CLI flag: -frontend.max-cache-freshness [max_cache_freshness_per_query: | default = 10m] +# Do not cache metadata request if the end time is within the +# frontend.max-metadata-cache-freshness window. Set this to 0 to apply no such +# limits. Defaults to 24h. +# CLI flag: -frontend.max-metadata-cache-freshness +[max_metadata_cache_freshness: | default = 1d] + # Do not cache requests with an end time that falls within Now minus this # duration. 0 disables this feature (default). # CLI flag: -frontend.max-stats-cache-freshness @@ -2884,6 +2895,12 @@ The `limits_config` block configures global and per-tenant limits in Loki. # CLI flag: -querier.split-metadata-queries-by-interval [split_metadata_queries_by_interval: | default = 1d] +# Interval to use for time-based splitting when a request is within the +# `query_ingesters_within` window; defaults to `split-queries-by-interval` by +# setting to 0. +# CLI flag: -querier.split-ingester-queries-by-interval +[split_ingester_queries_by_interval: | default = 0s] + # Limit queries that can be sharded. Queries within the time range of now and # now minus this sharding lookback are not sharded. The default value of 0s # disables the lookback, causing sharding of all queries at all times. 
@@ -3088,6 +3105,10 @@ shard_streams: # CLI flag: -bloom-compactor.enable-compaction [bloom_compactor_enable_compaction: | default = false] +# The batch size of the chunks the bloom-compactor downloads at once. +# CLI flag: -bloom-compactor.chunks-batch-size +[bloom_compactor_chunks_batch_size: | default = 100] + # Length of the n-grams created when computing blooms from log lines. # CLI flag: -bloom-compactor.ngram-length [bloom_ngram_length: | default = 4] diff --git a/docs/sources/query/query_examples.md b/docs/sources/query/query_examples.md index 7eb4145acf74..1298a36f4a91 100644 --- a/docs/sources/query/query_examples.md +++ b/docs/sources/query/query_examples.md @@ -50,7 +50,7 @@ These LogQL query examples have explanations of what the queries accomplish. != "grafana_com" |= "session opened" != "sudo: " - |regexp "(^(?P\\S+ {1,2}){11})" + | regexp "(^(?P\\S+ {1,2}){11})" | line_format "USER = {{.user}}" ``` diff --git a/docs/sources/send-data/fluentbit/_index.md b/docs/sources/send-data/fluentbit/_index.md index c9088fdc8f88..a8d052c7262a 100644 --- a/docs/sources/send-data/fluentbit/_index.md +++ b/docs/sources/send-data/fluentbit/_index.md @@ -8,7 +8,9 @@ weight: 500 --- # Fluent Bit client -[Fluent Bit](https://fluentbit.io/) is a fast and lightweight logs and metrics processor and forwarder that can be configured with the [Grafana Loki output plugin](https://docs.fluentbit.io/manual/pipeline/outputs/loki) to ship logs to Loki. You can define which log files you want to collect using the [`Tail`](https://docs.fluentbit.io/manual/pipeline/inputs/tail) or [`Stdin`](https://docs.fluentbit.io/manual/pipeline/inputs/standard-input) data pipeline input. Additionally, Fluent Bit supports multiple `Filter` and `Parser` plugins (`Kubernetes`, `JSON`, etc.) to structure and alter log lines. +[Fluent Bit](https://fluentbit.io/) is a fast and lightweight logs and metrics processor and forwarder that can be configured with the Grafana Fluent Bit Plugin described here or with the [Fluent-bit Loki output plugin](https://docs.fluentbit.io/manual/pipeline/outputs/loki) to ship logs to Loki. +This plugin has more configuration options compared to the built-in Fluent Bit Loki plugin. +You can define which log files you want to collect using the [`Tail`](https://docs.fluentbit.io/manual/pipeline/inputs/tail) or [`Stdin`](https://docs.fluentbit.io/manual/pipeline/inputs/standard-input) data pipeline input. Additionally, Fluent Bit supports multiple `Filter` and `Parser` plugins (`Kubernetes`, `JSON`, etc.) to structure and alter log lines. ## Usage @@ -63,23 +65,59 @@ To ship logs from Docker containers to Grafana Cloud using Fluent Bit, you can u You can run Fluent Bit as a [Daemonset](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) to collect all your Kubernetes workload logs. 
-To do so you can use our [Fluent Bit helm chart](https://github.com/grafana/helm-charts/tree/main/charts/fluent-bit): +To do so you can use the [Fluent Bit helm chart](https://github.com/fluent/helm-charts) with the following `values.yaml`, changing the value of `FLUENT_LOKI_URL`: + +```yaml +image: + # Here we use the Docker image which has the plugin installed + repository: grafana/fluent-bit-plugin-loki + tag: main-e2ed1c0 + +args: + - "-e" + - "/fluent-bit/bin/out_grafana_loki.so" + - --workdir=/fluent-bit/etc + - --config=/fluent-bit/etc/conf/fluent-bit.conf + +env: + # Note that for security reasons you should fetch the credentials through a Kubernetes Secret https://kubernetes.io/docs/concepts/configuration/secret/ . You may use envFrom for this. + - name: FLUENT_LOKI_URL + value: https://user:pass@your-loki.endpoint/loki/api/v1/push + +config: + inputs: | + [INPUT] + Name tail + Tag kube.* + Path /var/log/containers/*.log + # Be aware that local clusters like docker-desktop or kind use the docker log format and not the cri (https://docs.fluentbit.io/manual/installation/kubernetes#container-runtime-interface-cri-parser) + multiline.parser docker, cri + Mem_Buf_Limit 5MB + Skip_Long_Lines On + + outputs: | + [Output] + Name grafana-loki + Match kube.* + Url ${FLUENT_LOKI_URL} + Labels {job="fluent-bit"} + LabelKeys level,app # this sets the values for actual Loki streams and the other labels are converted to structured_metadata https://grafana.com/docs/loki/latest/get-started/labels/structured-metadata/ + BatchWait 1 + BatchSize 1001024 + LineFormat json + LogLevel info + AutoKubernetesLabels true +``` ```bash -helm repo add grafana https://grafana.github.io/helm-charts +helm repo add fluent https://fluent.github.io/helm-charts helm repo update -helm upgrade --install fluent-bit grafana/fluent-bit \ - --set loki.serviceName=loki.svc.cluster.local +helm install fluent-bit fluent/fluent-bit -f values.yaml ``` By default it will collect all container logs and extract labels from the Kubernetes API (`container_name`, `namespace`, etc.). -Alternatively you can install the Loki and Fluent Bit all together using: - -```bash -helm upgrade --install loki-stack grafana/loki-stack \ - --set fluent-bit.enabled=true,promtail.enabled=false -``` +If you also want to host your Loki instance inside the cluster, install the [official Loki helm chart](https://grafana.com/docs/loki/latest/setup/install/helm/). ### AWS Elastic Container Service (ECS) diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index 631aadacac02..91df60bffd88 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -1242,6 +1242,15 @@
 null
 </pre>
 </td>
 	</tr>
+	<tr>
+		<td>gateway.nginxConfig.enableIPv6</td>
+		<td>bool</td>
+		<td>Enable listener for IPv6, disable on IPv4-only systems</td>
+		<td><pre lang="json">
+true
+</pre>
+</td>
+	</tr>
@@ -2707,6 +2716,15 @@
 {}
 </pre>
 </td>
 	</tr>
+	<tr>
+		<td>monitoring.rules.disabled</td>
+		<td>object</td>
+		<td>If you disable all the alerts and keep .monitoring.rules.alerting set to true, the chart will fail to render.</td>
+		<td><pre lang="json">
+{}
+</pre>
+</td>
+	</tr>
diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md index 663201820e1e..84fac5835b31 100644 --- a/docs/sources/setup/upgrade/_index.md +++ b/docs/sources/setup/upgrade/_index.md @@ -165,6 +165,7 @@ This new metric will provide a more clear signal that there is an issue with ing | `querier.tsdb-max-query-parallelism` | 128 | 512 | - | | `query-scheduler.max-outstanding-requests-per-tenant` | 32000 | 100 | - | | `validation.max-label-names-per-series` | 15 | 30 | - | +| `legacy-read-mode` | false | true | Deprecated. It will be removed in the next minor release. | {{% /responsive-table %}} #### Write dedupe cache is deprecated diff --git a/integration/client/client.go b/integration/client/client.go index dcf2c036dc9e..2e5a86aa6b3d 100644 --- a/integration/client/client.go +++ b/integration/client/client.go @@ -479,12 +479,21 @@ type Header struct { Name, Value string } -// RunRangeQuery runs a query and returns an error if anything went wrong +// RunRangeQuery runs a 7d query and returns an error if anything went wrong. +// This function is kept for backwards compatibility with existing tests. +// Prefer (*Client).RunRangeQueryWithStartEnd() instead. func (c *Client) RunRangeQuery(ctx context.Context, query string, extraHeaders ...Header) (*Response, error) { + end := c.Now.Add(time.Second) + start := c.Now.Add(-7 * 24 * time.Hour) + return c.RunRangeQueryWithStartEnd(ctx, query, start, end, extraHeaders...) +} + +// RunRangeQueryWithStartEnd runs a query and returns an error if anything went wrong +func (c *Client) RunRangeQueryWithStartEnd(ctx context.Context, query string, start, end time.Time, extraHeaders ...Header) (*Response, error) { ctx, cancelFunc := context.WithTimeout(ctx, requestTimeout) defer cancelFunc() - buf, statusCode, err := c.run(ctx, c.rangeQueryURL(query), extraHeaders...) + buf, statusCode, err := c.run(ctx, c.rangeQueryURL(query, start, end), extraHeaders...)
if err != nil { return nil, err } @@ -555,11 +564,11 @@ func (c *Client) parseResponse(buf []byte, statusCode int) (*Response, error) { return &lokiResp, nil } -func (c *Client) rangeQueryURL(query string) string { +func (c *Client) rangeQueryURL(query string, start, end time.Time) string { v := url.Values{} v.Set("query", query) - v.Set("start", formatTS(c.Now.Add(-7*24*time.Hour))) - v.Set("end", formatTS(c.Now.Add(time.Second))) + v.Set("start", formatTS(start)) + v.Set("end", formatTS(end)) u, err := url.Parse(c.baseURL) if err != nil { diff --git a/integration/cluster/cluster.go b/integration/cluster/cluster.go index 8ddeac00f178..831da46f2cb9 100644 --- a/integration/cluster/cluster.go +++ b/integration/cluster/cluster.go @@ -43,7 +43,6 @@ server: grpc_server_max_recv_msg_size: 110485813 grpc_server_max_send_msg_size: 110485813 - common: path_prefix: {{.dataPath}} storage: @@ -70,14 +69,25 @@ storage_config: store-1: directory: {{.sharedDataPath}}/fs-store-1 boltdb_shipper: - active_index_directory: {{.dataPath}}/index + active_index_directory: {{.dataPath}}/boltdb-index cache_location: {{.dataPath}}/boltdb-cache tsdb_shipper: active_index_directory: {{.dataPath}}/tsdb-index cache_location: {{.dataPath}}/tsdb-cache + bloom_shipper: + working_directory: {{.dataPath}}/bloom-shipper + blocks_downloading_queue: + workers_count: 1 + +bloom_gateway: + enabled: false + +bloom_compactor: + enabled: false + working_directory: {{.dataPath}}/bloom-compactor compactor: - working_directory: {{.dataPath}}/retention + working_directory: {{.dataPath}}/compactor retention_enabled: true delete_request_store: store-1 @@ -154,14 +164,14 @@ func New(logLevel level.Value, opts ...func(*Cluster)) *Cluster { } resetMetricRegistry() - sharedPath, err := os.MkdirTemp("", "loki-shared-data") + sharedPath, err := os.MkdirTemp("", "loki-shared-data-") if err != nil { panic(err.Error()) } overridesFile := filepath.Join(sharedPath, "loki-overrides.yaml") - err = os.WriteFile(filepath.Join(sharedPath, "loki-overrides.yaml"), []byte(`overrides:`), 0777) + err = os.WriteFile(overridesFile, []byte(`overrides:`), 0777) if err != nil { panic(fmt.Errorf("error creating overrides file: %w", err)) } @@ -318,12 +328,12 @@ func port(addr string) string { func (c *Component) writeConfig() error { var err error - configFile, err := os.CreateTemp("", "loki-config") + configFile, err := os.CreateTemp("", fmt.Sprintf("loki-%s-config-*.yaml", c.name)) if err != nil { return fmt.Errorf("error creating config file: %w", err) } - c.dataPath, err = os.MkdirTemp("", "loki-data") + c.dataPath, err = os.MkdirTemp("", fmt.Sprintf("loki-%s-data-", c.name)) if err != nil { return fmt.Errorf("error creating data path: %w", err) } @@ -408,6 +418,8 @@ func (c *Component) run() error { c.configFile, "-limits.per-user-override-config", c.overridesFile, + "-limits.per-user-override-period", + "1s", ), flagset); err != nil { return err } diff --git a/integration/loki_micro_services_test.go b/integration/loki_micro_services_test.go index a4d03ed10a67..0c05d13d8ef3 100644 --- a/integration/loki_micro_services_test.go +++ b/integration/loki_micro_services_test.go @@ -3,16 +3,20 @@ package integration import ( "context" "encoding/json" + "fmt" + "math/rand" "strings" "sync" "testing" "time" + "github.com/go-kit/log/level" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" 
"google.golang.org/protobuf/proto" "github.com/grafana/loki/integration/client" @@ -1056,16 +1060,219 @@ func TestCategorizedLabels(t *testing.T) { } } +func TestBloomFiltersEndToEnd(t *testing.T) { + commonFlags := []string{ + "-bloom-compactor.compaction-interval=10s", + "-bloom-compactor.enable-compaction=true", + "-bloom-compactor.enabled=true", + "-bloom-gateway.enable-filtering=true", + "-bloom-gateway.enabled=true", + "-compactor.compaction-interval=1s", + "-frontend.default-validity=0s", + "-ingester.flush-on-shutdown=true", + "-ingester.wal-enabled=false", + "-query-scheduler.use-scheduler-ring=false", + "-store.index-cache-read.embedded-cache.enabled=true", + } + + tenantID := randStringRunes() + + clu := cluster.New( + level.DebugValue(), + cluster.SchemaWithTSDB, + func(c *cluster.Cluster) { c.SetSchemaVer("v13") }, + ) + + defer func() { + assert.NoError(t, clu.Cleanup()) + }() + + var ( + tDistributor = clu.AddComponent( + "distributor", + append( + commonFlags, + "-target=distributor", + )..., + ) + tIndexGateway = clu.AddComponent( + "index-gateway", + append( + commonFlags, + "-target=index-gateway", + )..., + ) + tBloomGateway = clu.AddComponent( + "bloom-gateway", + append( + commonFlags, + "-target=bloom-gateway", + )..., + ) + ) + require.NoError(t, clu.Run()) + + var ( + tIngester = clu.AddComponent( + "ingester", + append( + commonFlags, + "-target=ingester", + "-tsdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + )..., + ) + tQueryScheduler = clu.AddComponent( + "query-scheduler", + append( + commonFlags, + "-target=query-scheduler", + "-tsdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + )..., + ) + tCompactor = clu.AddComponent( + "compactor", + append( + commonFlags, + "-target=compactor", + "-tsdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + )..., + ) + tBloomCompactor = clu.AddComponent( + "bloom-compactor", + append( + commonFlags, + "-target=bloom-compactor", + "-tsdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + )..., + ) + ) + require.NoError(t, clu.Run()) + + // finally, run the query-frontend and querier. 
+ var ( + tQueryFrontend = clu.AddComponent( + "query-frontend", + append( + commonFlags, + "-target=query-frontend", + "-frontend.scheduler-address="+tQueryScheduler.GRPCURL(), + "-common.compactor-address="+tCompactor.HTTPURL(), + "-tsdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + )..., + ) + _ = clu.AddComponent( + "querier", + append( + commonFlags, + "-target=querier", + "-querier.scheduler-address="+tQueryScheduler.GRPCURL(), + "-common.compactor-address="+tCompactor.HTTPURL(), + "-tsdb.shipper.index-gateway-client.server-address="+tIndexGateway.GRPCURL(), + )..., + ) + ) + require.NoError(t, clu.Run()) + + now := time.Now() + + cliDistributor := client.New(tenantID, "", tDistributor.HTTPURL()) + cliDistributor.Now = now + + cliIngester := client.New(tenantID, "", tIngester.HTTPURL()) + cliIngester.Now = now + + cliQueryFrontend := client.New(tenantID, "", tQueryFrontend.HTTPURL()) + cliQueryFrontend.Now = now + + cliIndexGateway := client.New(tenantID, "", tIndexGateway.HTTPURL()) + cliIndexGateway.Now = now + + cliBloomGateway := client.New(tenantID, "", tBloomGateway.HTTPURL()) + cliBloomGateway.Now = now + + cliBloomCompactor := client.New(tenantID, "", tBloomCompactor.HTTPURL()) + cliBloomCompactor.Now = now + + lineTpl := `caller=loki_micro_services_test.go msg="push log line" id="%s"` + // ingest logs from 10 different pods + // each line contains a random, unique string + // that string is used to verify filtering using bloom gateway + uniqueStrings := make([]string, 600) + for i := 0; i < len(uniqueStrings); i++ { + id := randStringRunes() + id = fmt.Sprintf("%s-%d", id, i) + uniqueStrings[i] = id + pod := fmt.Sprintf("pod-%d", i%10) + line := fmt.Sprintf(lineTpl, id) + err := cliDistributor.PushLogLine(line, now.Add(-1*time.Hour).Add(time.Duration(i-len(uniqueStrings))*time.Second), nil, map[string]string{"pod": pod}) + require.NoError(t, err) + } + + // restart the ingester to flush chunks and ensure that zero chunks remain in memory + require.NoError(t, cliIngester.Flush()) + require.NoError(t, tIngester.Restart()) + + // wait for compactor to compact index and for bloom compactor to build bloom filters + require.Eventually(t, func() bool { + // wait for at least one successful bloom compactor run + metrics, err := cliBloomCompactor.Metrics() + require.NoError(t, err) + successfulRunCount := getMetricValue(t, "loki_bloomcompactor_runs_completed_total", metrics) + t.Log("successful bloom compactor runs", successfulRunCount) + return successfulRunCount == 1 + }, 30*time.Second, time.Second) + + // use bloom gateway to perform needle-in-the-haystack queries + randIdx := rand.Intn(len(uniqueStrings)) + q := fmt.Sprintf(`{job="varlog"} |= "%s"`, uniqueStrings[randIdx]) + end := now.Add(-1 * time.Second) + start := end.Add(-24 * time.Hour) + resp, err := cliQueryFrontend.RunRangeQueryWithStartEnd(context.Background(), q, start, end) + require.NoError(t, err) + + // verify response + require.Len(t, resp.Data.Stream, 1) + expectedLine := fmt.Sprintf(lineTpl, uniqueStrings[randIdx]) + require.Equal(t, expectedLine, resp.Data.Stream[0].Values[0][1]) + + // verify metrics that observe usage of block for filtering + bloomGwMetrics, err := cliBloomGateway.Metrics() + require.NoError(t, err) + + unfilteredCount := getMetricValue(t, "loki_bloom_gateway_chunkrefs_pre_filtering", bloomGwMetrics) + require.Equal(t, float64(10), unfilteredCount) + + filteredCount := getMetricValue(t, "loki_bloom_gateway_chunkrefs_post_filtering", bloomGwMetrics) + require.Equal(t,
float64(1), filteredCount) + + mf, err := extractMetricFamily("loki_bloom_gateway_bloom_query_latency", bloomGwMetrics) + require.NoError(t, err) + + count := getValueFromMetricFamilyWithFunc(mf, &dto.LabelPair{ + Name: proto.String("status"), + Value: proto.String("success"), + }, func(m *dto.Metric) uint64 { + return m.Histogram.GetSampleCount() + }) + require.Equal(t, uint64(1), count) +} + func getValueFromMF(mf *dto.MetricFamily, lbs []*dto.LabelPair) float64 { + return getValueFromMetricFamilyWithFunc(mf, lbs[0], func(m *dto.Metric) float64 { return m.Counter.GetValue() }) +} + +func getValueFromMetricFamilyWithFunc[R any](mf *dto.MetricFamily, lbs *dto.LabelPair, f func(*dto.Metric) R) R { + eq := func(e *dto.LabelPair) bool { + return e.GetName() == lbs.GetName() && e.GetValue() == lbs.GetValue() + } + var zero R for _, m := range mf.Metric { - if !assert.ObjectsAreEqualValues(lbs, m.GetLabel()) { + if !slices.ContainsFunc(m.GetLabel(), eq) { continue } - - return m.Counter.GetValue() + return f(m) } - - return 0 + return zero } func assertCacheState(t *testing.T, metrics string, e *expectedCacheState) { diff --git a/integration/multi_tenant_queries_test.go b/integration/multi_tenant_queries_test.go index 468cc98de18a..76fbd63f13bd 100644 --- a/integration/multi_tenant_queries_test.go +++ b/integration/multi_tenant_queries_test.go @@ -13,6 +13,7 @@ import ( ) func TestMultiTenantQuery(t *testing.T) { + t.Skip("This test is flaky on CI but it's hardly reproducible locally.") clu := cluster.New(nil) defer func() { assert.NoError(t, clu.Cleanup()) @@ -36,24 +37,25 @@ func TestMultiTenantQuery(t *testing.T) { require.NoError(t, cliTenant2.PushLogLine("lineB", cliTenant2.Now.Add(-45*time.Minute), nil, map[string]string{"job": "fake2"})) // check that tenant1 only have access to log line A. - matchLines(t, cliTenant1, `{job="fake2"}`, []string{}) - matchLines(t, cliTenant1, `{job=~"fake.*"}`, []string{"lineA"}) - matchLines(t, cliTenant1, `{job="fake1"}`, []string{"lineA"}) + require.ElementsMatch(t, query(t, cliTenant1, `{job="fake2"}`), []string{}) + require.ElementsMatch(t, query(t, cliTenant1, `{job=~"fake.*"}`), []string{"lineA"}) + require.ElementsMatch(t, query(t, cliTenant1, `{job="fake1"}`), []string{"lineA"}) // check that tenant2 only have access to log line B. - matchLines(t, cliTenant2, `{job="fake1"}`, []string{}) - matchLines(t, cliTenant2, `{job=~"fake.*"}`, []string{"lineB"}) - matchLines(t, cliTenant2, `{job="fake2"}`, []string{"lineB"}) + require.ElementsMatch(t, query(t, cliTenant2, `{job="fake1"}`), []string{}) + require.ElementsMatch(t, query(t, cliTenant2, `{job=~"fake.*"}`), []string{"lineB"}) + require.ElementsMatch(t, query(t, cliTenant2, `{job="fake2"}`), []string{"lineB"}) // check that multitenant has access to all log lines on same query. 
- matchLines(t, cliMultitenant, `{job=~"fake.*"}`, []string{"lineA", "lineB"}) - matchLines(t, cliMultitenant, `{job="fake1"}`, []string{"lineA"}) - matchLines(t, cliMultitenant, `{job="fake2"}`, []string{"lineB"}) - matchLines(t, cliMultitenant, `{job="fake3"}`, []string{}) + require.ElementsMatch(t, query(t, cliMultitenant, `{job=~"fake.*"}`), []string{"lineA", "lineB"}) + require.ElementsMatch(t, query(t, cliMultitenant, `{job="fake1"}`), []string{"lineA"}) + require.ElementsMatch(t, query(t, cliMultitenant, `{job="fake2"}`), []string{"lineB"}) + require.ElementsMatch(t, query(t, cliMultitenant, `{job="fake3"}`), []string{}) } -func matchLines(t *testing.T, client *client.Client, labels string, expectedLines []string) { - resp, err := client.RunRangeQuery(context.Background(), labels) +func query(t *testing.T, client *client.Client, labels string) []string { + t.Helper() + resp, err := client.RunRangeQueryWithStartEnd(context.Background(), labels, client.Now.Add(-1*time.Hour), client.Now) require.NoError(t, err) var lines []string @@ -62,5 +64,5 @@ func matchLines(t *testing.T, client *client.Client, labels string, expectedLine lines = append(lines, val[1]) } } - require.ElementsMatch(t, expectedLines, lines) + return lines } diff --git a/integration/parse_metrics.go b/integration/parse_metrics.go index 46ea42497856..9f2bf5fc8fc2 100644 --- a/integration/parse_metrics.go +++ b/integration/parse_metrics.go @@ -13,16 +13,24 @@ var ( ErrInvalidMetricType = fmt.Errorf("invalid metric type") ) -func extractMetric(metricName, metrics string) (float64, map[string]string, error) { +func extractMetricFamily(name, metrics string) (*io_prometheus_client.MetricFamily, error) { var parser expfmt.TextParser mfs, err := parser.TextToMetricFamilies(strings.NewReader(metrics)) if err != nil { - return 0, nil, err + return nil, err + } + + mf, ok := mfs[name] + if !ok { + return nil, ErrNoMetricFound } + return mf, nil +} - mf, found := mfs[metricName] - if !found { - return 0, nil, ErrNoMetricFound +func extractMetric(metricName, metrics string) (float64, map[string]string, error) { + mf, err := extractMetricFamily(metricName, metrics) + if err != nil { + return 0, nil, err } var val float64 diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index 9ea61a0dba4e..2f57c42a78d7 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,9 @@ ## Main +- [11513](https://github.com/grafana/loki/pull/11513) **btaani**: Add a custom metric that collects Lokistacks requiring a schema upgrade +- [11718](https://github.com/grafana/loki/pull/11718) **periklis**: Upgrade k8s.io, sigs.k8s.io and openshift deps +- [11671](https://github.com/grafana/loki/pull/11671) **JoaoBraveCoding**: Update mixins to fix structured metadata dashboards +- [11624](https://github.com/grafana/loki/pull/11624) **xperimental**: React to changes in ConfigMap used for storage CA - [11481](https://github.com/grafana/loki/pull/11481) **JoaoBraveCoding**: Adds AWS STS support - [11533](https://github.com/grafana/loki/pull/11533) **periklis**: Add serviceaccount per LokiStack resource - [11158](https://github.com/grafana/loki/pull/11158) **btaani**: operator: Add warning for old schema configuration diff --git a/operator/apis/config/v1/projectconfig_types.go b/operator/apis/config/v1/projectconfig_types.go index b6a80175266b..488f7b2cb64f 100644 --- a/operator/apis/config/v1/projectconfig_types.go +++ b/operator/apis/config/v1/projectconfig_types.go @@ -2,7 +2,7 @@ package v1 import ( metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - cfg "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" //nolint:staticcheck + configv1alpha1 "k8s.io/component-base/config/v1alpha1" ) // BuiltInCertManagement is the configuration for the built-in facility to generate and rotate @@ -142,6 +142,67 @@ const ( TLSProfileModernType TLSProfileType = "Modern" ) +// ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration. +type ControllerManagerConfigurationSpec struct { + // LeaderElection is the LeaderElection config to be used when configuring + // the manager.Manager leader election + // +optional + LeaderElection *configv1alpha1.LeaderElectionConfiguration `json:"leaderElection,omitempty"` + + // Metrics contains the controller metrics configuration + // +optional + Metrics ControllerMetrics `json:"metrics,omitempty"` + + // Health contains the controller health configuration + // +optional + Health ControllerHealth `json:"health,omitempty"` + + // Webhook contains the controllers webhook configuration + // +optional + Webhook ControllerWebhook `json:"webhook,omitempty"` +} + +// ControllerMetrics defines the metrics configs. +type ControllerMetrics struct { + // BindAddress is the TCP address that the controller should bind to + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. + // +optional + BindAddress string `json:"bindAddress,omitempty"` +} + +// ControllerHealth defines the health configs. +type ControllerHealth struct { + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + // It can be set to "0" or "" to disable serving the health probe. + // +optional + HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"` +} + +// ControllerWebhook defines the webhook server for the controller. +type ControllerWebhook struct { + // Port is the port that the webhook server serves at. + // It is used to set webhook.Server.Port. + // +optional + Port *int `json:"port,omitempty"` +} + +//+kubebuilder:object:root=true + +// ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API. +type ControllerManagerConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // ControllerManagerConfiguration returns the contfigurations for controllers + ControllerManagerConfigurationSpec `json:",inline"` +} + +// Complete returns the configuration for controller-runtime. +func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) { + return *c, nil +} + //+kubebuilder:object:root=true // ProjectConfig is the Schema for the projectconfigs API @@ -149,7 +210,7 @@ type ProjectConfig struct { metav1.TypeMeta `json:",inline"` // ControllerManagerConfigurationSpec returns the contfigurations for controllers - cfg.ControllerManagerConfigurationSpec `json:",inline"` + ControllerManagerConfigurationSpec `json:",inline"` Gates FeatureGates `json:"featureGates,omitempty"` } diff --git a/operator/apis/config/v1/zz_generated.deepcopy.go b/operator/apis/config/v1/zz_generated.deepcopy.go index ef20274e286e..f047818445aa 100644 --- a/operator/apis/config/v1/zz_generated.deepcopy.go +++ b/operator/apis/config/v1/zz_generated.deepcopy.go @@ -6,6 +6,7 @@ package v1 import ( runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/component-base/config/v1alpha1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -23,6 +24,104 @@ func (in *BuiltInCertManagement) DeepCopy() *BuiltInCertManagement { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerHealth) DeepCopyInto(out *ControllerHealth) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerHealth. +func (in *ControllerHealth) DeepCopy() *ControllerHealth { + if in == nil { + return nil + } + out := new(ControllerHealth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerManagerConfiguration) DeepCopyInto(out *ControllerManagerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ControllerManagerConfigurationSpec.DeepCopyInto(&out.ControllerManagerConfigurationSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfiguration. +func (in *ControllerManagerConfiguration) DeepCopy() *ControllerManagerConfiguration { + if in == nil { + return nil + } + out := new(ControllerManagerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerManagerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerManagerConfigurationSpec) DeepCopyInto(out *ControllerManagerConfigurationSpec) { + *out = *in + if in.LeaderElection != nil { + in, out := &in.LeaderElection, &out.LeaderElection + *out = new(v1alpha1.LeaderElectionConfiguration) + (*in).DeepCopyInto(*out) + } + out.Metrics = in.Metrics + out.Health = in.Health + in.Webhook.DeepCopyInto(&out.Webhook) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfigurationSpec. +func (in *ControllerManagerConfigurationSpec) DeepCopy() *ControllerManagerConfigurationSpec { + if in == nil { + return nil + } + out := new(ControllerManagerConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerMetrics) DeepCopyInto(out *ControllerMetrics) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerMetrics. +func (in *ControllerMetrics) DeepCopy() *ControllerMetrics { + if in == nil { + return nil + } + out := new(ControllerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerWebhook) DeepCopyInto(out *ControllerWebhook) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerWebhook. +func (in *ControllerWebhook) DeepCopy() *ControllerWebhook { + if in == nil { + return nil + } + out := new(ControllerWebhook) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *FeatureGates) DeepCopyInto(out *FeatureGates) { *out = *in diff --git a/operator/apis/loki/go.mod b/operator/apis/loki/go.mod index 07594c20c6a5..72171d939351 100644 --- a/operator/apis/loki/go.mod +++ b/operator/apis/loki/go.mod @@ -6,7 +6,7 @@ require ( github.com/stretchr/testify v1.8.2 k8s.io/api v0.26.9 k8s.io/apimachinery v0.26.9 - k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 + k8s.io/utils v0.0.0-20240102154912-e7106e64919e sigs.k8s.io/controller-runtime v0.14.5 ) diff --git a/operator/apis/loki/go.sum b/operator/apis/loki/go.sum index 3180306f546d..0a9dba3419bb 100644 --- a/operator/apis/loki/go.sum +++ b/operator/apis/loki/go.sum @@ -82,8 +82,8 @@ k8s.io/apimachinery v0.26.9 h1:5yAV9cFR7Z4gIorKcAjWnx4uxtxiFsERwq4Pvmx0CCg= k8s.io/apimachinery v0.26.9/go.mod h1:qYzLkrQ9lhrZRh0jNKo2cfvf/R1/kQONnSiyB7NUJU0= k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 h1:xMMXJlJbsU8w3V5N2FLDQ8YgU8s1EoULdbQBcAeNJkY= -k8s.io/utils v0.0.0-20230313181309-38a27ef9d749/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.14.5 h1:6xaWFqzT5KuAQ9ufgUaj1G/+C4Y1GRkhrxl+BJ9i+5s= sigs.k8s.io/controller-runtime v0.14.5/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= diff --git a/operator/apis/loki/v1beta1/rulerconfig_types_test.go b/operator/apis/loki/v1beta1/rulerconfig_types_test.go index 6e133811b319..81c3ec8ca27e 100644 --- a/operator/apis/loki/v1beta1/rulerconfig_types_test.go +++ b/operator/apis/loki/v1beta1/rulerconfig_types_test.go @@ -7,7 +7,7 @@ import ( "github.com/grafana/loki/operator/apis/loki/v1beta1" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestConvertToV1_RulerConfig(t *testing.T) { @@ -79,19 +79,19 @@ func TestConvertToV1_RulerConfig(t *testing.T) { }, Client: &v1beta1.AlertManagerClientConfig{ TLS: &v1beta1.AlertManagerClientTLSConfig{ - CAPath: pointer.String("/tls/ca/path"), - ServerName: pointer.String("server"), - CertPath: pointer.String("/tls/cert/path"), - KeyPath: pointer.String("/tls/key/path"), + CAPath: ptr.To("/tls/ca/path"), + ServerName: ptr.To("server"), + CertPath: ptr.To("/tls/cert/path"), + KeyPath: ptr.To("/tls/key/path"), }, HeaderAuth: &v1beta1.AlertManagerClientHeaderAuth{ - Type: pointer.String("type"), - Credentials: pointer.String("creds"), - CredentialsFile: pointer.String("creds-file"), + Type: ptr.To("type"), + Credentials: ptr.To("creds"), + CredentialsFile: ptr.To("creds-file"), }, BasicAuth: &v1beta1.AlertManagerClientBasicAuth{ - Username: pointer.String("user"), - Password: pointer.String("pass"), + Username: ptr.To("user"), + Password: ptr.To("pass"), }, }, }, @@ -175,19 +175,19 @@ func TestConvertToV1_RulerConfig(t *testing.T) { }, Client: &v1beta1.AlertManagerClientConfig{ TLS: &v1beta1.AlertManagerClientTLSConfig{ - CAPath: pointer.String("/tls/ca/path-1"), - ServerName: pointer.String("server-1"), - CertPath: pointer.String("/tls/cert/path-1"), - KeyPath: pointer.String("/tls/key/path-1"), + CAPath: ptr.To("/tls/ca/path-1"), + ServerName: ptr.To("server-1"), + 
CertPath: ptr.To("/tls/cert/path-1"), + KeyPath: ptr.To("/tls/key/path-1"), }, HeaderAuth: &v1beta1.AlertManagerClientHeaderAuth{ - Type: pointer.String("type-1"), - Credentials: pointer.String("creds-1"), - CredentialsFile: pointer.String("creds-file-1"), + Type: ptr.To("type-1"), + Credentials: ptr.To("creds-1"), + CredentialsFile: ptr.To("creds-file-1"), }, BasicAuth: &v1beta1.AlertManagerClientBasicAuth{ - Username: pointer.String("user-1"), - Password: pointer.String("pass-1"), + Username: ptr.To("user-1"), + Password: ptr.To("pass-1"), }, }, }, @@ -234,19 +234,19 @@ func TestConvertToV1_RulerConfig(t *testing.T) { }, Client: &v1beta1.AlertManagerClientConfig{ TLS: &v1beta1.AlertManagerClientTLSConfig{ - CAPath: pointer.String("/tls/ca/path-1"), - ServerName: pointer.String("server-1"), - CertPath: pointer.String("/tls/cert/path-1"), - KeyPath: pointer.String("/tls/key/path-1"), + CAPath: ptr.To("/tls/ca/path-1"), + ServerName: ptr.To("server-1"), + CertPath: ptr.To("/tls/cert/path-1"), + KeyPath: ptr.To("/tls/key/path-1"), }, HeaderAuth: &v1beta1.AlertManagerClientHeaderAuth{ - Type: pointer.String("type-1"), - Credentials: pointer.String("creds-1"), - CredentialsFile: pointer.String("creds-file-1"), + Type: ptr.To("type-1"), + Credentials: ptr.To("creds-1"), + CredentialsFile: ptr.To("creds-file-1"), }, BasicAuth: &v1beta1.AlertManagerClientBasicAuth{ - Username: pointer.String("user-1"), - Password: pointer.String("pass-1"), + Username: ptr.To("user-1"), + Password: ptr.To("pass-1"), }, }, }, @@ -322,19 +322,19 @@ func TestConvertToV1_RulerConfig(t *testing.T) { }, Client: &v1.AlertManagerClientConfig{ TLS: &v1.AlertManagerClientTLSConfig{ - CAPath: pointer.String("/tls/ca/path"), - ServerName: pointer.String("server"), - CertPath: pointer.String("/tls/cert/path"), - KeyPath: pointer.String("/tls/key/path"), + CAPath: ptr.To("/tls/ca/path"), + ServerName: ptr.To("server"), + CertPath: ptr.To("/tls/cert/path"), + KeyPath: ptr.To("/tls/key/path"), }, HeaderAuth: &v1.AlertManagerClientHeaderAuth{ - Type: pointer.String("type"), - Credentials: pointer.String("creds"), - CredentialsFile: pointer.String("creds-file"), + Type: ptr.To("type"), + Credentials: ptr.To("creds"), + CredentialsFile: ptr.To("creds-file"), }, BasicAuth: &v1.AlertManagerClientBasicAuth{ - Username: pointer.String("user"), - Password: pointer.String("pass"), + Username: ptr.To("user"), + Password: ptr.To("pass"), }, }, }, @@ -418,19 +418,19 @@ func TestConvertToV1_RulerConfig(t *testing.T) { }, Client: &v1.AlertManagerClientConfig{ TLS: &v1.AlertManagerClientTLSConfig{ - CAPath: pointer.String("/tls/ca/path-1"), - ServerName: pointer.String("server-1"), - CertPath: pointer.String("/tls/cert/path-1"), - KeyPath: pointer.String("/tls/key/path-1"), + CAPath: ptr.To("/tls/ca/path-1"), + ServerName: ptr.To("server-1"), + CertPath: ptr.To("/tls/cert/path-1"), + KeyPath: ptr.To("/tls/key/path-1"), }, HeaderAuth: &v1.AlertManagerClientHeaderAuth{ - Type: pointer.String("type-1"), - Credentials: pointer.String("creds-1"), - CredentialsFile: pointer.String("creds-file-1"), + Type: ptr.To("type-1"), + Credentials: ptr.To("creds-1"), + CredentialsFile: ptr.To("creds-file-1"), }, BasicAuth: &v1.AlertManagerClientBasicAuth{ - Username: pointer.String("user-1"), - Password: pointer.String("pass-1"), + Username: ptr.To("user-1"), + Password: ptr.To("pass-1"), }, }, }, @@ -477,19 +477,19 @@ func TestConvertToV1_RulerConfig(t *testing.T) { }, Client: &v1.AlertManagerClientConfig{ TLS: &v1.AlertManagerClientTLSConfig{ - CAPath: 
pointer.String("/tls/ca/path-1"), - ServerName: pointer.String("server-1"), - CertPath: pointer.String("/tls/cert/path-1"), - KeyPath: pointer.String("/tls/key/path-1"), + CAPath: ptr.To("/tls/ca/path-1"), + ServerName: ptr.To("server-1"), + CertPath: ptr.To("/tls/cert/path-1"), + KeyPath: ptr.To("/tls/key/path-1"), }, HeaderAuth: &v1.AlertManagerClientHeaderAuth{ - Type: pointer.String("type-1"), - Credentials: pointer.String("creds-1"), - CredentialsFile: pointer.String("creds-file-1"), + Type: ptr.To("type-1"), + Credentials: ptr.To("creds-1"), + CredentialsFile: ptr.To("creds-file-1"), }, BasicAuth: &v1.AlertManagerClientBasicAuth{ - Username: pointer.String("user-1"), - Password: pointer.String("pass-1"), + Username: ptr.To("user-1"), + Password: ptr.To("pass-1"), }, }, }, @@ -593,19 +593,19 @@ func TestConvertFromV1_RulerConfig(t *testing.T) { }, Client: &v1.AlertManagerClientConfig{ TLS: &v1.AlertManagerClientTLSConfig{ - CAPath: pointer.String("/tls/ca/path"), - ServerName: pointer.String("server"), - CertPath: pointer.String("/tls/cert/path"), - KeyPath: pointer.String("/tls/key/path"), + CAPath: ptr.To("/tls/ca/path"), + ServerName: ptr.To("server"), + CertPath: ptr.To("/tls/cert/path"), + KeyPath: ptr.To("/tls/key/path"), }, HeaderAuth: &v1.AlertManagerClientHeaderAuth{ - Type: pointer.String("type"), - Credentials: pointer.String("creds"), - CredentialsFile: pointer.String("creds-file"), + Type: ptr.To("type"), + Credentials: ptr.To("creds"), + CredentialsFile: ptr.To("creds-file"), }, BasicAuth: &v1.AlertManagerClientBasicAuth{ - Username: pointer.String("user"), - Password: pointer.String("pass"), + Username: ptr.To("user"), + Password: ptr.To("pass"), }, }, }, @@ -689,19 +689,19 @@ func TestConvertFromV1_RulerConfig(t *testing.T) { }, Client: &v1.AlertManagerClientConfig{ TLS: &v1.AlertManagerClientTLSConfig{ - CAPath: pointer.String("/tls/ca/path-1"), - ServerName: pointer.String("server-1"), - CertPath: pointer.String("/tls/cert/path-1"), - KeyPath: pointer.String("/tls/key/path-1"), + CAPath: ptr.To("/tls/ca/path-1"), + ServerName: ptr.To("server-1"), + CertPath: ptr.To("/tls/cert/path-1"), + KeyPath: ptr.To("/tls/key/path-1"), }, HeaderAuth: &v1.AlertManagerClientHeaderAuth{ - Type: pointer.String("type-1"), - Credentials: pointer.String("creds-1"), - CredentialsFile: pointer.String("creds-file-1"), + Type: ptr.To("type-1"), + Credentials: ptr.To("creds-1"), + CredentialsFile: ptr.To("creds-file-1"), }, BasicAuth: &v1.AlertManagerClientBasicAuth{ - Username: pointer.String("user-1"), - Password: pointer.String("pass-1"), + Username: ptr.To("user-1"), + Password: ptr.To("pass-1"), }, }, }, @@ -748,19 +748,19 @@ func TestConvertFromV1_RulerConfig(t *testing.T) { }, Client: &v1.AlertManagerClientConfig{ TLS: &v1.AlertManagerClientTLSConfig{ - CAPath: pointer.String("/tls/ca/path-1"), - ServerName: pointer.String("server-1"), - CertPath: pointer.String("/tls/cert/path-1"), - KeyPath: pointer.String("/tls/key/path-1"), + CAPath: ptr.To("/tls/ca/path-1"), + ServerName: ptr.To("server-1"), + CertPath: ptr.To("/tls/cert/path-1"), + KeyPath: ptr.To("/tls/key/path-1"), }, HeaderAuth: &v1.AlertManagerClientHeaderAuth{ - Type: pointer.String("type-1"), - Credentials: pointer.String("creds-1"), - CredentialsFile: pointer.String("creds-file-1"), + Type: ptr.To("type-1"), + Credentials: ptr.To("creds-1"), + CredentialsFile: ptr.To("creds-file-1"), }, BasicAuth: &v1.AlertManagerClientBasicAuth{ - Username: pointer.String("user-1"), - Password: pointer.String("pass-1"), + Username: 
ptr.To("user-1"), + Password: ptr.To("pass-1"), }, }, }, @@ -836,19 +836,19 @@ func TestConvertFromV1_RulerConfig(t *testing.T) { }, Client: &v1beta1.AlertManagerClientConfig{ TLS: &v1beta1.AlertManagerClientTLSConfig{ - CAPath: pointer.String("/tls/ca/path"), - ServerName: pointer.String("server"), - CertPath: pointer.String("/tls/cert/path"), - KeyPath: pointer.String("/tls/key/path"), + CAPath: ptr.To("/tls/ca/path"), + ServerName: ptr.To("server"), + CertPath: ptr.To("/tls/cert/path"), + KeyPath: ptr.To("/tls/key/path"), }, HeaderAuth: &v1beta1.AlertManagerClientHeaderAuth{ - Type: pointer.String("type"), - Credentials: pointer.String("creds"), - CredentialsFile: pointer.String("creds-file"), + Type: ptr.To("type"), + Credentials: ptr.To("creds"), + CredentialsFile: ptr.To("creds-file"), }, BasicAuth: &v1beta1.AlertManagerClientBasicAuth{ - Username: pointer.String("user"), - Password: pointer.String("pass"), + Username: ptr.To("user"), + Password: ptr.To("pass"), }, }, }, @@ -932,19 +932,19 @@ func TestConvertFromV1_RulerConfig(t *testing.T) { }, Client: &v1beta1.AlertManagerClientConfig{ TLS: &v1beta1.AlertManagerClientTLSConfig{ - CAPath: pointer.String("/tls/ca/path-1"), - ServerName: pointer.String("server-1"), - CertPath: pointer.String("/tls/cert/path-1"), - KeyPath: pointer.String("/tls/key/path-1"), + CAPath: ptr.To("/tls/ca/path-1"), + ServerName: ptr.To("server-1"), + CertPath: ptr.To("/tls/cert/path-1"), + KeyPath: ptr.To("/tls/key/path-1"), }, HeaderAuth: &v1beta1.AlertManagerClientHeaderAuth{ - Type: pointer.String("type-1"), - Credentials: pointer.String("creds-1"), - CredentialsFile: pointer.String("creds-file-1"), + Type: ptr.To("type-1"), + Credentials: ptr.To("creds-1"), + CredentialsFile: ptr.To("creds-file-1"), }, BasicAuth: &v1beta1.AlertManagerClientBasicAuth{ - Username: pointer.String("user-1"), - Password: pointer.String("pass-1"), + Username: ptr.To("user-1"), + Password: ptr.To("pass-1"), }, }, }, @@ -991,19 +991,19 @@ func TestConvertFromV1_RulerConfig(t *testing.T) { }, Client: &v1beta1.AlertManagerClientConfig{ TLS: &v1beta1.AlertManagerClientTLSConfig{ - CAPath: pointer.String("/tls/ca/path-1"), - ServerName: pointer.String("server-1"), - CertPath: pointer.String("/tls/cert/path-1"), - KeyPath: pointer.String("/tls/key/path-1"), + CAPath: ptr.To("/tls/ca/path-1"), + ServerName: ptr.To("server-1"), + CertPath: ptr.To("/tls/cert/path-1"), + KeyPath: ptr.To("/tls/key/path-1"), }, HeaderAuth: &v1beta1.AlertManagerClientHeaderAuth{ - Type: pointer.String("type-1"), - Credentials: pointer.String("creds-1"), - CredentialsFile: pointer.String("creds-file-1"), + Type: ptr.To("type-1"), + Credentials: ptr.To("creds-1"), + CredentialsFile: ptr.To("creds-file-1"), }, BasicAuth: &v1beta1.AlertManagerClientBasicAuth{ - Username: pointer.String("user-1"), - Password: pointer.String("pass-1"), + Username: ptr.To("user-1"), + Password: ptr.To("pass-1"), }, }, }, diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml index 2915af504fd3..1e9f05cc372d 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: docker.io/grafana/loki-operator:0.5.0 - createdAt: "2024-01-10T18:25:00Z" + createdAt: 
"2024-01-19T14:20:59Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. features.operators.openshift.io/disconnected: "true" diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml index b78b8f6d30b9..4d88a93a9250 100644 --- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: docker.io/grafana/loki-operator:0.5.0 - createdAt: "2024-01-10T18:24:59Z" + createdAt: "2024-01-19T14:20:57Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. operators.operatorframework.io/builder: operator-sdk-unknown diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml index b0fca996ce78..2adb57379fc7 100644 --- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -150,7 +150,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: quay.io/openshift-logging/loki-operator:0.1.0 - createdAt: "2024-01-10T18:25:02Z" + createdAt: "2024-01-19T14:21:01Z" description: | The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. ## Prerequisites and Requirements diff --git a/operator/config/docs/config.json b/operator/config/docs/config.json index 3d912e857d2b..fb1b7d8a11b2 100644 --- a/operator/config/docs/config.json +++ b/operator/config/docs/config.json @@ -4,7 +4,13 @@ ], "hideTypePatterns": [ "ParseError$", - "List$" + "List$", + "ControllerHealth$", + "ControllerManagerConfiguration$", + "ControllerManagerConfigurationSpec$", + "ControllerMetrics$", + "ControllerWebhook$", + "ProjectConfig$" ], "externalPackages": [ { @@ -38,22 +44,6 @@ { "typeMatchPrefix": "^k8s\\.io/component-base/config/v1alpha1\\.LeaderElectionConfiguration$", "docsURLTemplate": "https://pkg.go.dev/k8s.io/component-base/config#LeaderElectionConfiguration" - }, - { - "typeMatchPrefix": "^sigs\\.k8s\\.io/controller-runtime/pkg/config/v1alpha1\\.ControllerConfigurationSpec$", - "docsURLTemplate": "https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1#ControllerConfigurationSpec" - }, - { - "typeMatchPrefix": "^sigs\\.k8s\\.io/controller-runtime/pkg/config/v1alpha1\\.ControllerMetrics$", - "docsURLTemplate": "https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1#ControllerMetrics" - }, - { - "typeMatchPrefix": "^sigs\\.k8s\\.io/controller-runtime/pkg/config/v1alpha1\\.ControllerHealth$", - "docsURLTemplate": "https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1#ControllerHealth" - }, - { - "typeMatchPrefix": "^sigs\\.k8s\\.io/controller-runtime/pkg/config/v1alpha1\\.ControllerWebhook$", - "docsURLTemplate": "https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1#ControllerWebhook" } ], "typeDisplayNamePrefixOverrides": { @@ -66,4 +56,4 @@ "github.com/grafana/loki/operator/apis/loki/config/v1": "Feature Gates" }, "markdownDisabled": false -} \ No newline at end of file +} diff 
--git a/operator/controllers/loki/lokistack_controller.go b/operator/controllers/loki/lokistack_controller.go
index 487390d7287b..629ee85d5edd 100644
--- a/operator/controllers/loki/lokistack_controller.go
+++ b/operator/controllers/loki/lokistack_controller.go
@@ -94,12 +94,7 @@ var (
 	})
 	createUpdateOrDeletePred = builder.WithPredicates(predicate.Funcs{
 		UpdateFunc: func(e event.UpdateEvent) bool {
-			if e.ObjectOld.GetGeneration() == 0 && len(e.ObjectOld.GetAnnotations()) == 0 {
-				return e.ObjectOld.GetResourceVersion() != e.ObjectNew.GetResourceVersion()
-			}
-
-			return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() ||
-				cmp.Diff(e.ObjectOld.GetAnnotations(), e.ObjectNew.GetAnnotations()) != ""
+			return e.ObjectOld.GetResourceVersion() != e.ObjectNew.GetResourceVersion()
 		},
 		CreateFunc: func(e event.CreateEvent) bool { return true },
 		DeleteFunc: func(e event.DeleteEvent) bool { return true },
@@ -207,7 +202,8 @@ func (r *LokiStackReconciler) buildController(bld k8s.Builder) error {
 		Owns(&rbacv1.Role{}, updateOrDeleteOnlyPred).
 		Owns(&rbacv1.RoleBinding{}, updateOrDeleteOnlyPred).
 		Watches(&corev1.Service{}, r.enqueueForAlertManagerServices(), createUpdateOrDeletePred).
-		Watches(&corev1.Secret{}, r.enqueueForStorageSecret(), createUpdateOrDeletePred)
+		Watches(&corev1.Secret{}, r.enqueueForStorageSecret(), createUpdateOrDeletePred).
+		Watches(&corev1.ConfigMap{}, r.enqueueForStorageCA(), createUpdateOrDeletePred)
 
 	if r.FeatureGates.LokiStackAlerts {
 		bld = bld.Owns(&monitoringv1.PrometheusRule{}, updateOrDeleteOnlyPred)
@@ -324,3 +320,35 @@ func (r *LokiStackReconciler) enqueueForStorageSecret() handler.EventHandler {
 		return requests
 	})
 }
+
+func (r *LokiStackReconciler) enqueueForStorageCA() handler.EventHandler {
+	return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
+		lokiStacks := &lokiv1.LokiStackList{}
+		if err := r.Client.List(ctx, lokiStacks, client.InNamespace(obj.GetNamespace())); err != nil {
+			r.Log.Error(err, "Error listing LokiStack resources for storage CA update")
+			return nil
+		}
+
+		var requests []reconcile.Request
+		for _, stack := range lokiStacks.Items {
+			if stack.Spec.Storage.TLS == nil {
+				continue
+			}
+
+			storageTLS := stack.Spec.Storage.TLS
+			if obj.GetName() != storageTLS.CA {
+				continue
+			}
+
+			requests = append(requests, reconcile.Request{
+				NamespacedName: types.NamespacedName{
+					Namespace: stack.Namespace,
+					Name:      stack.Name,
+				},
+			})
+			r.Log.Info("Enqueued request for LokiStack because of Storage CA resource change", "LokiStack", stack.Name, "ConfigMap", obj.GetName())
+		}
+
+		return requests
+	})
+}
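The simplified `createUpdateOrDeletePred` above now fires on any resourceVersion change rather than comparing generations and annotations. A self-contained sketch of that behavior (standalone illustration, not part of the patch; the fake ConfigMaps and version strings are invented for the example):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

func main() {
	// Same UpdateFunc as the patched predicate: any resourceVersion bump
	// (spec, status, metadata, ...) now counts as an update.
	pred := predicate.Funcs{
		UpdateFunc: func(e event.UpdateEvent) bool {
			return e.ObjectOld.GetResourceVersion() != e.ObjectNew.GetResourceVersion()
		},
	}

	older := &corev1.ConfigMap{}
	older.SetResourceVersion("100") // illustrative resourceVersion

	newer := older.DeepCopy()
	newer.SetResourceVersion("101")

	fmt.Println(pred.Update(event.UpdateEvent{ObjectOld: older, ObjectNew: newer})) // true
	fmt.Println(pred.Update(event.UpdateEvent{ObjectOld: older, ObjectNew: older})) // false
}
```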
diff --git a/operator/controllers/loki/lokistack_controller_test.go b/operator/controllers/loki/lokistack_controller_test.go
index d8eae5a1ec66..7421b63331b5 100644
--- a/operator/controllers/loki/lokistack_controller_test.go
+++ b/operator/controllers/loki/lokistack_controller_test.go
@@ -203,8 +203,8 @@ func TestLokiStackController_RegisterWatchedResources(t *testing.T) {
 	table := []test{
 		{
 			src:               &openshiftconfigv1.APIServer{},
-			index:             2,
-			watchesCallsCount: 3,
+			index:             3,
+			watchesCallsCount: 4,
 			featureGates: configv1.FeatureGates{
 				OpenShift: configv1.OpenShiftFeatureGates{
 					ClusterTLSPolicy: true,
 				},
 			},
 		},
 		{
 			src:               &openshiftconfigv1.Proxy{},
-			index:             2,
-			watchesCallsCount: 3,
+			index:             3,
+			watchesCallsCount: 4,
 			featureGates: configv1.FeatureGates{
 				OpenShift: configv1.OpenShiftFeatureGates{
 					ClusterProxy: true,
 				},
 			},
 		},
@@ -226,14 +226,21 @@ func TestLokiStackController_RegisterWatchedResources(t *testing.T) {
 		{
 			src:               &corev1.Service{},
 			index:             0,
-			watchesCallsCount: 2,
+			watchesCallsCount: 3,
 			featureGates:      configv1.FeatureGates{},
 			pred:              createUpdateOrDeletePred,
 		},
 		{
 			src:               &corev1.Secret{},
 			index:             1,
-			watchesCallsCount: 2,
+			watchesCallsCount: 3,
+			featureGates:      configv1.FeatureGates{},
+			pred:              createUpdateOrDeletePred,
+		},
+		{
+			src:               &corev1.ConfigMap{},
+			index:             2,
+			watchesCallsCount: 3,
 			featureGates:      configv1.FeatureGates{},
 			pred:              createUpdateOrDeletePred,
 		},
diff --git a/operator/docs/lokistack/sop.md b/operator/docs/lokistack/sop.md
index 8c437bd53b67..1ae656e1d5e8 100644
--- a/operator/docs/lokistack/sop.md
+++ b/operator/docs/lokistack/sop.md
@@ -308,3 +308,28 @@ The query queue is currently under high load.
 ### Steps
 
 - Increase the number of queriers
+
+## LokiStack Storage Schema Warning
+
+### Impact
+
+The LokiStack warns when a newer object storage schema version is available for configuration.
+
+### Summary
+
+The schema configuration does not contain the most recent schema version and needs an update.
+
+### Severity
+
+`Warning`
+
+### Access Required
+
+- Console access to the cluster
+- Edit access to the namespace where the LokiStack is deployed:
+  - OpenShift
+    - `openshift-logging` (LokiStack)
+
+### Steps
+
+- Add a new object storage schema V13 with a future EffectiveDate (see the sketch after this diff)
\ No newline at end of file
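The remediation step in the runbook amounts to appending a `v13` entry with a future effective date to the LokiStack storage schema list, while keeping the existing entries so older data stays readable. A minimal sketch using the operator's `lokiv1` API types, assuming the `ObjectStorageSchemaV12`/`ObjectStorageSchemaV13` constants from `apis/loki/v1`; the dates are illustrative:

```go
package main

import (
	"fmt"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)

func main() {
	// The existing v12 schema stays in place; a v13 entry is added with an
	// effective date in the future, as the runbook step describes.
	schemas := []lokiv1.ObjectStorageSchema{
		{Version: lokiv1.ObjectStorageSchemaV12, EffectiveDate: "2022-06-01"},
		{Version: lokiv1.ObjectStorageSchemaV13, EffectiveDate: "2024-03-01"}, // illustrative future date
	}
	fmt.Println(schemas)
}
```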

diff --git a/operator/docs/operator/feature-gates.md b/operator/docs/operator/feature-gates.md
index 7475dfa8a1cc..1d5c046be775 100644
--- a/operator/docs/operator/feature-gates.md
+++ b/operator/docs/operator/feature-gates.md
@@ -99,9 +99,6 @@ The refresh is applied to all LokiStack certificates at once.
 
 ## FeatureGates { #config-loki-grafana-com-v1-FeatureGates }
 
-<p>
-(Appears on:ProjectConfig)
-</p>
 
 FeatureGates is the supported set of all operator feature gates.
 
@@ -415,156 +412,6 @@ bool
-## ProjectConfig { #config-loki-grafana-com-v1-ProjectConfig }
-
-ProjectConfig is the Schema for the projectconfigs API
-
-[removed: the generated HTML field table for ProjectConfig, which documented the following fields]
-
-- syncPeriod (Kubernetes meta/v1.Duration, optional): the minimum frequency at which watched resources are reconciled. A lower period corrects entropy more quickly but reduces responsiveness when there are many watched resources. Defaults to 10 hours if unset, with a 10 percent jitter between the SyncPeriod of all controllers so they do not send list requests simultaneously.
-- leaderElection (Kubernetes v1alpha1.LeaderElectionConfiguration, optional): the LeaderElection config to be used when configuring the manager.Manager leader election.
-- cacheNamespace (string, optional): restricts the manager's cache to watch objects in the desired namespace; defaults to all namespaces. Controllers can still watch cluster-scoped resources (e.g. Node); for namespaced resources the cache only holds objects from the desired namespace.
-- gracefulShutDown (Kubernetes meta/v1.Duration): the duration given to runnables to stop before the manager actually returns on stop. Zero disables graceful shutdown; a negative duration waits without timeout. Graceful shutdown is skipped for safety if the leader election lease is lost.
-- controller (controller-runtime v1alpha1.ControllerConfigurationSpec, optional): global configuration options for controllers registered within this manager.
-- metrics (controller-runtime v1alpha1.ControllerMetrics, optional): the controller metrics configuration.
-- health (controller-runtime v1alpha1.ControllerHealth, optional): the controller health configuration.
-- webhook (controller-runtime v1alpha1.ControllerWebhook, optional): the controllers webhook configuration.
-- featureGates (FeatureGates)
 
 ## TLSProfileType { #config-loki-grafana-com-v1-TLSProfileType }
 (string alias)
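The ProjectConfig removal tracks controller-runtime dropping its file-based component config: the knobs the removed table documented (sync period, leader election, health probes) are now plain manager options set in code. A minimal sketch, assuming controller-runtime v0.16 as pinned in the go.mod change below; the leader-election ID and probe address are illustrative, not the operator's actual wiring:

```go
package main

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func main() {
	// The removed ProjectConfig docs described a default sync period of 10h.
	syncPeriod := 10 * time.Hour

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		// Fields that previously came from the file-based ProjectConfig:
		LeaderElection:         true,
		LeaderElectionID:       "loki-operator.example.com", // illustrative ID
		HealthProbeBindAddress: ":8081",                     // illustrative address
		Cache:                  cache.Options{SyncPeriod: &syncPeriod},
	})
	if err != nil {
		panic(err)
	}

	// Controllers are registered against mgr before mgr.Start(...) is called.
	_ = mgr
}
```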
diff --git a/operator/go.mod b/operator/go.mod index 4ffc3899d11c..6f02675596bd 100644 --- a/operator/go.mod +++ b/operator/go.mod @@ -11,20 +11,21 @@ require ( github.com/grafana/loki/operator/apis/loki v0.0.0-00010101000000-000000000000 github.com/imdario/mergo v0.3.13 github.com/maxbrunsfeld/counterfeiter/v6 v6.7.0 - github.com/openshift/api v0.0.0-20231031181504-3be12e93388f // release-4.14 - github.com/openshift/library-go v0.0.0-20231103161458-0ec67489d123 + github.com/openshift/api v0.0.0-20240116035456-11ed2fbcb805 // release-4.15 + github.com/openshift/library-go v0.0.0-20240117151256-95b334bccb5d // release-4.15 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.67.1 github.com/prometheus/client_golang v1.17.0 github.com/prometheus/common v0.44.0 github.com/prometheus/prometheus v0.42.0 github.com/stretchr/testify v1.8.4 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.27.7 - k8s.io/apimachinery v0.27.7 - k8s.io/apiserver v0.27.7 - k8s.io/client-go v0.27.7 - k8s.io/utils v0.0.0-20230505201702-9f6742963106 - sigs.k8s.io/controller-runtime v0.15.3 + k8s.io/api v0.28.6 + k8s.io/apimachinery v0.28.6 + k8s.io/apiserver v0.28.6 + k8s.io/client-go v0.28.6 + k8s.io/component-base v0.28.6 + k8s.io/utils v0.0.0-20240102154912-e7106e64919e + sigs.k8s.io/controller-runtime v0.16.3 sigs.k8s.io/yaml v1.3.0 ) @@ -39,13 +40,13 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.4.0 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dennwc/varint v1.0.0 // indirect - github.com/dustin/go-humanize v1.0.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fatih/color v1.14.1 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect @@ -54,7 +55,7 @@ require ( github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -63,7 +64,7 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.1.2 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2 // indirect @@ -121,40 +122,44 @@ require ( github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d // indirect github.com/weaveworks/promrus v1.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.7 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect - go.etcd.io/etcd/client/v3 v3.5.7 // indirect + go.etcd.io/etcd/api/v3 v3.5.9 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect + go.etcd.io/etcd/client/v3 v3.5.9 // 
indirect go.opentelemetry.io/otel v1.11.2 // indirect go.opentelemetry.io/otel/trace v1.11.2 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/goleak v1.2.1 // indirect - go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.24.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.25.0 // indirect go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect golang.org/x/crypto v0.17.0 // indirect golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874 // indirect - golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.17.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.19.0 // indirect golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sync v0.3.0 // indirect + golang.org/x/sync v0.5.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.12.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect + golang.org/x/tools v0.16.1 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2 // indirect - google.golang.org/grpc v1.52.3 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/grpc v1.56.3 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.27.7 // indirect - k8s.io/component-base v0.27.7 // indirect + k8s.io/apiextensions-apiserver v0.28.3 // indirect k8s.io/klog/v2 v2.100.1 // indirect - k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect + k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) replace github.com/grafana/loki/operator/apis/loki => ./apis/loki + +// Replace v2.4.0+incompatible indirect refs with v5.1.1 for compatibility with google.golang.org/grpc >=v1.56.3 +replace github.com/sercand/kuberesolver => github.com/sercand/kuberesolver/v5 v5.1.1 diff --git a/operator/go.sum b/operator/go.sum index 4383e691d41a..0d66cc79ec3b 100644 --- a/operator/go.sum +++ b/operator/go.sum @@ -3,6 +3,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -15,6 +16,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod 
h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -26,34 +28,489 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= 
+cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod 
h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= 
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform 
v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod 
h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect 
v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod 
h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= 
+cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k=
+cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
+cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE=
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ=
+cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc=
+cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
+cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
+cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw=
+cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
+cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
@@ -68,6 +525,7 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
@@ -78,7 +536,11 @@ github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/ViaQ/logerr/v2 v2.1.0 h1:8WwzuNa1x+a6tRUl+6sFel83A/QxlFBUaFW2FyG2zzY=
github.com/ViaQ/logerr/v2 v2.1.0/go.mod h1:/qoWLm3YG40Sv5u75s4fvzjZ5p36xINzaxU2L+DJ9uw=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
+github.com/alecthomas/kingpin/v2 v2.3.1/go.mod h1:oYL5vtsvEHZGHxU7DMp32Dvx+qL+ptGn6lWaot2vCNE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -86,7 +548,10 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
+github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q=
@@ -98,15 +563,19 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.187 h1:D5CsRomPnlwDHJCanL2mtaLIcbhjiWxNh5j8zvaWdJA=
github.com/aws/aws-sdk-go v1.44.187/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -122,17 +591,22 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -145,12 +619,13 @@ github.com/docker/docker v20.10.23+incompatible h1:1ZQUUYAdh+oylOT85aA2ZcfRp22jm
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
-github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
-github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
+github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -160,10 +635,15 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
-github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
+github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY=
-github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8=
+github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -174,9 +654,15 @@ github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -186,6 +672,8 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
@@ -201,14 +689,17 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
-github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
+github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
@@ -223,9 +714,11 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc=
github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg=
github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU=
-github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU=
+github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -266,8 +759,9 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
-github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -292,6 +786,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -301,6 +796,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
@@ -310,9 +806,14 @@ github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b h1:8htHrh2bw9c7Idkb7Y
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
@@ -320,7 +821,11 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gophercloud/gophercloud v1.1.1 h1:MuGyqbSxiuVBqkPZ3+Nhbytk1xZxhmfCB2Rg1cJWFWM=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
@@ -338,6 +843,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xC
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g=
github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4=
@@ -393,6 +900,7 @@ github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfE
github.com/hetznercloud/hcloud-go v1.39.0 h1:RUlzI458nGnPR6dlcZlrsGXYC1hQlFbKdm8tVtEQQB0=
github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
@@ -419,24 +927,35 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/linode/linodego v1.12.0 h1:33mOIrZ+gVva14gyJMKPZ85mQGovAvZCEP1ftgmFBjA=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -453,6 +972,7 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -463,6 +983,8 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
@@ -492,14 +1014,14 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
+github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
-github.com/openshift/api v0.0.0-20231031181504-3be12e93388f h1:j7QYl057TBD/lxWAGCr4IrbdG7IMg2U60DFK+BKt3VI=
-github.com/openshift/api v0.0.0-20231031181504-3be12e93388f/go.mod h1:yimSGmjsI+XF1mr+AKBs2//fSXIOhhetHGbMlBEfXbs=
-github.com/openshift/library-go v0.0.0-20231103161458-0ec67489d123 h1:JfXG50f8yVud5xakwTHoqD00+3HYdLmZuEqn5Sq8ZRQ=
-github.com/openshift/library-go v0.0.0-20231103161458-0ec67489d123/go.mod h1:ZFwNwC3opc/7aOvzUbU95zp33Lbxet48h80ryH3p6DY=
+github.com/openshift/api v0.0.0-20240116035456-11ed2fbcb805 h1:5NjcOG5i+WH0F4FI8dKSf0fNgX0YQkrJ8w3YcsHx6KM=
+github.com/openshift/api v0.0.0-20240116035456-11ed2fbcb805/go.mod h1:qNtV0315F+f8ld52TLtPvrfivZpdimOzTi3kn9IVbtU=
+github.com/openshift/library-go v0.0.0-20240117151256-95b334bccb5d h1:jDgYsLszzWSgxr0Tas9+L0F2pIu0mngCLv6BA5vubQ4=
+github.com/openshift/library-go v0.0.0-20240117151256-95b334bccb5d/go.mod h1:0q1UIvboZXfSlUaK+08wsXYw4N6OUo2b/z3a1EWNGyw=
github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc=
github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg=
github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
@@ -513,10 +1035,17 @@ github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -531,12 +1060,15 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -546,6 +1078,7 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
@@ -559,21 +1092,27 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/prometheus/prometheus v0.42.0 h1:G769v8covTkOiNckXFIwLx01XE04OE6Fr0JPA0oR2nI=
github.com/prometheus/prometheus v0.42.0/go.mod h1:Pfqb/MLnnR2KK+0vchiaH39jXxvLMBk+3lnIGP4N7Vk=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.12 h1:Aaz4T7dZp7cB2cv7D/tGtRdSMh48sRaDYr7Jh0HV4qQ=
github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8=
-github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ=
+github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY=
+github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ=
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -582,11 +1121,13 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -615,18 +1156,22 @@ github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d h1:9Z/HiqeGN+LOn
github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d/go.mod h1:Fnq3+U51tMkPRMC6Wr7zKGUeFFYX4YjNrNK50iU0fcE=
github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M=
github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA=
+github.com/xhit/go-str2duration v1.2.0/go.mod h1:3cPSlfZlUHVlneIVfePFWcJZsuwf+P1v2SRTV4cUmp4=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY=
-go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
-go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
-go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY=
-go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4=
-go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw=
+github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
+go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
+go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
+go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
+go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
+go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=
+go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -634,34 +1179,39 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0=
go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI=
go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0=
go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
-go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c=
+go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk=
go4.org/netipx v0.0.0-20230125063823-8449b0a6169f h1:ketMxHg+vWm3yccyYiq+uK8D3fRmna2Fcj+awpQp84s=
go4.org/netipx v0.0.0-20230125063823-8449b0a6169f/go.mod h1:tgPU4N2u9RByaTN3NC2p9xOzyFpte4jYwsIIRF7XlSc=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
@@ -673,17 +1223,29 @@ golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874 h1:kWC3b7j6Fu09SnEBr7P4PuQyM0R6sqyH9R+EjIvT1nQ=
golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -707,9 +1269,14 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
-golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -744,6 +1311,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
@@ -752,6 +1320,8 @@ golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
@@ -760,13 +1330,21 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
-golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -787,7 +1365,14 @@ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2
v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -803,9 +1388,11 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -852,12 +1439,15 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -866,8 +1456,11 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -881,19 +1474,30 @@ golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -905,12 +1509,19 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -932,6 +1543,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -962,10 +1574,12 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -973,9 +1587,13 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= -golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -983,12 +1601,17 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= -gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod 
h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1025,9 +1648,25 @@ google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/S google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1069,12 +1708,13 @@ google.golang.org/genproto 
v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1107,18 +1747,67 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod 
h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2 h1:O97sLx/Xmb/KIZHB/2/BzofxBs5QmmR0LcihPtllmbc= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod 
h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1147,13 +1836,22 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod 
h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ= -google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1170,6 +1868,7 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -1193,8 +1892,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -1205,30 +1902,65 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.27.7 h1:7yG4D3t/q4utJe2ptlRw9aPuxcSmroTsYxsofkQNl/A= -k8s.io/api v0.27.7/go.mod h1:ZNExI/Lhrs9YrLgVWx6jjHZdoWCTXfBXuFjt1X6olro= -k8s.io/apiextensions-apiserver v0.27.7 
h1:YqIOwZAUokzxJIjunmUd4zS1v3JhK34EPXn+pP0/bsU= -k8s.io/apiextensions-apiserver v0.27.7/go.mod h1:x0p+b5a955lfPz9gaDeBy43obM12s+N9dNHK6+dUL+g= -k8s.io/apimachinery v0.27.7 h1:Gxgtb7Y/Rsu8ymgmUEaiErkxa6RY4oTd8kNUI6SUR58= -k8s.io/apimachinery v0.27.7/go.mod h1:jBGQgTjkw99ef6q5hv1YurDd3BqKDk9YRxmX0Ozo0i8= -k8s.io/apiserver v0.27.7 h1:E8sDHwfUug82YC1++qvE73QxihaXDqT4tr8XYBOEtc4= -k8s.io/apiserver v0.27.7/go.mod h1:OrLG9RwCOerutAlo8QJW5EHzUG9Dad7k6rgcDUNSO/w= -k8s.io/client-go v0.27.7 h1:+Xgh9OOKv6A3qdD4Dnl/0VOI5EvAv+0s/OseDxVVTwQ= -k8s.io/client-go v0.27.7/go.mod h1:dZ2kqcalYp5YZ2EV12XIMc77G6PxHWOJp/kclZr4+5Q= -k8s.io/component-base v0.27.7 h1:kngM58HR9W9Nqpv7e4rpdRyWnKl/ABpUhLAZ+HoliMs= -k8s.io/component-base v0.27.7/go.mod h1:YGjlCVL1oeKvG3HSciyPHFh+LCjIEqsxz4BDR3cfHRs= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +k8s.io/api v0.28.6 h1:yy6u9CuIhmg55YvF/BavPBBXB+5QicB64njJXxVnzLo= +k8s.io/api v0.28.6/go.mod h1:AM6Ys6g9MY3dl/XNaNfg/GePI0FT7WBGu8efU/lirAo= +k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08= +k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc= +k8s.io/apimachinery v0.28.6 h1:RsTeR4z6S07srPg6XYrwXpTJVMXsjPXn0ODakMytSW0= +k8s.io/apimachinery v0.28.6/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA= +k8s.io/apiserver v0.28.6 h1:SfS5v4I5UGvh0q/1rzvNwLFsK+r7YzcsixnUc0NwoEk= +k8s.io/apiserver v0.28.6/go.mod h1:8n0aerS3kPm9usyB8B+an6/BZ5+Fa9fNqlASFdDDVwk= +k8s.io/client-go v0.28.6 h1:Gge6ziyIdafRchfoBKcpaARuz7jfrK1R1azuwORIsQI= +k8s.io/client-go v0.28.6/go.mod h1:+nu0Yp21Oeo/cBCsprNVXB2BfJTV51lFfe5tXl2rUL8= +k8s.io/component-base v0.28.6 h1:G4T8VrcQ7xZou3by/fY5NU5mfxOBlWaivS2lPrEltAo= +k8s.io/component-base v0.28.6/go.mod h1:Dg62OOG3ALu2P4nAG00UdsuHoNLQJ5VsUZKQlLDcS+E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= -k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= -k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 
v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.15.3 h1:L+t5heIaI3zeejoIyyvLQs5vTVu/67IU2FfisVzFlBc= -sigs.k8s.io/controller-runtime v0.15.3/go.mod h1:kp4jckA4vTx281S/0Yk2LFEEQe67mjg+ev/yknv47Ds= +sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= +sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/operator/internal/config/loader.go b/operator/internal/config/loader.go new file mode 100644 index 000000000000..b5af090ddb88 --- /dev/null +++ b/operator/internal/config/loader.go @@ -0,0 +1,30 @@ +package config + +import ( + "errors" + "fmt" + "os" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + + configv1 "github.com/grafana/loki/operator/apis/config/v1" +) + +var errConfigFileLoading 
= errors.New("could not read file at path") + +func loadConfigFile(scheme *runtime.Scheme, configFile string) (*configv1.ProjectConfig, error) { + content, err := os.ReadFile(configFile) + if err != nil { + return nil, fmt.Errorf("%w %s", errConfigFileLoading, configFile) + } + + codecs := serializer.NewCodecFactory(scheme) + + outConfig := &configv1.ProjectConfig{} + if err = runtime.DecodeInto(codecs.UniversalDecoder(), content, outConfig); err != nil { + return nil, fmt.Errorf("could not decode file into runtime.Object: %w", err) + } + + return outConfig, nil +} diff --git a/operator/internal/config/options.go b/operator/internal/config/options.go new file mode 100644 index 000000000000..7ed9abb526a7 --- /dev/null +++ b/operator/internal/config/options.go @@ -0,0 +1,106 @@ +package config + +import ( + "fmt" + "net/http" + "net/http/pprof" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + configv1 "github.com/grafana/loki/operator/apis/config/v1" +) + +// LoadConfig initializes the controller configuration, optionally overriding the defaults +// from a provided configuration file. +func LoadConfig(scheme *runtime.Scheme, configFile string) (*configv1.ProjectConfig, ctrl.Options, error) { + options := ctrl.Options{Scheme: scheme} + if configFile == "" { + return &configv1.ProjectConfig{}, options, nil + } + + ctrlCfg, err := loadConfigFile(scheme, configFile) + if err != nil { + return nil, options, fmt.Errorf("failed to parse controller manager config file: %w", err) + } + + options = mergeOptionsFromFile(options, ctrlCfg) + return ctrlCfg, options, nil +} + +func mergeOptionsFromFile(o manager.Options, cfg *configv1.ProjectConfig) manager.Options { + o = setLeaderElectionConfig(o, cfg.ControllerManagerConfigurationSpec) + + if o.Metrics.BindAddress == "" && cfg.Metrics.BindAddress != "" { + o.Metrics.BindAddress = cfg.Metrics.BindAddress + + endpoints := map[string]http.HandlerFunc{ + "/debug/pprof/": pprof.Index, + "/debug/pprof/cmdline": pprof.Cmdline, + "/debug/pprof/profile": pprof.Profile, + "/debug/pprof/symbol": pprof.Symbol, + "/debug/pprof/trace": pprof.Trace, + } + + if o.Metrics.ExtraHandlers == nil { + o.Metrics.ExtraHandlers = map[string]http.Handler{} + } + + for path, handler := range endpoints { + o.Metrics.ExtraHandlers[path] = handler + } + } + + if o.HealthProbeBindAddress == "" && cfg.Health.HealthProbeBindAddress != "" { + o.HealthProbeBindAddress = cfg.Health.HealthProbeBindAddress + } + + if cfg.Webhook.Port != nil { + o.WebhookServer = webhook.NewServer(webhook.Options{ + Port: *cfg.Webhook.Port, + }) + } + + return o +} + +func setLeaderElectionConfig(o manager.Options, obj configv1.ControllerManagerConfigurationSpec) manager.Options { + if obj.LeaderElection == nil { + // The source does not have any configuration; noop + return o + } + + if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil { + o.LeaderElection = *obj.LeaderElection.LeaderElect + } + + if o.LeaderElectionResourceLock == "" && obj.LeaderElection.ResourceLock != "" { + o.LeaderElectionResourceLock = obj.LeaderElection.ResourceLock + } + + if o.LeaderElectionNamespace == "" && obj.LeaderElection.ResourceNamespace != "" { + o.LeaderElectionNamespace = obj.LeaderElection.ResourceNamespace + } + + if o.LeaderElectionID == "" && obj.LeaderElection.ResourceName != "" { + o.LeaderElectionID = 
obj.LeaderElection.ResourceName + } + + if o.LeaseDuration == nil && !reflect.DeepEqual(obj.LeaderElection.LeaseDuration, metav1.Duration{}) { + o.LeaseDuration = &obj.LeaderElection.LeaseDuration.Duration + } + + if o.RenewDeadline == nil && !reflect.DeepEqual(obj.LeaderElection.RenewDeadline, metav1.Duration{}) { + o.RenewDeadline = &obj.LeaderElection.RenewDeadline.Duration + } + + if o.RetryPeriod == nil && !reflect.DeepEqual(obj.LeaderElection.RetryPeriod, metav1.Duration{}) { + o.RetryPeriod = &obj.LeaderElection.RetryPeriod.Duration + } + + return o +} diff --git a/operator/internal/handlers/dashboards_create.go b/operator/internal/handlers/dashboards_create.go index 32b779009a97..af5339f10f59 100644 --- a/operator/internal/handlers/dashboards_create.go +++ b/operator/internal/handlers/dashboards_create.go @@ -8,7 +8,7 @@ import ( "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client" //nolint:typecheck ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "github.com/grafana/loki/operator/internal/external/k8s" diff --git a/operator/internal/handlers/internal/gateway/base_domain.go b/operator/internal/handlers/internal/gateway/base_domain.go index 5bdea31658d1..893659ca5d29 100644 --- a/operator/internal/handlers/internal/gateway/base_domain.go +++ b/operator/internal/handlers/internal/gateway/base_domain.go @@ -6,7 +6,6 @@ import ( "github.com/ViaQ/logerr/v2/kverrors" configv1 "github.com/openshift/api/config/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" @@ -14,11 +13,11 @@ import ( "github.com/grafana/loki/operator/internal/status" ) -// GetOpenShiftBaseDomain returns the cluster DNS base domain on OpenShift +// getOpenShiftBaseDomain returns the cluster DNS base domain on OpenShift // clusters to auto-create redirect URLs for OpenShift Auth or an error. // If the config.openshift.io/DNS object is not found the whole lokistack // resource is set to a degraded state. -func GetOpenShiftBaseDomain(ctx context.Context, k k8s.Client, req ctrl.Request) (string, error) { +func getOpenShiftBaseDomain(ctx context.Context, k k8s.Client) (string, error) { var cluster configv1.DNS key := client.ObjectKey{Name: "cluster"} if err := k.Get(ctx, key, &cluster); err != nil { diff --git a/operator/internal/handlers/internal/gateway/gateway.go b/operator/internal/handlers/internal/gateway/gateway.go new file mode 100644 index 000000000000..0b05801f2e9a --- /dev/null +++ b/operator/internal/handlers/internal/gateway/gateway.go @@ -0,0 +1,87 @@ +package gateway + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + + configv1 "github.com/grafana/loki/operator/apis/config/v1" + lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" + "github.com/grafana/loki/operator/internal/external/k8s" + "github.com/grafana/loki/operator/internal/handlers/internal/openshift" + "github.com/grafana/loki/operator/internal/manifests" + "github.com/grafana/loki/operator/internal/status" +) + +// BuildOptions returns the options needed to generate Kubernetes resource +// manifests for the lokistack-gateway. +// The returned error can be a status.DegradedError in the following cases: +// - The tenants spec is missing. +// - The tenants spec is invalid.
+func BuildOptions(ctx context.Context, log logr.Logger, k k8s.Client, stack *lokiv1.LokiStack, fg configv1.FeatureGates) (string, manifests.Tenants, error) { + var ( + err error + baseDomain string + secrets []*manifests.TenantSecrets + configs map[string]manifests.TenantConfig + tenants manifests.Tenants + ) + + if !fg.LokiStackGateway { + return "", tenants, nil + } + + if stack.Spec.Tenants == nil { + return "", tenants, &status.DegradedError{ + Message: "Invalid tenants configuration: TenantsSpec cannot be nil when gateway flag is enabled", + Reason: lokiv1.ReasonInvalidTenantsConfiguration, + Requeue: false, + } + } + + if err = validateModes(stack); err != nil { + return "", tenants, &status.DegradedError{ + Message: fmt.Sprintf("Invalid tenants configuration: %s", err), + Reason: lokiv1.ReasonInvalidTenantsConfiguration, + Requeue: false, + } + } + + switch stack.Spec.Tenants.Mode { + case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork: + baseDomain, err = getOpenShiftBaseDomain(ctx, k) + if err != nil { + return "", tenants, err + } + + if stack.Spec.Proxy == nil { + // If the LokiStack has no proxy set but there is a cluster-wide proxy setting, + // set the LokiStack proxy to that. + ocpProxy, proxyErr := openshift.GetProxy(ctx, k) + if proxyErr != nil { + return "", tenants, proxyErr + } + + stack.Spec.Proxy = ocpProxy + } + default: + secrets, err = getTenantSecrets(ctx, k, stack) + if err != nil { + return "", tenants, err + } + } + + // Extract the existing tenant's ID and cookieSecret if they exist, otherwise create new ones. + configs, err = getTenantConfigFromSecret(ctx, k, stack) + if err != nil { + log.Error(err, "error in getting tenant secret data") + } + + tenants = manifests.Tenants{ + Secrets: secrets, + Configs: configs, + } + + return baseDomain, tenants, nil +} diff --git a/operator/internal/handlers/internal/gateway/gateway_test.go b/operator/internal/handlers/internal/gateway/gateway_test.go new file mode 100644 index 000000000000..2c8f846f5582 --- /dev/null +++ b/operator/internal/handlers/internal/gateway/gateway_test.go @@ -0,0 +1,390 @@ +package gateway + +import ( + "context" + "io" + "testing" + + "github.com/ViaQ/logerr/v2/log" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + configv1 "github.com/grafana/loki/operator/apis/config/v1" + lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" + "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes" + "github.com/grafana/loki/operator/internal/status" +) + +var ( + logger = log.NewLogger("testing", log.WithOutput(io.Discard)) + + defaultSecret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-stack-secret", + Namespace: "some-ns", + }, + Data: map[string][]byte{ + "endpoint": []byte("s3://your-endpoint"), + "region": []byte("a-region"), + "bucketnames": []byte("bucket1,bucket2"), + "access_key_id": []byte("a-secret-id"), + "access_key_secret": []byte("a-secret-key"), + }, + } + + defaultGatewaySecret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-stack-gateway-secret", + Namespace: "some-ns", + }, + Data: map[string][]byte{ + "clientID": []byte("client-123"), + "clientSecret": []byte("client-secret-xyz"), + "issuerCAPath": []byte("/tmp/test/ca.pem"), + }, + } + + invalidSecret = corev1.Secret{ +
ObjectMeta: metav1.ObjectMeta{ + Name: "some-stack-secret", + Namespace: "some-ns", + }, + Data: map[string][]byte{}, + } +) + +func TestBuildOptions_WhenInvalidTenantsConfiguration_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + degradedErr := &status.DegradedError{ + Message: "Invalid tenants configuration: mandatory configuration - missing OPA Url", + Reason: lokiv1.ReasonInvalidTenantsConfiguration, + Requeue: false, + } + + fg := configv1.FeatureGates{ + LokiStackGateway: true, + } + + stack := &lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + }, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + }, + Tenants: &lokiv1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1.OIDCSpec{ + Secret: &lokiv1.TenantSecretSpec{ + Name: defaultGatewaySecret.Name, + }, + }, + }, + }, + Authorization: nil, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { + _, isLokiStack := object.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { + k.SetClientObject(object, stack) + return nil + } + if defaultSecret.Name == name.Name { + k.SetClientObject(object, &defaultSecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + _, _, err := BuildOptions(context.TODO(), logger, k, stack, fg) + + // make sure error is returned + require.Error(t, err) + require.Equal(t, degradedErr, err) +} + +func TestBuildOptions_WhenMissingGatewaySecret_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + degradedErr := &status.DegradedError{ + Message: "Missing secrets for tenant test", + Reason: lokiv1.ReasonMissingGatewayTenantSecret, + Requeue: true, + } + + fg := configv1.FeatureGates{ + LokiStackGateway: true, + } + + stack := &lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + }, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + }, + Tenants: &lokiv1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1.OIDCSpec{ + Secret: &lokiv1.TenantSecretSpec{ + Name: defaultGatewaySecret.Name, + }, + }, + }, + }, + Authorization: 
&lokiv1.AuthorizationSpec{ + OPA: &lokiv1.OPASpec{ + URL: "some-url", + }, + }, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { + o, ok := object.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && ok { + k.SetClientObject(o, stack) + return nil + } + if defaultSecret.Name == name.Name { + k.SetClientObject(object, &defaultSecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + _, _, err := BuildOptions(context.TODO(), logger, k, stack, fg) + + // make sure error is returned to re-trigger reconciliation + require.Error(t, err) + require.Equal(t, degradedErr, err) +} + +func TestBuildOptions_WhenInvalidGatewaySecret_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + degradedErr := &status.DegradedError{ + Message: "Invalid gateway tenant secret contents", + Reason: lokiv1.ReasonInvalidGatewayTenantSecret, + Requeue: true, + } + + fg := configv1.FeatureGates{ + LokiStackGateway: true, + } + + stack := &lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + }, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + }, + Tenants: &lokiv1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1.OIDCSpec{ + Secret: &lokiv1.TenantSecretSpec{ + Name: invalidSecret.Name, + }, + }, + }, + }, + Authorization: &lokiv1.AuthorizationSpec{ + OPA: &lokiv1.OPASpec{ + URL: "some-url", + }, + }, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { + o, ok := object.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && ok { + k.SetClientObject(o, stack) + return nil + } + if defaultSecret.Name == name.Name { + k.SetClientObject(object, &defaultSecret) + return nil + } + if name.Name == invalidSecret.Name { + k.SetClientObject(object, &invalidSecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + _, _, err := BuildOptions(context.TODO(), logger, k, stack, fg) + + // make sure error is returned to re-trigger reconciliation + require.Error(t, err) + require.Equal(t, degradedErr, err) +} + +func TestBuildOptions_MissingTenantsSpec_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + degradedErr := &status.DegradedError{ + Message: "Invalid tenants configuration: TenantsSpec cannot be nil when gateway flag is enabled", + Reason: lokiv1.ReasonInvalidTenantsConfiguration, + Requeue: false, + } + + fg := configv1.FeatureGates{ + 
LokiStackGateway: true, + } + + stack := &lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + }, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + }, + Tenants: nil, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { + o, ok := object.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && ok { + k.SetClientObject(o, stack) + return nil + } + if defaultSecret.Name == name.Name { + k.SetClientObject(object, &defaultSecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + _, _, err := BuildOptions(context.TODO(), logger, k, stack, fg) + + // make sure error is returned + require.Error(t, err) + require.Equal(t, degradedErr, err) +} diff --git a/operator/internal/handlers/internal/gateway/modes.go b/operator/internal/handlers/internal/gateway/modes.go index fd6bf5fae351..8fd9855b352d 100644 --- a/operator/internal/handlers/internal/gateway/modes.go +++ b/operator/internal/handlers/internal/gateway/modes.go @@ -6,8 +6,7 @@ import ( lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" ) -// ValidateModes validates the tenants mode specification. -func ValidateModes(stack lokiv1.LokiStack) error { +func validateModes(stack *lokiv1.LokiStack) error { if stack.Spec.Tenants.Mode == lokiv1.Static { if stack.Spec.Tenants.Authentication == nil { return kverrors.New("mandatory configuration - missing tenants' authentication configuration") diff --git a/operator/internal/handlers/internal/gateway/modes_test.go b/operator/internal/handlers/internal/gateway/modes_test.go index f54d348f6b25..f7899c1eae85 100644 --- a/operator/internal/handlers/internal/gateway/modes_test.go +++ b/operator/internal/handlers/internal/gateway/modes_test.go @@ -13,13 +13,13 @@ func TestValidateModes_StaticMode(t *testing.T) { type test struct { name string wantErr string - stack lokiv1.LokiStack + stack *lokiv1.LokiStack } table := []test{ { name: "missing authentication spec", wantErr: "mandatory configuration - missing tenants' authentication configuration", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: "LokiStack", }, @@ -39,7 +39,7 @@ func TestValidateModes_StaticMode(t *testing.T) { { name: "missing roles spec", wantErr: "mandatory configuration - missing roles configuration", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: "LokiStack", }, @@ -74,7 +74,7 @@ func TestValidateModes_StaticMode(t *testing.T) { { name: "missing role bindings spec", wantErr: "mandatory configuration - missing role bindings configuration", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: "LokiStack", }, @@ -117,7 +117,7 @@ func TestValidateModes_StaticMode(t *testing.T) { { name: "incompatible OPA URL provided", wantErr: "incompatible configuration - OPA URL not required for mode static", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: 
"LokiStack", }, @@ -174,7 +174,7 @@ func TestValidateModes_StaticMode(t *testing.T) { { name: "all set", wantErr: "", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: "LokiStack", }, @@ -231,7 +231,7 @@ func TestValidateModes_StaticMode(t *testing.T) { t.Run(tst.name, func(t *testing.T) { t.Parallel() - err := ValidateModes(tst.stack) + err := validateModes(tst.stack) if tst.wantErr != "" { require.EqualError(t, err, tst.wantErr) } @@ -243,13 +243,13 @@ func TestValidateModes_DynamicMode(t *testing.T) { type test struct { name string wantErr string - stack lokiv1.LokiStack + stack *lokiv1.LokiStack } table := []test{ { name: "missing authentication spec", wantErr: "mandatory configuration - missing tenants configuration", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: "LokiStack", }, @@ -269,7 +269,7 @@ func TestValidateModes_DynamicMode(t *testing.T) { { name: "missing OPA URL spec", wantErr: "mandatory configuration - missing OPA Url", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: "LokiStack", }, @@ -304,7 +304,7 @@ func TestValidateModes_DynamicMode(t *testing.T) { { name: "incompatible roles configuration provided", wantErr: "incompatible configuration - static roles not required for mode dynamic", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: "LokiStack", }, @@ -349,7 +349,7 @@ func TestValidateModes_DynamicMode(t *testing.T) { { name: "incompatible roleBindings configuration provided", wantErr: "incompatible configuration - static roleBindings not required for mode dynamic", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: "LokiStack", }, @@ -398,7 +398,7 @@ func TestValidateModes_DynamicMode(t *testing.T) { { name: "all set", wantErr: "", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: "LokiStack", }, @@ -438,7 +438,7 @@ func TestValidateModes_DynamicMode(t *testing.T) { t.Run(tst.name, func(t *testing.T) { t.Parallel() - err := ValidateModes(tst.stack) + err := validateModes(tst.stack) if tst.wantErr != "" { require.EqualError(t, err, tst.wantErr) } @@ -450,13 +450,13 @@ func TestValidateModes_OpenshiftLoggingMode(t *testing.T) { type test struct { name string wantErr string - stack lokiv1.LokiStack + stack *lokiv1.LokiStack } table := []test{ { name: "incompatible authentication spec provided", wantErr: "incompatible configuration - custom tenants configuration not required", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: "LokiStack", }, @@ -488,7 +488,7 @@ func TestValidateModes_OpenshiftLoggingMode(t *testing.T) { { name: "incompatible authorization spec provided", wantErr: "incompatible configuration - custom tenants configuration not required", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: "LokiStack", }, @@ -514,7 +514,7 @@ func TestValidateModes_OpenshiftLoggingMode(t *testing.T) { { name: "all set", wantErr: "", - stack: lokiv1.LokiStack{ + stack: &lokiv1.LokiStack{ TypeMeta: metav1.TypeMeta{ Kind: "LokiStack", }, @@ -537,7 +537,7 @@ func TestValidateModes_OpenshiftLoggingMode(t *testing.T) { t.Run(tst.name, func(t *testing.T) { t.Parallel() - err := ValidateModes(tst.stack) + err := validateModes(tst.stack) if tst.wantErr != "" { require.EqualError(t, err, tst.wantErr) } diff --git a/operator/internal/handlers/internal/gateway/tenant_configsecret.go 
b/operator/internal/handlers/internal/gateway/tenant_configsecret.go index c5b06c9c5c87..f4e6c493bc06 100644 --- a/operator/internal/handlers/internal/gateway/tenant_configsecret.go +++ b/operator/internal/handlers/internal/gateway/tenant_configsecret.go @@ -6,10 +6,10 @@ import ( "github.com/ViaQ/logerr/v2/kverrors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/json" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" + lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/external/k8s" "github.com/grafana/loki/operator/internal/manifests" ) @@ -35,11 +35,11 @@ type openShiftSpec struct { CookieSecret string `json:"cookieSecret"` } -// GetTenantConfigSecretData returns the tenantName, tenantId, cookieSecret +// getTenantConfigFromSecret returns the tenantName, tenantId, cookieSecret // clusters to auto-create redirect URLs for OpenShift Auth or an error. -func GetTenantConfigSecretData(ctx context.Context, k k8s.Client, req ctrl.Request) (map[string]manifests.TenantConfig, error) { +func getTenantConfigFromSecret(ctx context.Context, k k8s.Client, stack *lokiv1.LokiStack) (map[string]manifests.TenantConfig, error) { var tenantSecret corev1.Secret - key := client.ObjectKey{Name: manifests.GatewayName(req.Name), Namespace: req.Namespace} + key := client.ObjectKey{Name: manifests.GatewayName(stack.Name), Namespace: stack.Namespace} if err := k.Get(ctx, key, &tenantSecret); err != nil { return nil, kverrors.Wrap(err, "couldn't find tenant secret.") } diff --git a/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go b/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go index f0035a89a16f..15e85a229546 100644 --- a/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go +++ b/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go @@ -10,9 +10,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes" "github.com/grafana/loki/operator/internal/manifests" ) @@ -38,8 +38,8 @@ tenants: func TestGetTenantConfigSecretData_SecretExist(t *testing.T) { k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ + s := &lokiv1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ Name: "lokistack-dev", Namespace: "some-ns", }, @@ -60,7 +60,7 @@ func TestGetTenantConfigSecretData_SecretExist(t *testing.T) { return nil } - ts, err := GetTenantConfigSecretData(context.TODO(), k, r) + ts, err := getTenantConfigFromSecret(context.TODO(), k, s) require.NotNil(t, ts) require.NoError(t, err) @@ -86,8 +86,8 @@ func TestGetTenantConfigSecretData_SecretExist(t *testing.T) { func TestGetTenantConfigSecretData_SecretNotExist(t *testing.T) { k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ + s := &lokiv1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ Name: "lokistack-dev", Namespace: "some-ns", }, @@ -97,7 +97,7 @@ func TestGetTenantConfigSecretData_SecretNotExist(t *testing.T) { return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") } - ts, err := GetTenantConfigSecretData(context.TODO(), k, r) + ts, err := getTenantConfigFromSecret(context.TODO(), k, s) require.Nil(t, ts) 
 	require.Error(t, err)
 }
diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets.go b/operator/internal/handlers/internal/gateway/tenant_secrets.go
index fd2775dfa06a..6cc39ae05e25 100644
--- a/operator/internal/handlers/internal/gateway/tenant_secrets.go
+++ b/operator/internal/handlers/internal/gateway/tenant_secrets.go
@@ -7,7 +7,6 @@ import (
 	"github.com/ViaQ/logerr/v2/kverrors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
@@ -16,14 +15,13 @@ import (
 	"github.com/grafana/loki/operator/internal/status"
 )
 
-// GetTenantSecrets returns the list to gateway tenant secrets for a tenant mode.
+// getTenantSecrets returns the list of gateway tenant secrets for a tenant mode.
 // For modes static and dynamic the secrets are fetched from external provided
 // secrets. For modes openshift-logging and openshift-network a secret per default tenants are created.
 // All secrets live in the same namespace as the lokistack request.
-func GetTenantSecrets(
+func getTenantSecrets(
 	ctx context.Context,
 	k k8s.Client,
-	req ctrl.Request,
 	stack *lokiv1.LokiStack,
 ) ([]*manifests.TenantSecrets, error) {
 	var (
@@ -34,7 +32,7 @@ func GetTenantSecrets(
 	for _, tenant := range stack.Spec.Tenants.Authentication {
 		switch {
 		case tenant.OIDC != nil:
-			key := client.ObjectKey{Name: tenant.OIDC.Secret.Name, Namespace: req.Namespace}
+			key := client.ObjectKey{Name: tenant.OIDC.Secret.Name, Namespace: stack.Namespace}
 			if err := k.Get(ctx, key, &gatewaySecret); err != nil {
 				if apierrors.IsNotFound(err) {
 					return nil, &status.DegradedError{
@@ -60,7 +58,7 @@ func GetTenantSecrets(
 				OIDCSecret: oidcSecret,
 			}
 			if tenant.OIDC.IssuerCA != nil {
-				caPath, err := extractCAPath(ctx, k, req.Namespace, tenant.TenantName, tenant.OIDC.IssuerCA)
+				caPath, err := extractCAPath(ctx, k, stack.Namespace, tenant.TenantName, tenant.OIDC.IssuerCA)
 				if err != nil {
 					return nil, err
 				}
@@ -68,7 +66,7 @@ func GetTenantSecrets(
 			}
 			tenantSecrets = append(tenantSecrets, tennantSecret)
 		case tenant.MTLS != nil:
-			caPath, err := extractCAPath(ctx, k, req.Namespace, tenant.TenantName, tenant.MTLS.CA)
+			caPath, err := extractCAPath(ctx, k, stack.Namespace, tenant.TenantName, tenant.MTLS.CA)
 			if err != nil {
 				return nil, err
 			}
diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
index d0292108d829..d0ccc0962e0b 100644
--- a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
+++ b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
@@ -9,7 +9,6 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
@@ -93,13 +92,6 @@ func TestGetTenantSecrets(t *testing.T) {
 	} {
 		t.Run(strings.Join([]string{string(mode), tc.name}, "_"), func(t *testing.T) {
 			k := &k8sfakes.FakeClient{}
-			r := ctrl.Request{
-				NamespacedName: types.NamespacedName{
-					Name:      "my-stack",
-					Namespace: "some-ns",
-				},
-			}
-
 			s := &lokiv1.LokiStack{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "mystack",
 					Namespace: "some-ns",
@@ -119,7 +111,7 @@
 				}
 				return nil
 			}
-			ts, err := GetTenantSecrets(context.TODO(), k, r, s)
+			ts, err := getTenantSecrets(context.TODO(), k, s)
 			require.NoError(t, err)
 			require.ElementsMatch(t, ts, tc.expected)
 		})
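Taken together, the gateway changes above funnel all tenant handling through a single exported entry point. A minimal sketch of the expected call shape, assuming a hypothetical wrapper (the wrapper name is illustrative, not part of this patch):

package handlers

import (
	"context"

	"github.com/go-logr/logr"

	configv1 "github.com/grafana/loki/operator/apis/config/v1"
	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
	"github.com/grafana/loki/operator/internal/external/k8s"
	"github.com/grafana/loki/operator/internal/handlers/internal/gateway"
)

// buildGatewayTenants is a hypothetical caller: BuildOptions is a no-op when
// the LokiStackGateway feature gate is off, and any *status.DegradedError it
// returns is passed through unchanged so the caller can record the degraded
// condition on the LokiStack.
func buildGatewayTenants(ctx context.Context, log logr.Logger, k k8s.Client, stack *lokiv1.LokiStack, fg configv1.FeatureGates) error {
	baseDomain, tenants, err := gateway.BuildOptions(ctx, log, k, stack, fg)
	if err != nil {
		return err
	}
	_ = baseDomain // non-empty only for the openshift-* tenant modes
	_ = tenants    // secrets and per-tenant configs, fed into manifest generation
	return nil
}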
diff --git a/operator/internal/handlers/internal/rules/cleanup.go b/operator/internal/handlers/internal/rules/cleanup.go
index 81805947efdd..abd5bacd5c03 100644
--- a/operator/internal/handlers/internal/rules/cleanup.go
+++ b/operator/internal/handlers/internal/rules/cleanup.go
@@ -4,25 +4,49 @@ import (
 	"context"
 
 	"github.com/ViaQ/logerr/v2/kverrors"
+	"github.com/go-logr/logr"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/labels"
-	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
+	v1 "github.com/grafana/loki/operator/apis/loki/v1"
+	"github.com/grafana/loki/operator/internal/external/k8s"
 	"github.com/grafana/loki/operator/internal/manifests"
 )
 
-// RemoveRulesConfigMap removes the rules configmaps if any exists.
-func RemoveRulesConfigMap(ctx context.Context, req ctrl.Request, c client.Client) error {
+// Cleanup removes the ruler component's StatefulSet and ConfigMaps if they exist,
+// or returns an error so that the reconciliation loop can be retried.
+func Cleanup(ctx context.Context, log logr.Logger, k k8s.Client, stack *v1.LokiStack) error {
+	if stack.Spec.Rules != nil && stack.Spec.Rules.Enabled {
+		return nil
+	}
+
+	stackKey := client.ObjectKeyFromObject(stack)
+
+	// Clean up ruler resources
+	if err := removeRulesConfigMap(ctx, k, stackKey); err != nil {
+		log.Error(err, "failed to remove rules ConfigMap")
+		return err
+	}
+
+	if err := removeRuler(ctx, k, stackKey); err != nil {
+		log.Error(err, "failed to remove ruler StatefulSet")
+		return err
+	}
+
+	return nil
+}
+
+func removeRulesConfigMap(ctx context.Context, c client.Client, key client.ObjectKey) error {
 	var rulesCmList corev1.ConfigMapList
 	err := c.List(ctx, &rulesCmList, &client.ListOptions{
-		Namespace: req.Namespace,
+		Namespace: key.Namespace,
 		LabelSelector: labels.SelectorFromSet(labels.Set{
 			"app.kubernetes.io/component": manifests.LabelRulerComponent,
-			"app.kubernetes.io/instance":  req.Name,
+			"app.kubernetes.io/instance":  key.Name,
 		}),
 	})
 	if err != nil {
@@ -41,10 +65,9 @@
 	return nil
 }
 
-// RemoveRuler removes the ruler statefulset if it exists.
-func RemoveRuler(ctx context.Context, req ctrl.Request, c client.Client) error {
+func removeRuler(ctx context.Context, c client.Client, stack client.ObjectKey) error {
 	// Check if the Statefulset exists before proceeding.
- key := client.ObjectKey{Name: manifests.RulerName(req.Name), Namespace: req.Namespace} + key := client.ObjectKey{Name: manifests.RulerName(stack.Name), Namespace: stack.Namespace} var ruler appsv1.StatefulSet if err := c.Get(ctx, key, &ruler); err != nil { diff --git a/operator/internal/handlers/internal/rules/cleanup_test.go b/operator/internal/handlers/internal/rules/cleanup_test.go new file mode 100644 index 000000000000..5ecd9f69c345 --- /dev/null +++ b/operator/internal/handlers/internal/rules/cleanup_test.go @@ -0,0 +1,223 @@ +package rules + +import ( + "context" + "io" + "testing" + + "github.com/ViaQ/logerr/v2/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" + "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes" +) + +var ( + logger = log.NewLogger("testing", log.WithOutput(io.Discard)) + + defaultSecret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-stack-secret", + Namespace: "some-ns", + }, + Data: map[string][]byte{ + "endpoint": []byte("s3://your-endpoint"), + "region": []byte("a-region"), + "bucketnames": []byte("bucket1,bucket2"), + "access_key_id": []byte("a-secret-id"), + "access_key_secret": []byte("a-secret-key"), + }, + } + + defaultGatewaySecret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-stack-gateway-secret", + Namespace: "some-ns", + }, + Data: map[string][]byte{ + "clientID": []byte("client-123"), + "clientSecret": []byte("client-secret-xyz"), + "issuerCAPath": []byte("/tmp/test/ca.pem"), + }, + } + + rulesCM = corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack-rules-0", + Namespace: "some-ns", + }, + } + + rulerSS = appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "StatefulSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack-ruler", + Namespace: "some-ns", + }, + } +) + +func TestCleanup_RemovesRulerResourcesWhenDisabled(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + stack := lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + }, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + }, + Rules: &lokiv1.RulesSpec{ + Enabled: true, + }, + Tenants: &lokiv1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1.OIDCSpec{ + Secret: &lokiv1.TenantSecretSpec{ + Name: defaultGatewaySecret.Name, + }, + }, + }, + }, + Authorization: &lokiv1.AuthorizationSpec{ + OPA: &lokiv1.OPASpec{ + URL: "some-url", + }, + }, + }, + }, + } + + k.GetStub = func(_ 
context.Context, name types.NamespacedName, out client.Object, _ ...client.GetOption) error { + _, ok := out.(*lokiv1.RulerConfig) + if ok { + return apierrors.NewNotFound(schema.GroupResource{}, "no ruler config") + } + + _, isLokiStack := out.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { + k.SetClientObject(out, &stack) + return nil + } + if defaultSecret.Name == name.Name { + k.SetClientObject(out, &defaultSecret) + return nil + } + if defaultGatewaySecret.Name == name.Name { + k.SetClientObject(out, &defaultGatewaySecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + k.CreateStub = func(_ context.Context, o client.Object, _ ...client.CreateOption) error { + assert.Equal(t, r.Namespace, o.GetNamespace()) + return nil + } + + k.StatusStub = func() client.StatusWriter { return sw } + + k.DeleteStub = func(_ context.Context, o client.Object, _ ...client.DeleteOption) error { + assert.Equal(t, r.Namespace, o.GetNamespace()) + return nil + } + + k.ListStub = func(_ context.Context, list client.ObjectList, options ...client.ListOption) error { + switch list.(type) { + case *corev1.ConfigMapList: + k.SetClientObjectList(list, &corev1.ConfigMapList{ + Items: []corev1.ConfigMap{ + rulesCM, + }, + }) + } + return nil + } + + err := Cleanup(context.TODO(), logger, k, &stack) + require.NoError(t, err) + + // make sure delete not called + require.Zero(t, k.DeleteCallCount()) + + // Disable the ruler + stack.Spec.Rules.Enabled = false + + // Get should return ruler resources + k.GetStub = func(_ context.Context, name types.NamespacedName, out client.Object, _ ...client.GetOption) error { + _, ok := out.(*lokiv1.RulerConfig) + if ok { + return apierrors.NewNotFound(schema.GroupResource{}, "no ruler config") + } + if rulesCM.Name == name.Name { + k.SetClientObject(out, &rulesCM) + return nil + } + if rulerSS.Name == name.Name { + k.SetClientObject(out, &rulerSS) + return nil + } + + _, isLokiStack := out.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { + k.SetClientObject(out, &stack) + return nil + } + if defaultSecret.Name == name.Name { + k.SetClientObject(out, &defaultSecret) + return nil + } + if defaultGatewaySecret.Name == name.Name { + k.SetClientObject(out, &defaultGatewaySecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + err = Cleanup(context.TODO(), logger, k, &stack) + require.NoError(t, err) + + // make sure delete was called twice (delete rules configmap and ruler statefulset) + require.Equal(t, 2, k.DeleteCallCount()) +} diff --git a/operator/internal/handlers/internal/rules/config.go b/operator/internal/handlers/internal/rules/config.go index f66b92ee06c1..ec4413fc49ec 100644 --- a/operator/internal/handlers/internal/rules/config.go +++ b/operator/internal/handlers/internal/rules/config.go @@ -5,19 +5,16 @@ import ( "github.com/ViaQ/logerr/v2/kverrors" apierrors "k8s.io/apimachinery/pkg/api/errors" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/external/k8s" ) -// GetRulerConfig returns the ruler config spec for a lokistack resource or an error. +// getRulerConfig returns the ruler config spec for a lokistack resource or an error. // If the config is not found, we skip without an error. 
-func GetRulerConfig(ctx context.Context, k k8s.Client, req ctrl.Request) (*lokiv1.RulerConfigSpec, error) {
+func getRulerConfig(ctx context.Context, k k8s.Client, key client.ObjectKey) (*lokiv1.RulerConfigSpec, error) {
 	var rc lokiv1.RulerConfig
-
-	key := client.ObjectKey{Name: req.Name, Namespace: req.Namespace}
 	if err := k.Get(ctx, key, &rc); err != nil {
 		if apierrors.IsNotFound(err) {
 			return nil, nil
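One behavior worth calling out in the hunk above: getRulerConfig maps a missing RulerConfig to (nil, nil), so the ruler config stays optional. A small sketch of the consuming pattern, assuming a hypothetical helper (the helper name is illustrative, not part of this patch):

package rules

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
	"github.com/grafana/loki/operator/internal/external/k8s"
)

// optionalRulerConfig is a hypothetical helper: a nil spec together with a
// nil error means "no RulerConfig object exists", which callers treat as
// "feature not configured" rather than as a degraded condition.
func optionalRulerConfig(ctx context.Context, k k8s.Client, stack *lokiv1.LokiStack) (*lokiv1.RulerConfigSpec, error) {
	spec, err := getRulerConfig(ctx, k, client.ObjectKeyFromObject(stack))
	if err != nil {
		return nil, err // only real API errors propagate
	}
	return spec, nil // spec may legitimately be nil
}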
diff --git a/operator/internal/handlers/internal/rules/rules.go b/operator/internal/handlers/internal/rules/rules.go
index ac4a6d78f030..e21335e98c09 100644
--- a/operator/internal/handlers/internal/rules/rules.go
+++ b/operator/internal/handlers/internal/rules/rules.go
@@ -4,20 +4,114 @@ import (
 	"context"
 
 	"github.com/ViaQ/logerr/v2/kverrors"
+	"github.com/go-logr/logr"
 	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
 	"github.com/grafana/loki/operator/internal/external/k8s"
+	"github.com/grafana/loki/operator/internal/handlers/internal/openshift"
+	"github.com/grafana/loki/operator/internal/manifests"
+	manifestsocp "github.com/grafana/loki/operator/internal/manifests/openshift"
+	"github.com/grafana/loki/operator/internal/status"
 )
 
-// List returns a slice of AlertingRules and a slice of RecordingRules for the given spec or an error. Three cases apply:
-// - Return only matching rules in the stack namespace if no namespace selector given.
-// - Return only matching rules in the stack namespace and in namespaces matching the namespace selector.
-// - Return no rules if rules selector does not apply at all.
-func List(ctx context.Context, k k8s.Client, stackNs string, rs *lokiv1.RulesSpec) ([]lokiv1.AlertingRule, []lokiv1.RecordingRule, error) {
+// BuildOptions returns the ruler options needed to generate Kubernetes resource manifests.
+// The returned error can be a status.DegradedError in the following cases:
+// - When remote write is enabled and the authorization Secret is missing.
+// - When remote write is enabled and the authorization Secret data is invalid.
+func BuildOptions(
+	ctx context.Context,
+	log logr.Logger,
+	k k8s.Client,
+	stack *lokiv1.LokiStack,
+) ([]lokiv1.AlertingRule, []lokiv1.RecordingRule, manifests.Ruler, manifestsocp.Options, error) {
+	if stack.Spec.Rules == nil || !stack.Spec.Rules.Enabled {
+		return nil, nil, manifests.Ruler{}, manifestsocp.Options{}, nil
+	}
+
+	var (
+		err            error
+		alertingRules  []lokiv1.AlertingRule
+		recordingRules []lokiv1.RecordingRule
+		rulerConfig    *lokiv1.RulerConfigSpec
+		rulerSecret    *manifests.RulerSecret
+		ruler          manifests.Ruler
+		ocpOpts        manifestsocp.Options
+
+		stackKey = client.ObjectKeyFromObject(stack)
+	)
+
+	alertingRules, recordingRules, err = listRules(ctx, k, stack.Namespace, stack.Spec.Rules)
+	if err != nil {
+		log.Error(err, "failed to lookup rules", "spec", stack.Spec.Rules)
+	}
+
+	rulerConfig, err = getRulerConfig(ctx, k, stackKey)
+	if err != nil {
+		log.Error(err, "failed to lookup ruler config", "key", stackKey)
+	}
+
+	if rulerConfig != nil && rulerConfig.RemoteWriteSpec != nil && rulerConfig.RemoteWriteSpec.ClientSpec != nil {
+		var rs corev1.Secret
+		key := client.ObjectKey{Name: rulerConfig.RemoteWriteSpec.ClientSpec.AuthorizationSecretName, Namespace: stack.Namespace}
+		if err = k.Get(ctx, key, &rs); err != nil {
+			if apierrors.IsNotFound(err) {
+				return nil, nil, ruler, ocpOpts, &status.DegradedError{
+					Message: "Missing ruler remote write authorization secret",
+					Reason:  lokiv1.ReasonMissingRulerSecret,
+					Requeue: false,
+				}
+			}
+			return nil, nil, ruler, ocpOpts, kverrors.Wrap(err, "failed to lookup lokistack ruler secret", "name", key)
+		}
+
+		rulerSecret, err = ExtractRulerSecret(&rs, rulerConfig.RemoteWriteSpec.ClientSpec.AuthorizationType)
+		if err != nil {
+			return nil, nil, ruler, ocpOpts, &status.DegradedError{
+				Message: "Invalid ruler remote write authorization secret contents",
+				Reason:  lokiv1.ReasonInvalidRulerSecret,
+				Requeue: false,
+			}
+		}
+	}
+
+	ocpAmEnabled, err := openshift.AlertManagerSVCExists(ctx, stack.Spec, k)
+	if err != nil {
+		log.Error(err, "failed to check OCP AlertManager")
+		return nil, nil, ruler, ocpOpts, err
+	}
+
+	ocpUWAmEnabled, err := openshift.UserWorkloadAlertManagerSVCExists(ctx, stack.Spec, k)
+	if err != nil {
+		log.Error(err, "failed to check OCP User Workload AlertManager")
+		return nil, nil, ruler, ocpOpts, err
+	}
+
+	ruler = manifests.Ruler{
+		Spec:   rulerConfig,
+		Secret: rulerSecret,
+	}
+
+	ocpOpts = manifestsocp.Options{
+		BuildOpts: manifestsocp.BuildOptions{
+			AlertManagerEnabled:             ocpAmEnabled,
+			UserWorkloadAlertManagerEnabled: ocpUWAmEnabled,
+		},
+	}
+
+	return alertingRules, recordingRules, ruler, ocpOpts, nil
+}
+
+// listRules returns a slice of AlertingRules and a slice of RecordingRules for the given spec or an error.
+// Three cases apply:
+// - Return only matching rules in the stack namespace if no namespace selector is given.
+// - Return only matching rules in the stack namespace and in namespaces matching the namespace selector.
+// - Return no rules if rules selector does not apply at all.
+func listRules(ctx context.Context, k k8s.Client, stackNs string, rs *lokiv1.RulesSpec) ([]lokiv1.AlertingRule, []lokiv1.RecordingRule, error) { nsl, err := selectRulesNamespaces(ctx, k, stackNs, rs) if err != nil { return nil, nil, err diff --git a/operator/internal/handlers/internal/rules/rules_test.go b/operator/internal/handlers/internal/rules/rules_test.go index 8bc52afb6a9a..e33a2ac928a6 100644 --- a/operator/internal/handlers/internal/rules/rules_test.go +++ b/operator/internal/handlers/internal/rules/rules_test.go @@ -1,4 +1,4 @@ -package rules_test +package rules import ( "context" @@ -11,13 +11,252 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes" - "github.com/grafana/loki/operator/internal/handlers/internal/rules" + "github.com/grafana/loki/operator/internal/status" ) +func TestBuildOptions_WhenMissingRemoteWriteSecret_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + stack := lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + }, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + }, + Rules: &lokiv1.RulesSpec{ + Enabled: true, + }, + Tenants: &lokiv1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1.OIDCSpec{ + Secret: &lokiv1.TenantSecretSpec{ + Name: defaultGatewaySecret.Name, + }, + }, + }, + }, + Authorization: &lokiv1.AuthorizationSpec{ + OPA: &lokiv1.OPASpec{ + URL: "some-url", + }, + }, + }, + }, + } + + rulerCfg := &lokiv1.RulerConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.RulerConfigSpec{ + RemoteWriteSpec: &lokiv1.RemoteWriteSpec{ + Enabled: true, + ClientSpec: &lokiv1.RemoteWriteClientSpec{ + AuthorizationType: lokiv1.BasicAuthorization, + AuthorizationSecretName: "test", + }, + }, + }, + } + + degradedErr := &status.DegradedError{ + Message: "Missing ruler remote write authorization secret", + Reason: lokiv1.ReasonMissingRulerSecret, + Requeue: false, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, out client.Object, _ ...client.GetOption) error { + _, isRulerConfig := out.(*lokiv1.RulerConfig) + if r.Name == name.Name && r.Namespace == name.Namespace && isRulerConfig { + k.SetClientObject(out, rulerCfg) + return nil + } + + _, isLokiStack := out.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { + k.SetClientObject(out, &stack) + return nil + } + if defaultSecret.Name == name.Name { + k.SetClientObject(out, &defaultSecret) + return nil + } + if defaultGatewaySecret.Name == name.Name { + 
k.SetClientObject(out, &defaultGatewaySecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + _, _, _, _, err := BuildOptions(context.TODO(), logger, k, &stack) + + require.Error(t, err) + require.Equal(t, degradedErr, err) +} + +func TestBuildOptions_WhenInvalidRemoteWriteSecret_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + stack := lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + }, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + }, + Rules: &lokiv1.RulesSpec{ + Enabled: true, + }, + Tenants: &lokiv1.TenantsSpec{ + Mode: "dynamic", + Authentication: []lokiv1.AuthenticationSpec{ + { + TenantName: "test", + TenantID: "1234", + OIDC: &lokiv1.OIDCSpec{ + Secret: &lokiv1.TenantSecretSpec{ + Name: defaultGatewaySecret.Name, + }, + }, + }, + }, + Authorization: &lokiv1.AuthorizationSpec{ + OPA: &lokiv1.OPASpec{ + URL: "some-url", + }, + }, + }, + }, + } + + rulerCfg := &lokiv1.RulerConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.RulerConfigSpec{ + RemoteWriteSpec: &lokiv1.RemoteWriteSpec{ + Enabled: true, + ClientSpec: &lokiv1.RemoteWriteClientSpec{ + AuthorizationType: lokiv1.BasicAuthorization, + AuthorizationSecretName: "some-client-secret", + }, + }, + }, + } + + invalidSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-client-secret", + Namespace: "some-ns", + }, + Data: map[string][]byte{}, + } + + degradedErr := &status.DegradedError{ + Message: "Invalid ruler remote write authorization secret contents", + Reason: lokiv1.ReasonInvalidRulerSecret, + Requeue: false, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, out client.Object, _ ...client.GetOption) error { + _, isRulerConfig := out.(*lokiv1.RulerConfig) + if r.Name == name.Name && r.Namespace == name.Namespace && isRulerConfig { + k.SetClientObject(out, rulerCfg) + return nil + } + + _, isLokiStack := out.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { + k.SetClientObject(out, &stack) + return nil + } + if invalidSecret.Name == name.Name { + k.SetClientObject(out, &invalidSecret) + return nil + } + if defaultGatewaySecret.Name == name.Name { + k.SetClientObject(out, &defaultGatewaySecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + _, _, _, _, err := BuildOptions(context.TODO(), logger, k, &stack) + + require.Error(t, err) + require.Equal(t, degradedErr, err) +} + func TestList_AlertingRulesMatchSelector_WithDefaultStackNamespaceRules(t *testing.T) { const stackNs = "some-ns" @@ -83,7 +322,7 @@ func 
TestList_AlertingRulesMatchSelector_WithDefaultStackNamespaceRules(t *testi
 			return nil
 		}
 
-		rules, _, err := rules.List(context.TODO(), k, stackNs, rs)
+		rules, _, err := listRules(context.TODO(), k, stackNs, rs)
 		require.NoError(t, err)
 		require.NotEmpty(t, rules)
 
@@ -185,7 +424,7 @@ func TestList_AlertingRulesMatchSelector_FilteredByNamespaceSelector(t *testing.
 			return nil
 		}
 
-		rules, _, err := rules.List(context.TODO(), k, stackNs, rs)
+		rules, _, err := listRules(context.TODO(), k, stackNs, rs)
 		require.NoError(t, err)
 		require.NotEmpty(t, rules)
 
@@ -257,7 +496,7 @@ func TestList_RecordingRulesMatchSelector_WithDefaultStackNamespaceRules(t *test
 			return nil
 		}
 
-		_, rules, err := rules.List(context.TODO(), k, stackNs, rs)
+		_, rules, err := listRules(context.TODO(), k, stackNs, rs)
 		require.NoError(t, err)
 		require.NotEmpty(t, rules)
 
@@ -358,7 +597,7 @@ func TestList_RecordingRulesMatchSelector_FilteredByNamespaceSelector(t *testing
 			return nil
 		}
 
-		_, rules, err := rules.List(context.TODO(), k, stackNs, rs)
+		_, rules, err := listRules(context.TODO(), k, stackNs, rs)
 		require.NoError(t, err)
 		require.NotEmpty(t, rules)
diff --git a/operator/internal/handlers/internal/storage/ca_configmap.go b/operator/internal/handlers/internal/storage/ca_configmap.go
index ccb4f93d06a3..904e63373a20 100644
--- a/operator/internal/handlers/internal/storage/ca_configmap.go
+++ b/operator/internal/handlers/internal/storage/ca_configmap.go
@@ -1,9 +1,67 @@
 package storage
 
-import corev1 "k8s.io/api/core/v1"
+import (
+	"context"
+	"crypto/sha1"
+	"fmt"
 
-// IsValidCAConfigMap checks if the given CA configMap has an
-// non-empty entry for the key
-func IsValidCAConfigMap(cm *corev1.ConfigMap, key string) bool {
-	return cm.Data[key] != ""
+	"github.com/ViaQ/logerr/v2/kverrors"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+	"github.com/grafana/loki/operator/internal/external/k8s"
+	"github.com/grafana/loki/operator/internal/status"
+)
+
+const (
+	defaultCAKey = "service-ca.crt"
+)
+
+type caKeyError string
+
+func (e caKeyError) Error() string {
+	return fmt.Sprintf("key not present or data empty: %s", string(e))
+}
+
+func getCAConfigMap(ctx context.Context, k k8s.Client, stack *lokiv1.LokiStack, name string) (*corev1.ConfigMap, error) {
+	var cm corev1.ConfigMap
+	key := client.ObjectKey{Name: name, Namespace: stack.Namespace}
+	if err := k.Get(ctx, key, &cm); err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil, &status.DegradedError{
+				Message: "Missing object storage CA config map",
+				Reason:  lokiv1.ReasonMissingObjectStorageCAConfigMap,
+				Requeue: false,
+			}
+		}
+		return nil, kverrors.Wrap(err, "failed to lookup lokistack object storage CA config map", "name", key)
+	}
+
+	return &cm, nil
+}
+
+// checkCAConfigMap checks if the given CA configMap has a non-empty entry for the key used as CA certificate.
+// If the key is present it will return a hash of the current key name and contents.
+func checkCAConfigMap(cm *corev1.ConfigMap, key string) (string, error) {
+	data := cm.Data[key]
+	if data == "" {
+		return "", caKeyError(key)
+	}
+
+	h := sha1.New()
+	if _, err := h.Write([]byte(key)); err != nil {
+		return "", err
+	}
+
+	if _, err := h.Write(hashSeparator); err != nil {
+		return "", err
+	}
+
+	if _, err := h.Write([]byte(data)); err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("%x", h.Sum(nil)), nil
 }
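The wantHash fixture in the test below can be reproduced outside the operator; a standalone sketch of the same hashing layout (key, separator, then data through SHA-1), using only the standard library:

package main

import (
	"crypto/sha1"
	"fmt"
)

// caHash mirrors checkCAConfigMap's layout: SHA-1 over key + "," + data,
// hex-encoded. hash.Hash.Write never returns an error, so the error checks
// in the operator code are purely defensive.
func caHash(key, data string) string {
	h := sha1.New()
	h.Write([]byte(key))
	h.Write([]byte(",")) // hashSeparator in secrets.go
	h.Write([]byte(data))
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	// Per the expected value in TestCheckValidConfigMap, this prints
	// de6ae206d4920549d21c24ad9721e87a9b1ec7dc.
	fmt.Println(caHash("service-ca.crt", "has-some-data"))
}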
diff --git a/operator/internal/handlers/internal/storage/ca_configmap_test.go b/operator/internal/handlers/internal/storage/ca_configmap_test.go
index 1e164f5a2541..33d5156defe9 100644
--- a/operator/internal/handlers/internal/storage/ca_configmap_test.go
+++ b/operator/internal/handlers/internal/storage/ca_configmap_test.go
@@ -1,19 +1,18 @@
-package storage_test
+package storage
 
 import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
-
-	"github.com/grafana/loki/operator/internal/handlers/internal/storage"
 )
 
-func TestIsValidConfigMap(t *testing.T) {
+func TestCheckValidConfigMap(t *testing.T) {
 	type test struct {
-		name  string
-		cm    *corev1.ConfigMap
-		valid bool
+		name         string
+		cm           *corev1.ConfigMap
+		wantHash     string
+		wantErrorMsg string
 	}
 	table := []test{
 		{
@@ -23,11 +22,13 @@
 					"service-ca.crt": "has-some-data",
 				},
 			},
-			valid: true,
+			wantHash:     "de6ae206d4920549d21c24ad9721e87a9b1ec7dc",
+			wantErrorMsg: "",
 		},
 		{
-			name: "missing `service-ca.crt` key",
-			cm:   &corev1.ConfigMap{},
+			name:         "missing `service-ca.crt` key",
+			cm:           &corev1.ConfigMap{},
+			wantErrorMsg: "key not present or data empty: service-ca.crt",
 		},
 		{
 			name: "missing CA content",
@@ -36,6 +37,7 @@
 					"service-ca.crt": "",
 				},
 			},
+			wantErrorMsg: "key not present or data empty: service-ca.crt",
 		},
 	}
 	for _, tst := range table {
@@ -43,8 +45,14 @@
 		t.Run(tst.name, func(t *testing.T) {
 			t.Parallel()
 
-			ok := storage.IsValidCAConfigMap(tst.cm, "service-ca.crt")
-			require.Equal(t, tst.valid, ok)
+			hash, err := checkCAConfigMap(tst.cm, "service-ca.crt")
+
+			require.Equal(t, tst.wantHash, hash)
+			if tst.wantErrorMsg == "" {
+				require.NoError(t, err)
+			} else {
+				require.EqualError(t, err, tst.wantErrorMsg)
+			}
 		})
 	}
 }
diff --git a/operator/internal/handlers/internal/storage/secrets.go b/operator/internal/handlers/internal/storage/secrets.go
index 1341728e7cec..0ef5f197a625 100644
--- a/operator/internal/handlers/internal/storage/secrets.go
+++ b/operator/internal/handlers/internal/storage/secrets.go
@@ -1,24 +1,46 @@
 package storage
 
 import (
+	"context"
 	"crypto/sha1"
 	"fmt"
 	"sort"
 
 	"github.com/ViaQ/logerr/v2/kverrors"
 	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+	"github.com/grafana/loki/operator/internal/external/k8s"
 	"github.com/grafana/loki/operator/internal/manifests/storage"
+	"github.com/grafana/loki/operator/internal/status"
 )
 
 var hashSeparator = []byte(",")
 
-// ExtractSecret reads a k8s secret into a manifest object storage struct if valid.
-func ExtractSecret(s *corev1.Secret, secretType lokiv1.ObjectStorageSecretType) (*storage.Options, error) { +func getSecret(ctx context.Context, k k8s.Client, stack *lokiv1.LokiStack) (*corev1.Secret, error) { + var storageSecret corev1.Secret + key := client.ObjectKey{Name: stack.Spec.Storage.Secret.Name, Namespace: stack.Namespace} + if err := k.Get(ctx, key, &storageSecret); err != nil { + if apierrors.IsNotFound(err) { + return nil, &status.DegradedError{ + Message: "Missing object storage secret", + Reason: lokiv1.ReasonMissingObjectStorageSecret, + Requeue: false, + } + } + return nil, kverrors.Wrap(err, "failed to lookup lokistack storage secret", "name", key) + } + + return &storageSecret, nil +} + +// extractSecret reads a k8s secret into a manifest object storage struct if valid. +func extractSecret(s *corev1.Secret, secretType lokiv1.ObjectStorageSecretType) (storage.Options, error) { hash, err := hashSecretData(s) if err != nil { - return nil, kverrors.Wrap(err, "error calculating hash for secret", "type", secretType) + return storage.Options{}, kverrors.Wrap(err, "error calculating hash for secret", "type", secretType) } storageOpts := storage.Options{ @@ -39,13 +61,13 @@ func ExtractSecret(s *corev1.Secret, secretType lokiv1.ObjectStorageSecretType) case lokiv1.ObjectStorageSecretAlibabaCloud: storageOpts.AlibabaCloud, err = extractAlibabaCloudConfigSecret(s) default: - return nil, kverrors.New("unknown secret type", "type", secretType) + return storage.Options{}, kverrors.New("unknown secret type", "type", secretType) } if err != nil { - return nil, err + return storage.Options{}, err } - return &storageOpts, nil + return storageOpts, nil } func hashSecretData(s *corev1.Secret) (string, error) { diff --git a/operator/internal/handlers/internal/storage/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go index 46ddc133f9f4..c72c63ea1ee1 100644 --- a/operator/internal/handlers/internal/storage/secrets_test.go +++ b/operator/internal/handlers/internal/storage/secrets_test.go @@ -135,7 +135,7 @@ func TestAzureExtract(t *testing.T) { t.Run(tst.name, func(t *testing.T) { t.Parallel() - opts, err := ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretAzure) + opts, err := extractSecret(tst.secret, lokiv1.ObjectStorageSecretAzure) if !tst.wantErr { require.NoError(t, err) require.NotEmpty(t, opts.SecretName) @@ -186,7 +186,7 @@ func TestGCSExtract(t *testing.T) { t.Run(tst.name, func(t *testing.T) { t.Parallel() - _, err := ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretGCS) + _, err := extractSecret(tst.secret, lokiv1.ObjectStorageSecretGCS) if !tst.wantErr { require.NoError(t, err) } @@ -360,7 +360,7 @@ func TestS3Extract(t *testing.T) { t.Run(tst.name, func(t *testing.T) { t.Parallel() - opts, err := ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretS3) + opts, err := extractSecret(tst.secret, lokiv1.ObjectStorageSecretS3) if !tst.wantErr { require.NoError(t, err) require.NotEmpty(t, opts.SecretName) @@ -509,7 +509,7 @@ func TestSwiftExtract(t *testing.T) { t.Run(tst.name, func(t *testing.T) { t.Parallel() - opts, err := ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretSwift) + opts, err := extractSecret(tst.secret, lokiv1.ObjectStorageSecretSwift) if !tst.wantErr { require.NoError(t, err) require.NotEmpty(t, opts.SecretName) @@ -583,7 +583,7 @@ func TestAlibabaCloudExtract(t *testing.T) { t.Run(tst.name, func(t *testing.T) { t.Parallel() - opts, err := ExtractSecret(tst.secret, lokiv1.ObjectStorageSecretAlibabaCloud) + opts, err := 
extractSecret(tst.secret, lokiv1.ObjectStorageSecretAlibabaCloud)
 			if !tst.wantErr {
 				require.NoError(t, err)
 				require.NotEmpty(t, opts.SecretName)
diff --git a/operator/internal/handlers/internal/storage/storage.go b/operator/internal/handlers/internal/storage/storage.go
new file mode 100644
index 000000000000..e1657121ccd6
--- /dev/null
+++ b/operator/internal/handlers/internal/storage/storage.go
@@ -0,0 +1,91 @@
+package storage
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	configv1 "github.com/grafana/loki/operator/apis/config/v1"
+	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+	"github.com/grafana/loki/operator/internal/external/k8s"
+	"github.com/grafana/loki/operator/internal/manifests/storage"
+	"github.com/grafana/loki/operator/internal/status"
+)
+
+// BuildOptions returns the object storage options to generate Kubernetes resource manifests
+// which require access to object storage buckets.
+// The returned error can be a status.DegradedError in the following cases:
+// - The user-provided object storage secret is missing.
+// - The object storage Secret data is invalid.
+// - The object storage schema config is invalid.
+// - The object storage CA ConfigMap is missing if one is referenced.
+// - The object storage CA ConfigMap data is invalid.
+func BuildOptions(ctx context.Context, k k8s.Client, stack *lokiv1.LokiStack, fg configv1.FeatureGates) (storage.Options, error) {
+	storageSecret, err := getSecret(ctx, k, stack)
+	if err != nil {
+		return storage.Options{}, err
+	}
+
+	objStore, err := extractSecret(storageSecret, stack.Spec.Storage.Secret.Type)
+	if err != nil {
+		return storage.Options{}, &status.DegradedError{
+			Message: fmt.Sprintf("Invalid object storage secret contents: %s", err),
+			Reason:  lokiv1.ReasonInvalidObjectStorageSecret,
+			Requeue: false,
+		}
+	}
+	objStore.OpenShiftEnabled = fg.OpenShift.Enabled
+
+	storageSchemas, err := storage.BuildSchemaConfig(
+		time.Now().UTC(),
+		stack.Spec.Storage,
+		stack.Status.Storage,
+	)
+	if err != nil {
+		return storage.Options{}, &status.DegradedError{
+			Message: fmt.Sprintf("Invalid object storage schema contents: %s", err),
+			Reason:  lokiv1.ReasonInvalidObjectStorageSchema,
+			Requeue: false,
+		}
+	}
+
+	objStore.Schemas = storageSchemas
+
+	if stack.Spec.Storage.TLS == nil {
+		return objStore, nil
+	}
+
+	tlsConfig := stack.Spec.Storage.TLS
+	if tlsConfig.CA == "" {
+		return storage.Options{}, &status.DegradedError{
+			Message: "Missing object storage CA config map",
+			Reason:  lokiv1.ReasonMissingObjectStorageCAConfigMap,
+			Requeue: false,
+		}
+	}
+
+	cm, err := getCAConfigMap(ctx, k, stack, tlsConfig.CA)
+	if err != nil {
+		return storage.Options{}, err
+	}
+
+	caKey := defaultCAKey
+	if tlsConfig.CAKey != "" {
+		caKey = tlsConfig.CAKey
+	}
+
+	var caHash string
+	caHash, err = checkCAConfigMap(cm, caKey)
+	if err != nil {
+		return storage.Options{}, &status.DegradedError{
+			Message: fmt.Sprintf("Invalid object storage CA configmap contents: %s", err),
+			Reason:  lokiv1.ReasonInvalidObjectStorageCAConfigMap,
+			Requeue: false,
+		}
+	}
+
+	objStore.SecretSHA1 = fmt.Sprintf("%s;%s", objStore.SecretSHA1, caHash)
+	objStore.TLS = &storage.TLSConfig{CA: cm.Name, Key: caKey}
+
+	return objStore, nil
+}
diff --git a/operator/internal/handlers/internal/storage/storage_test.go b/operator/internal/handlers/internal/storage/storage_test.go
new file mode 100644
index 000000000000..f56e446d6da8
--- /dev/null
+++ b/operator/internal/handlers/internal/storage/storage_test.go
@@ -0,0 +1,477 @@
+package storage
+
+import (
+
"context" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + configv1 "github.com/grafana/loki/operator/apis/config/v1" + lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" + "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes" + "github.com/grafana/loki/operator/internal/status" +) + +var ( + featureGates = configv1.FeatureGates{ + ServiceMonitors: false, + ServiceMonitorTLSEndpoints: false, + BuiltInCertManagement: configv1.BuiltInCertManagement{ + Enabled: true, + CACertValidity: "10m", + CACertRefresh: "5m", + CertValidity: "2m", + CertRefresh: "1m", + }, + } + + defaultSecret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-stack-secret", + Namespace: "some-ns", + }, + Data: map[string][]byte{ + "endpoint": []byte("s3://your-endpoint"), + "region": []byte("a-region"), + "bucketnames": []byte("bucket1,bucket2"), + "access_key_id": []byte("a-secret-id"), + "access_key_secret": []byte("a-secret-key"), + }, + } + + invalidSecret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-stack-secret", + Namespace: "some-ns", + }, + Data: map[string][]byte{}, + } + + invalidCAConfigMap = corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-stack-ca-configmap", + Namespace: "some-ns", + }, + Data: map[string]string{}, + } +) + +func TestBuildOptions_WhenMissingSecret_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + degradedErr := &status.DegradedError{ + Message: "Missing object storage secret", + Reason: lokiv1.ReasonMissingObjectStorageSecret, + Requeue: false, + } + + stack := &lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + }, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { + _, isLokiStack := object.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { + k.SetClientObject(object, stack) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + _, err := BuildOptions(context.TODO(), k, stack, featureGates) + + // make sure error is returned + require.Error(t, err) + require.Equal(t, degradedErr, err) +} + +func TestBuildOptions_WhenInvalidSecret_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + degradedErr := &status.DegradedError{ + Message: "Invalid object storage secret contents: missing secret field", + 
Reason: lokiv1.ReasonInvalidObjectStorageSecret, + Requeue: false, + } + + stack := &lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + }, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: invalidSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { + _, isLokiStack := object.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { + k.SetClientObject(object, stack) + return nil + } + if name.Name == invalidSecret.Name { + k.SetClientObject(object, &invalidSecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + _, err := BuildOptions(context.TODO(), k, stack, featureGates) + + // make sure error is returned + require.Error(t, err) + require.Equal(t, degradedErr, err) +} + +func TestBuildOptions_WithInvalidStorageSchema_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + degradedErr := &status.DegradedError{ + Message: "Invalid object storage schema contents: spec does not contain any schemas", + Reason: lokiv1.ReasonInvalidObjectStorageSchema, + Requeue: false, + } + + stack := &lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{}, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + }, + }, + Status: lokiv1.LokiStackStatus{ + Storage: lokiv1.LokiStackStorageStatus{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + { + Version: lokiv1.ObjectStorageSchemaV12, + EffectiveDate: "2021-10-11", + }, + }, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { + _, isLokiStack := object.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { + k.SetClientObject(object, stack) + return nil + } + if name.Name == defaultSecret.Name { + k.SetClientObject(object, &defaultSecret) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + _, err := BuildOptions(context.TODO(), k, stack, featureGates) + + // make sure error is returned + require.Error(t, err) + require.Equal(t, degradedErr, err) +} + +func TestBuildOptions_WhenMissingCAConfigMap_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + 
} + + degradedErr := &status.DegradedError{ + Message: "Missing object storage CA config map", + Reason: lokiv1.ReasonMissingObjectStorageCAConfigMap, + Requeue: false, + } + + stack := &lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + }, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + TLS: &lokiv1.ObjectStorageTLSSpec{ + CASpec: lokiv1.CASpec{ + CA: "not-existing", + }, + }, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { + _, isLokiStack := object.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { + k.SetClientObject(object, stack) + return nil + } + + if name.Name == defaultSecret.Name { + k.SetClientObject(object, &defaultSecret) + return nil + } + + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + _, err := BuildOptions(context.TODO(), k, stack, featureGates) + + // make sure error is returned + require.Error(t, err) + require.Equal(t, degradedErr, err) +} + +func TestBuildOptions_WhenEmptyCAConfigMapName_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + degradedErr := &status.DegradedError{ + Message: "Missing object storage CA config map", + Reason: lokiv1.ReasonMissingObjectStorageCAConfigMap, + Requeue: false, + } + + stack := &lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + }, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + TLS: &lokiv1.ObjectStorageTLSSpec{ + CASpec: lokiv1.CASpec{ + CA: "", + }, + }, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { + _, isLokiStack := object.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { + k.SetClientObject(object, stack) + return nil + } + + if name.Name == defaultSecret.Name { + k.SetClientObject(object, &defaultSecret) + return nil + } + + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + _, err := BuildOptions(context.TODO(), k, stack, featureGates) + + // make sure error is returned + require.Error(t, err) + require.Equal(t, degradedErr, err) +} + +func TestBuildOptions_WhenInvalidCAConfigMap_SetDegraded(t *testing.T) { + sw := &k8sfakes.FakeStatusWriter{} + k := &k8sfakes.FakeClient{} + r := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: 
"my-stack", + Namespace: "some-ns", + }, + } + + degradedErr := &status.DegradedError{ + Message: "Invalid object storage CA configmap contents: key not present or data empty: service-ca.crt", + Reason: lokiv1.ReasonInvalidObjectStorageCAConfigMap, + Requeue: false, + } + + stack := &lokiv1.LokiStack{ + TypeMeta: metav1.TypeMeta{ + Kind: "LokiStack", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", + }, + Spec: lokiv1.LokiStackSpec{ + Size: lokiv1.SizeOneXExtraSmall, + Storage: lokiv1.ObjectStorageSpec{ + Schemas: []lokiv1.ObjectStorageSchema{ + { + Version: lokiv1.ObjectStorageSchemaV11, + EffectiveDate: "2020-10-11", + }, + }, + Secret: lokiv1.ObjectStorageSecretSpec{ + Name: defaultSecret.Name, + Type: lokiv1.ObjectStorageSecretS3, + }, + TLS: &lokiv1.ObjectStorageTLSSpec{ + CASpec: lokiv1.CASpec{ + CA: invalidCAConfigMap.Name, + }, + }, + }, + }, + } + + k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { + _, isLokiStack := object.(*lokiv1.LokiStack) + if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { + k.SetClientObject(object, stack) + return nil + } + if name.Name == defaultSecret.Name { + k.SetClientObject(object, &defaultSecret) + return nil + } + + if name.Name == invalidCAConfigMap.Name { + k.SetClientObject(object, &invalidCAConfigMap) + return nil + } + return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + } + + k.StatusStub = func() client.StatusWriter { return sw } + + _, err := BuildOptions(context.TODO(), k, stack, featureGates) + + // make sure error is returned + require.Error(t, err) + require.Equal(t, degradedErr, err) +} diff --git a/operator/internal/handlers/lokistack_check_cert_expiry_test.go b/operator/internal/handlers/lokistack_check_cert_expiry_test.go index 400b6244d3af..6b6d35c8b6bc 100644 --- a/operator/internal/handlers/lokistack_check_cert_expiry_test.go +++ b/operator/internal/handlers/lokistack_check_cert_expiry_test.go @@ -12,7 +12,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -158,8 +158,8 @@ func TestCheckCertExpiry_WhenGetOptionsReturnsCABUndleNotFound_DoesNotError(t *t Kind: "LokiStack", Name: "my-stack", UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, }, }, diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go index 49c84af4dcf4..2f78f75d02c5 100644 --- a/operator/internal/handlers/lokistack_create_or_update.go +++ b/operator/internal/handlers/lokistack_create_or_update.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "os" - "time" "github.com/ViaQ/logerr/v2/kverrors" "github.com/go-logr/logr" @@ -20,22 +19,15 @@ import ( lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/external/k8s" "github.com/grafana/loki/operator/internal/handlers/internal/gateway" - "github.com/grafana/loki/operator/internal/handlers/internal/openshift" "github.com/grafana/loki/operator/internal/handlers/internal/rules" "github.com/grafana/loki/operator/internal/handlers/internal/serviceaccounts" 
"github.com/grafana/loki/operator/internal/handlers/internal/storage" "github.com/grafana/loki/operator/internal/handlers/internal/tlsprofile" "github.com/grafana/loki/operator/internal/manifests" - manifests_openshift "github.com/grafana/loki/operator/internal/manifests/openshift" - storageoptions "github.com/grafana/loki/operator/internal/manifests/storage" "github.com/grafana/loki/operator/internal/metrics" "github.com/grafana/loki/operator/internal/status" ) -const ( - defaultCAKey = "service-ca.crt" -) - // CreateOrUpdateLokiStack handles LokiStack create and update events. func CreateOrUpdateLokiStack( ctx context.Context, @@ -67,202 +59,23 @@ func CreateOrUpdateLokiStack( gwImg = manifests.DefaultLokiStackGatewayImage } - var storageSecret corev1.Secret - key := client.ObjectKey{Name: stack.Spec.Storage.Secret.Name, Namespace: stack.Namespace} - if err := k.Get(ctx, key, &storageSecret); err != nil { - if apierrors.IsNotFound(err) { - return &status.DegradedError{ - Message: "Missing object storage secret", - Reason: lokiv1.ReasonMissingObjectStorageSecret, - Requeue: false, - } - } - return kverrors.Wrap(err, "failed to lookup lokistack storage secret", "name", key) - } - - objStore, err := storage.ExtractSecret(&storageSecret, stack.Spec.Storage.Secret.Type) + objStore, err := storage.BuildOptions(ctx, k, &stack, fg) if err != nil { - return &status.DegradedError{ - Message: fmt.Sprintf("Invalid object storage secret contents: %s", err), - Reason: lokiv1.ReasonInvalidObjectStorageSecret, - Requeue: false, - } + return err } - objStore.OpenShiftEnabled = fg.OpenShift.Enabled - storageSchemas, err := storageoptions.BuildSchemaConfig( - time.Now().UTC(), - stack.Spec.Storage, - stack.Status.Storage, - ) + baseDomain, tenants, err := gateway.BuildOptions(ctx, ll, k, &stack, fg) if err != nil { - return &status.DegradedError{ - Message: fmt.Sprintf("Invalid object storage schema contents: %s", err), - Reason: lokiv1.ReasonInvalidObjectStorageSchema, - Requeue: false, - } - } - - objStore.Schemas = storageSchemas - - if stack.Spec.Storage.TLS != nil { - tlsConfig := stack.Spec.Storage.TLS - - if tlsConfig.CA == "" { - return &status.DegradedError{ - Message: "Missing object storage CA config map", - Reason: lokiv1.ReasonMissingObjectStorageCAConfigMap, - Requeue: false, - } - } - - var cm corev1.ConfigMap - key := client.ObjectKey{Name: tlsConfig.CA, Namespace: stack.Namespace} - if err = k.Get(ctx, key, &cm); err != nil { - if apierrors.IsNotFound(err) { - return &status.DegradedError{ - Message: "Missing object storage CA config map", - Reason: lokiv1.ReasonMissingObjectStorageCAConfigMap, - Requeue: false, - } - } - return kverrors.Wrap(err, "failed to lookup lokistack object storage CA config map", "name", key) - } - - caKey := defaultCAKey - if tlsConfig.CAKey != "" { - caKey = tlsConfig.CAKey - } - - if !storage.IsValidCAConfigMap(&cm, caKey) { - return &status.DegradedError{ - Message: "Invalid object storage CA configmap contents: missing key or no contents", - Reason: lokiv1.ReasonInvalidObjectStorageCAConfigMap, - Requeue: false, - } - } - - objStore.TLS = &storageoptions.TLSConfig{CA: cm.Name, Key: caKey} + return err } - var ( - baseDomain string - tenantSecrets []*manifests.TenantSecrets - tenantConfigs map[string]manifests.TenantConfig - ) - if fg.LokiStackGateway && stack.Spec.Tenants == nil { - return &status.DegradedError{ - Message: "Invalid tenants configuration - TenantsSpec cannot be nil when gateway flag is enabled", - Reason: 
lokiv1.ReasonInvalidTenantsConfiguration, - Requeue: false, - } - } else if fg.LokiStackGateway && stack.Spec.Tenants != nil { - if err = gateway.ValidateModes(stack); err != nil { - return &status.DegradedError{ - Message: fmt.Sprintf("Invalid tenants configuration: %s", err), - Reason: lokiv1.ReasonInvalidTenantsConfiguration, - Requeue: false, - } - } - - switch stack.Spec.Tenants.Mode { - case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork: - baseDomain, err = gateway.GetOpenShiftBaseDomain(ctx, k, req) - if err != nil { - return err - } - - if stack.Spec.Proxy == nil { - // If the LokiStack has no proxy set but there is a cluster-wide proxy setting, - // set the LokiStack proxy to that. - ocpProxy, proxyErr := openshift.GetProxy(ctx, k) - if proxyErr != nil { - return proxyErr - } - - stack.Spec.Proxy = ocpProxy - } - default: - tenantSecrets, err = gateway.GetTenantSecrets(ctx, k, req, &stack) - if err != nil { - return err - } - } - - // extract the existing tenant's id, cookieSecret if exists, otherwise create new. - tenantConfigs, err = gateway.GetTenantConfigSecretData(ctx, k, req) - if err != nil { - ll.Error(err, "error in getting tenant secret data") - } + if err = rules.Cleanup(ctx, ll, k, &stack); err != nil { + return err } - var ( - alertingRules []lokiv1.AlertingRule - recordingRules []lokiv1.RecordingRule - rulerConfig *lokiv1.RulerConfigSpec - rulerSecret *manifests.RulerSecret - ocpAmEnabled bool - ocpUWAmEnabled bool - ) - if stack.Spec.Rules != nil && stack.Spec.Rules.Enabled { - alertingRules, recordingRules, err = rules.List(ctx, k, req.Namespace, stack.Spec.Rules) - if err != nil { - ll.Error(err, "failed to lookup rules", "spec", stack.Spec.Rules) - } - - rulerConfig, err = rules.GetRulerConfig(ctx, k, req) - if err != nil { - ll.Error(err, "failed to lookup ruler config", "key", req.NamespacedName) - } - - if rulerConfig != nil && rulerConfig.RemoteWriteSpec != nil && rulerConfig.RemoteWriteSpec.ClientSpec != nil { - var rs corev1.Secret - key := client.ObjectKey{Name: rulerConfig.RemoteWriteSpec.ClientSpec.AuthorizationSecretName, Namespace: stack.Namespace} - if err = k.Get(ctx, key, &rs); err != nil { - if apierrors.IsNotFound(err) { - return &status.DegradedError{ - Message: "Missing ruler remote write authorization secret", - Reason: lokiv1.ReasonMissingRulerSecret, - Requeue: false, - } - } - return kverrors.Wrap(err, "failed to lookup lokistack ruler secret", "name", key) - } - - rulerSecret, err = rules.ExtractRulerSecret(&rs, rulerConfig.RemoteWriteSpec.ClientSpec.AuthorizationType) - if err != nil { - return &status.DegradedError{ - Message: "Invalid ruler remote write authorization secret contents", - Reason: lokiv1.ReasonInvalidRulerSecret, - Requeue: false, - } - } - } - - ocpAmEnabled, err = openshift.AlertManagerSVCExists(ctx, stack.Spec, k) - if err != nil { - ll.Error(err, "failed to check OCP AlertManager") - return err - } - - ocpUWAmEnabled, err = openshift.UserWorkloadAlertManagerSVCExists(ctx, stack.Spec, k) - if err != nil { - ll.Error(err, "failed to check OCP User Workload AlertManager") - return err - } - } else { - // Clean up ruler resources - err = rules.RemoveRulesConfigMap(ctx, req, k) - if err != nil { - ll.Error(err, "failed to remove rules ConfigMap") - return err - } - - err = rules.RemoveRuler(ctx, req, k) - if err != nil { - ll.Error(err, "failed to remove ruler StatefulSet") - return err - } + alertingRules, recordingRules, ruler, ocpOptions, err := rules.BuildOptions(ctx, ll, k, &stack) + if err != nil { + return err } 
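The hunk above collapses roughly two hundred lines of inline lookups into three helpers: `storage.BuildOptions`, `gateway.BuildOptions`, and `rules.BuildOptions`. Each helper performs its own Kubernetes reads and maps user-facing misconfiguration to a `*status.DegradedError`, which `CreateOrUpdateLokiStack` now returns unchanged. A minimal sketch of the shape these helpers share, reusing only lookup logic visible in the removed code (the helper name `checkStorageSecret` is illustrative, not an actual function in the patch):

```go
// Illustrative only: the validate-and-degrade shape that the new
// BuildOptions helpers encapsulate. Types, reasons, and messages are
// taken from the inline code removed in this hunk.
func checkStorageSecret(ctx context.Context, k k8s.Client, stack *lokiv1.LokiStack) error {
	var storageSecret corev1.Secret
	key := client.ObjectKey{Name: stack.Spec.Storage.Secret.Name, Namespace: stack.Namespace}
	if err := k.Get(ctx, key, &storageSecret); err != nil {
		if apierrors.IsNotFound(err) {
			// A DegradedError is surfaced on the LokiStack status rather
			// than retried, hence Requeue: false.
			return &status.DegradedError{
				Message: "Missing object storage secret",
				Reason:  lokiv1.ReasonMissingObjectStorageSecret,
				Requeue: false,
			}
		}
		return kverrors.Wrap(err, "failed to lookup lokistack storage secret", "name", key)
	}
	return nil
}
```

Centralising the checks this way also lets the degraded-path tests move next to the helpers, as the `TestBuildOptions_*` functions above do, instead of exercising the whole handler.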
certRotationRequiredAt := "" @@ -289,25 +102,14 @@ func CreateOrUpdateLokiStack( GatewayBaseDomain: baseDomain, Stack: stack.Spec, Gates: fg, - ObjectStorage: *objStore, + ObjectStorage: objStore, CertRotationRequiredAt: certRotationRequiredAt, AlertingRules: alertingRules, RecordingRules: recordingRules, - Ruler: manifests.Ruler{ - Spec: rulerConfig, - Secret: rulerSecret, - }, - Timeouts: timeoutConfig, - Tenants: manifests.Tenants{ - Secrets: tenantSecrets, - Configs: tenantConfigs, - }, - OpenShiftOptions: manifests_openshift.Options{ - BuildOpts: manifests_openshift.BuildOptions{ - AlertManagerEnabled: ocpAmEnabled, - UserWorkloadAlertManagerEnabled: ocpUWAmEnabled, - }, - }, + Ruler: ruler, + Timeouts: timeoutConfig, + Tenants: tenants, + OpenShiftOptions: ocpOptions, } ll.Info("begin building manifests") @@ -354,7 +156,7 @@ func CreateOrUpdateLokiStack( // updated and another resource is not. This would cause the status to // be possibly misaligned with the configmap, which could lead to // a user possibly being unable to read logs. - if err := status.SetStorageSchemaStatus(ctx, k, req, storageSchemas); err != nil { + if err := status.SetStorageSchemaStatus(ctx, k, req, objStore.Schemas); err != nil { ll.Error(err, "failed to set storage schema status") return err } @@ -406,9 +208,9 @@ func CreateOrUpdateLokiStack( return kverrors.New("failed to configure lokistack resources", "name", req.NamespacedName) } - // 1x.extra-small is used only for development, so the metrics will not + // 1x.demo is used only for development, so the metrics will not // be collected. - if opts.Stack.Size != lokiv1.SizeOneXExtraSmall && opts.Stack.Size != lokiv1.SizeOneXDemo { + if opts.Stack.Size != lokiv1.SizeOneXDemo { metrics.Collect(&opts.Stack, opts.Name) } diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go index 79928b4a82e5..4ba9a9affc36 100644 --- a/operator/internal/handlers/lokistack_create_or_update_test.go +++ b/operator/internal/handlers/lokistack_create_or_update_test.go @@ -13,7 +13,6 @@ import ( routev1 "github.com/openshift/api/route/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -73,42 +72,6 @@ var ( "issuerCAPath": []byte("/tmp/test/ca.pem"), }, } - - rulesCM = corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - Kind: "ConfigMap", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack-rules-0", - Namespace: "some-ns", - }, - } - - rulerSS = appsv1.StatefulSet{ - TypeMeta: metav1.TypeMeta{ - Kind: "StatefulSet", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack-ruler", - Namespace: "some-ns", - }, - } - - invalidSecret = corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "some-stack-secret", - Namespace: "some-ns", - }, - Data: map[string][]byte{}, - } - - invalidCAConfigMap = corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "some-stack-ca-configmap", - Namespace: "some-ns", - }, - Data: map[string]string{}, - } ) func TestMain(m *testing.M) { @@ -341,8 +304,8 @@ func 
TestCreateOrUpdateLokiStack_SetsOwnerRefOnAllObjects(t *testing.T) { Kind: stack.Kind, Name: stack.Name, UID: stack.UID, - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), } k.CreateStub = func(_ context.Context, o client.Object, _ ...client.CreateOption) error { @@ -488,8 +451,8 @@ func TestCreateOrUpdateLokiStack_WhenGetReturnsNoError_UpdateObjects(t *testing. Kind: "LokiStack", Name: "my-stack", UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, }, }, @@ -573,8 +536,6 @@ func TestCreateOrUpdateLokiStack_WhenCreateReturnsError_ContinueWithOtherObjects }, } - // GetStub looks up the CR first, so we need to return our fake stack - // return NotFound for everything else to trigger create. k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { _, isLokiStack := object.(*lokiv1.LokiStack) if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { @@ -659,8 +620,8 @@ func TestCreateOrUpdateLokiStack_WhenUpdateReturnsError_ContinueWithOtherObjects Kind: "LokiStack", Name: "someStack", UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, }, }, @@ -681,8 +642,6 @@ func TestCreateOrUpdateLokiStack_WhenUpdateReturnsError_ContinueWithOtherObjects }, } - // GetStub looks up the CR first, so we need to return our fake stack - // return NotFound for everything else to trigger create. k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { _, isLokiStack := object.(*lokiv1.LokiStack) if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { @@ -710,69 +669,7 @@ func TestCreateOrUpdateLokiStack_WhenUpdateReturnsError_ContinueWithOtherObjects require.Error(t, err) } -func TestCreateOrUpdateLokiStack_WhenMissingSecret_SetDegraded(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - degradedErr := &status.DegradedError{ - Message: "Missing object storage secret", - Reason: lokiv1.ReasonMissingObjectStorageSecret, - Requeue: false, - } - - stack := &lokiv1.LokiStack{ - TypeMeta: metav1.TypeMeta{ - Kind: "LokiStack", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - }, - Spec: lokiv1.LokiStackSpec{ - Size: lokiv1.SizeOneXExtraSmall, - Storage: lokiv1.ObjectStorageSpec{ - Schemas: []lokiv1.ObjectStorageSchema{ - { - Version: lokiv1.ObjectStorageSchemaV11, - EffectiveDate: "2020-10-11", - }, - }, - Secret: lokiv1.ObjectStorageSecretSpec{ - Name: defaultSecret.Name, - Type: lokiv1.ObjectStorageSecretS3, - }, - }, - }, - } - - // GetStub looks up the CR first, so we need to return our fake stack - // return NotFound for everything else to trigger create. 
- k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - _, isLokiStack := object.(*lokiv1.LokiStack) - if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { - k.SetClientObject(object, stack) - return nil - } - return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") - } - - k.StatusStub = func() client.StatusWriter { return sw } - - err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates) - - // make sure error is returned - require.Error(t, err) - require.Equal(t, degradedErr, err) -} - -func TestCreateOrUpdateLokiStack_WhenInvalidSecret_SetDegraded(t *testing.T) { +func TestCreateOrUpdateLokiStack_WhenInvalidQueryTimeout_SetDegraded(t *testing.T) { sw := &k8sfakes.FakeStatusWriter{} k := &k8sfakes.FakeClient{} r := ctrl.Request{ @@ -783,8 +680,8 @@ func TestCreateOrUpdateLokiStack_WhenInvalidSecret_SetDegraded(t *testing.T) { } degradedErr := &status.DegradedError{ - Message: "Invalid object storage secret contents: missing secret field", - Reason: lokiv1.ReasonInvalidObjectStorageSecret, + Message: `Error parsing query timeout: time: invalid duration "invalid"`, + Reason: lokiv1.ReasonQueryTimeoutInvalid, Requeue: false, } @@ -802,179 +699,37 @@ func TestCreateOrUpdateLokiStack_WhenInvalidSecret_SetDegraded(t *testing.T) { Storage: lokiv1.ObjectStorageSpec{ Schemas: []lokiv1.ObjectStorageSchema{ { - Version: lokiv1.ObjectStorageSchemaV11, - EffectiveDate: "2020-10-11", + Version: lokiv1.ObjectStorageSchemaV12, + EffectiveDate: "2023-05-22", }, }, - Secret: lokiv1.ObjectStorageSecretSpec{ - Name: invalidSecret.Name, - Type: lokiv1.ObjectStorageSecretS3, - }, - }, - }, - } - - // GetStub looks up the CR first, so we need to return our fake stack - // return NotFound for everything else to trigger create. 
- k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - _, isLokiStack := object.(*lokiv1.LokiStack) - if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { - k.SetClientObject(object, stack) - return nil - } - if name.Name == invalidSecret.Name { - k.SetClientObject(object, &invalidSecret) - return nil - } - return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") - } - - k.StatusStub = func() client.StatusWriter { return sw } - - err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates) - - // make sure error is returned - require.Error(t, err) - require.Equal(t, degradedErr, err) -} - -func TestCreateOrUpdateLokiStack_WithInvalidStorageSchema_SetDegraded(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - degradedErr := &status.DegradedError{ - Message: "Invalid object storage schema contents: spec does not contain any schemas", - Reason: lokiv1.ReasonInvalidObjectStorageSchema, - Requeue: false, - } - - stack := &lokiv1.LokiStack{ - TypeMeta: metav1.TypeMeta{ - Kind: "LokiStack", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - }, - Spec: lokiv1.LokiStackSpec{ - Size: lokiv1.SizeOneXExtraSmall, - Storage: lokiv1.ObjectStorageSpec{ - Schemas: []lokiv1.ObjectStorageSchema{}, Secret: lokiv1.ObjectStorageSecretSpec{ Name: defaultSecret.Name, Type: lokiv1.ObjectStorageSecretS3, }, }, - }, - Status: lokiv1.LokiStackStatus{ - Storage: lokiv1.LokiStackStorageStatus{ - Schemas: []lokiv1.ObjectStorageSchema{ - { - Version: lokiv1.ObjectStorageSchemaV11, - EffectiveDate: "2020-10-11", - }, - { - Version: lokiv1.ObjectStorageSchemaV12, - EffectiveDate: "2021-10-11", - }, - }, + Tenants: &lokiv1.TenantsSpec{ + Mode: "openshift", }, - }, - } - - // GetStub looks up the CR first, so we need to return our fake stack - // return NotFound for everything else to trigger create. 
- k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - _, isLokiStack := object.(*lokiv1.LokiStack) - if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { - k.SetClientObject(object, stack) - return nil - } - if name.Name == defaultSecret.Name { - k.SetClientObject(object, &defaultSecret) - return nil - } - return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") - } - - k.StatusStub = func() client.StatusWriter { return sw } - - err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates) - - // make sure error is returned - require.Error(t, err) - require.Equal(t, degradedErr, err) -} - -func TestCreateOrUpdateLokiStack_WhenMissingCAConfigMap_SetDegraded(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - degradedErr := &status.DegradedError{ - Message: "Missing object storage CA config map", - Reason: lokiv1.ReasonMissingObjectStorageCAConfigMap, - Requeue: false, - } - - stack := &lokiv1.LokiStack{ - TypeMeta: metav1.TypeMeta{ - Kind: "LokiStack", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - }, - Spec: lokiv1.LokiStackSpec{ - Size: lokiv1.SizeOneXExtraSmall, - Storage: lokiv1.ObjectStorageSpec{ - Schemas: []lokiv1.ObjectStorageSchema{ - { - Version: lokiv1.ObjectStorageSchemaV11, - EffectiveDate: "2020-10-11", - }, - }, - Secret: lokiv1.ObjectStorageSecretSpec{ - Name: defaultSecret.Name, - Type: lokiv1.ObjectStorageSecretS3, - }, - TLS: &lokiv1.ObjectStorageTLSSpec{ - CASpec: lokiv1.CASpec{ - CA: "not-existing", + Limits: &lokiv1.LimitsSpec{ + Global: &lokiv1.LimitsTemplateSpec{ + QueryLimits: &lokiv1.QueryLimitSpec{ + QueryTimeout: "invalid", }, }, }, }, } - // GetStub looks up the CR first, so we need to return our fake stack - // return NotFound for everything else to trigger create. 
+ // Create looks up the CR first, so we need to return our fake stack k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - _, isLokiStack := object.(*lokiv1.LokiStack) - if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { + if r.Name == name.Name && r.Namespace == name.Namespace { k.SetClientObject(object, stack) - return nil } - - if name.Name == defaultSecret.Name { + if defaultSecret.Name == name.Name { k.SetClientObject(object, &defaultSecret) - return nil } - - return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") + return nil } k.StatusStub = func() client.StatusWriter { return sw } @@ -985,642 +740,3 @@ func TestCreateOrUpdateLokiStack_WhenMissingCAConfigMap_SetDegraded(t *testing.T require.Error(t, err) require.Equal(t, degradedErr, err) } - -func TestCreateOrUpdateLokiStack_WhenInvalidCAConfigMap_SetDegraded(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - degradedErr := &status.DegradedError{ - Message: "Invalid object storage CA configmap contents: missing key or no contents", - Reason: lokiv1.ReasonInvalidObjectStorageCAConfigMap, - Requeue: false, - } - - stack := &lokiv1.LokiStack{ - TypeMeta: metav1.TypeMeta{ - Kind: "LokiStack", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - }, - Spec: lokiv1.LokiStackSpec{ - Size: lokiv1.SizeOneXExtraSmall, - Storage: lokiv1.ObjectStorageSpec{ - Schemas: []lokiv1.ObjectStorageSchema{ - { - Version: lokiv1.ObjectStorageSchemaV11, - EffectiveDate: "2020-10-11", - }, - }, - Secret: lokiv1.ObjectStorageSecretSpec{ - Name: defaultSecret.Name, - Type: lokiv1.ObjectStorageSecretS3, - }, - TLS: &lokiv1.ObjectStorageTLSSpec{ - CASpec: lokiv1.CASpec{ - CA: invalidCAConfigMap.Name, - }, - }, - }, - }, - } - - // GetStub looks up the CR first, so we need to return our fake stack - // return NotFound for everything else to trigger create. 
- k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - _, isLokiStack := object.(*lokiv1.LokiStack) - if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { - k.SetClientObject(object, stack) - return nil - } - if name.Name == defaultSecret.Name { - k.SetClientObject(object, &defaultSecret) - return nil - } - - if name.Name == invalidCAConfigMap.Name { - k.SetClientObject(object, &invalidCAConfigMap) - return nil - } - return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") - } - - k.StatusStub = func() client.StatusWriter { return sw } - - err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates) - - // make sure error is returned - require.Error(t, err) - require.Equal(t, degradedErr, err) -} - -func TestCreateOrUpdateLokiStack_WhenInvalidTenantsConfiguration_SetDegraded(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - degradedErr := &status.DegradedError{ - Message: "Invalid tenants configuration: mandatory configuration - missing OPA Url", - Reason: lokiv1.ReasonInvalidTenantsConfiguration, - Requeue: false, - } - - ff := configv1.FeatureGates{ - LokiStackGateway: true, - } - - stack := &lokiv1.LokiStack{ - TypeMeta: metav1.TypeMeta{ - Kind: "LokiStack", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - }, - Spec: lokiv1.LokiStackSpec{ - Size: lokiv1.SizeOneXExtraSmall, - Storage: lokiv1.ObjectStorageSpec{ - Schemas: []lokiv1.ObjectStorageSchema{ - { - Version: lokiv1.ObjectStorageSchemaV11, - EffectiveDate: "2020-10-11", - }, - }, - Secret: lokiv1.ObjectStorageSecretSpec{ - Name: defaultSecret.Name, - Type: lokiv1.ObjectStorageSecretS3, - }, - }, - Tenants: &lokiv1.TenantsSpec{ - Mode: "dynamic", - Authentication: []lokiv1.AuthenticationSpec{ - { - TenantName: "test", - TenantID: "1234", - OIDC: &lokiv1.OIDCSpec{ - Secret: &lokiv1.TenantSecretSpec{ - Name: defaultGatewaySecret.Name, - }, - }, - }, - }, - Authorization: nil, - }, - }, - } - - // GetStub looks up the CR first, so we need to return our fake stack - // return NotFound for everything else to trigger create. 
- k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - _, isLokiStack := object.(*lokiv1.LokiStack) - if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { - k.SetClientObject(object, stack) - return nil - } - if defaultSecret.Name == name.Name { - k.SetClientObject(object, &defaultSecret) - return nil - } - return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") - } - - k.StatusStub = func() client.StatusWriter { return sw } - - err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, ff) - - // make sure error is returned - require.Error(t, err) - require.Equal(t, degradedErr, err) -} - -func TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - degradedErr := &status.DegradedError{ - Message: "Missing secrets for tenant test", - Reason: lokiv1.ReasonMissingGatewayTenantSecret, - Requeue: true, - } - - ff := configv1.FeatureGates{ - LokiStackGateway: true, - } - - stack := &lokiv1.LokiStack{ - TypeMeta: metav1.TypeMeta{ - Kind: "LokiStack", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - }, - Spec: lokiv1.LokiStackSpec{ - Size: lokiv1.SizeOneXExtraSmall, - Storage: lokiv1.ObjectStorageSpec{ - Schemas: []lokiv1.ObjectStorageSchema{ - { - Version: lokiv1.ObjectStorageSchemaV11, - EffectiveDate: "2020-10-11", - }, - }, - Secret: lokiv1.ObjectStorageSecretSpec{ - Name: defaultSecret.Name, - Type: lokiv1.ObjectStorageSecretS3, - }, - }, - Tenants: &lokiv1.TenantsSpec{ - Mode: "dynamic", - Authentication: []lokiv1.AuthenticationSpec{ - { - TenantName: "test", - TenantID: "1234", - OIDC: &lokiv1.OIDCSpec{ - Secret: &lokiv1.TenantSecretSpec{ - Name: defaultGatewaySecret.Name, - }, - }, - }, - }, - Authorization: &lokiv1.AuthorizationSpec{ - OPA: &lokiv1.OPASpec{ - URL: "some-url", - }, - }, - }, - }, - } - - // GetStub looks up the CR first, so we need to return our fake stack - // return NotFound for everything else to trigger create. 
- k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - o, ok := object.(*lokiv1.LokiStack) - if r.Name == name.Name && r.Namespace == name.Namespace && ok { - k.SetClientObject(o, stack) - return nil - } - if defaultSecret.Name == name.Name { - k.SetClientObject(object, &defaultSecret) - return nil - } - return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") - } - - k.StatusStub = func() client.StatusWriter { return sw } - - err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, ff) - - // make sure error is returned to re-trigger reconciliation - require.Error(t, err) - require.Equal(t, degradedErr, err) -} - -func TestCreateOrUpdateLokiStack_WhenInvalidGatewaySecret_SetDegraded(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - degradedErr := &status.DegradedError{ - Message: "Invalid gateway tenant secret contents", - Reason: lokiv1.ReasonInvalidGatewayTenantSecret, - Requeue: true, - } - - ff := configv1.FeatureGates{ - LokiStackGateway: true, - } - - stack := &lokiv1.LokiStack{ - TypeMeta: metav1.TypeMeta{ - Kind: "LokiStack", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - }, - Spec: lokiv1.LokiStackSpec{ - Size: lokiv1.SizeOneXExtraSmall, - Storage: lokiv1.ObjectStorageSpec{ - Schemas: []lokiv1.ObjectStorageSchema{ - { - Version: lokiv1.ObjectStorageSchemaV11, - EffectiveDate: "2020-10-11", - }, - }, - Secret: lokiv1.ObjectStorageSecretSpec{ - Name: defaultSecret.Name, - Type: lokiv1.ObjectStorageSecretS3, - }, - }, - Tenants: &lokiv1.TenantsSpec{ - Mode: "dynamic", - Authentication: []lokiv1.AuthenticationSpec{ - { - TenantName: "test", - TenantID: "1234", - OIDC: &lokiv1.OIDCSpec{ - Secret: &lokiv1.TenantSecretSpec{ - Name: invalidSecret.Name, - }, - }, - }, - }, - Authorization: &lokiv1.AuthorizationSpec{ - OPA: &lokiv1.OPASpec{ - URL: "some-url", - }, - }, - }, - }, - } - - // GetStub looks up the CR first, so we need to return our fake stack - // return NotFound for everything else to trigger create. 
- k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - o, ok := object.(*lokiv1.LokiStack) - if r.Name == name.Name && r.Namespace == name.Namespace && ok { - k.SetClientObject(o, stack) - return nil - } - if defaultSecret.Name == name.Name { - k.SetClientObject(object, &defaultSecret) - return nil - } - if name.Name == invalidSecret.Name { - k.SetClientObject(object, &invalidSecret) - return nil - } - return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") - } - - k.StatusStub = func() client.StatusWriter { return sw } - - err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, ff) - - // make sure error is returned to re-trigger reconciliation - require.Error(t, err) - require.Equal(t, degradedErr, err) -} - -func TestCreateOrUpdateLokiStack_MissingTenantsSpec_SetDegraded(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - degradedErr := &status.DegradedError{ - Message: "Invalid tenants configuration - TenantsSpec cannot be nil when gateway flag is enabled", - Reason: lokiv1.ReasonInvalidTenantsConfiguration, - Requeue: false, - } - - ff := configv1.FeatureGates{ - LokiStackGateway: true, - } - - stack := &lokiv1.LokiStack{ - TypeMeta: metav1.TypeMeta{ - Kind: "LokiStack", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - }, - Spec: lokiv1.LokiStackSpec{ - Size: lokiv1.SizeOneXExtraSmall, - Storage: lokiv1.ObjectStorageSpec{ - Schemas: []lokiv1.ObjectStorageSchema{ - { - Version: lokiv1.ObjectStorageSchemaV11, - EffectiveDate: "2020-10-11", - }, - }, - Secret: lokiv1.ObjectStorageSecretSpec{ - Name: defaultSecret.Name, - Type: lokiv1.ObjectStorageSecretS3, - }, - }, - Tenants: nil, - }, - } - - // GetStub looks up the CR first, so we need to return our fake stack - // return NotFound for everything else to trigger create. 
- k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - o, ok := object.(*lokiv1.LokiStack) - if r.Name == name.Name && r.Namespace == name.Namespace && ok { - k.SetClientObject(o, stack) - return nil - } - if defaultSecret.Name == name.Name { - k.SetClientObject(object, &defaultSecret) - return nil - } - return apierrors.NewNotFound(schema.GroupResource{}, "something is not found") - } - - k.StatusStub = func() client.StatusWriter { return sw } - - err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, ff) - - // make sure error is returned - require.Error(t, err) - require.Equal(t, degradedErr, err) -} - -func TestCreateOrUpdateLokiStack_WhenInvalidQueryTimeout_SetDegraded(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - degradedErr := &status.DegradedError{ - Message: `Error parsing query timeout: time: invalid duration "invalid"`, - Reason: lokiv1.ReasonQueryTimeoutInvalid, - Requeue: false, - } - - stack := &lokiv1.LokiStack{ - TypeMeta: metav1.TypeMeta{ - Kind: "LokiStack", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - }, - Spec: lokiv1.LokiStackSpec{ - Size: lokiv1.SizeOneXExtraSmall, - Storage: lokiv1.ObjectStorageSpec{ - Schemas: []lokiv1.ObjectStorageSchema{ - { - Version: lokiv1.ObjectStorageSchemaV12, - EffectiveDate: "2023-05-22", - }, - }, - Secret: lokiv1.ObjectStorageSecretSpec{ - Name: defaultSecret.Name, - Type: lokiv1.ObjectStorageSecretS3, - }, - }, - Tenants: &lokiv1.TenantsSpec{ - Mode: "openshift", - }, - Limits: &lokiv1.LimitsSpec{ - Global: &lokiv1.LimitsTemplateSpec{ - QueryLimits: &lokiv1.QueryLimitSpec{ - QueryTimeout: "invalid", - }, - }, - }, - }, - } - - // Create looks up the CR first, so we need to return our fake stack - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - if r.Name == name.Name && r.Namespace == name.Namespace { - k.SetClientObject(object, stack) - } - if defaultSecret.Name == name.Name { - k.SetClientObject(object, &defaultSecret) - } - return nil - } - - k.StatusStub = func() client.StatusWriter { return sw } - - err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates) - - // make sure error is returned - require.Error(t, err) - require.Equal(t, degradedErr, err) -} - -func TestCreateOrUpdateLokiStack_RemovesRulerResourcesWhenDisabled(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - stack := lokiv1.LokiStack{ - TypeMeta: metav1.TypeMeta{ - Kind: "LokiStack", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - }, - Spec: lokiv1.LokiStackSpec{ - Size: lokiv1.SizeOneXExtraSmall, - Storage: lokiv1.ObjectStorageSpec{ - Schemas: []lokiv1.ObjectStorageSchema{ - { - Version: lokiv1.ObjectStorageSchemaV11, - EffectiveDate: "2020-10-11", - }, - }, - Secret: lokiv1.ObjectStorageSecretSpec{ - Name: defaultSecret.Name, - Type: lokiv1.ObjectStorageSecretS3, - }, - }, - Rules: &lokiv1.RulesSpec{ - Enabled: true, - }, - Tenants: &lokiv1.TenantsSpec{ - Mode: "dynamic", - Authentication: []lokiv1.AuthenticationSpec{ - { - 
TenantName: "test", - TenantID: "1234", - OIDC: &lokiv1.OIDCSpec{ - Secret: &lokiv1.TenantSecretSpec{ - Name: defaultGatewaySecret.Name, - }, - }, - }, - }, - Authorization: &lokiv1.AuthorizationSpec{ - OPA: &lokiv1.OPASpec{ - URL: "some-url", - }, - }, - }, - }, - } - - k.GetStub = func(_ context.Context, name types.NamespacedName, out client.Object, _ ...client.GetOption) error { - _, ok := out.(*lokiv1.RulerConfig) - if ok { - return apierrors.NewNotFound(schema.GroupResource{}, "no ruler config") - } - - _, isLokiStack := out.(*lokiv1.LokiStack) - if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { - k.SetClientObject(out, &stack) - return nil - } - if defaultSecret.Name == name.Name { - k.SetClientObject(out, &defaultSecret) - return nil - } - if defaultGatewaySecret.Name == name.Name { - k.SetClientObject(out, &defaultGatewaySecret) - return nil - } - return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") - } - - k.CreateStub = func(_ context.Context, o client.Object, _ ...client.CreateOption) error { - assert.Equal(t, r.Namespace, o.GetNamespace()) - return nil - } - - k.StatusStub = func() client.StatusWriter { return sw } - - k.DeleteStub = func(_ context.Context, o client.Object, _ ...client.DeleteOption) error { - assert.Equal(t, r.Namespace, o.GetNamespace()) - return nil - } - - k.ListStub = func(_ context.Context, list client.ObjectList, options ...client.ListOption) error { - switch list.(type) { - case *corev1.ConfigMapList: - k.SetClientObjectList(list, &corev1.ConfigMapList{ - Items: []corev1.ConfigMap{ - rulesCM, - }, - }) - } - return nil - } - - err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates) - require.NoError(t, err) - - // make sure create was called - require.NotZero(t, k.CreateCallCount()) - - // make sure delete not called - require.Zero(t, k.DeleteCallCount()) - - // Disable the ruler - stack.Spec.Rules.Enabled = false - - // Get should return ruler resources - k.GetStub = func(_ context.Context, name types.NamespacedName, out client.Object, _ ...client.GetOption) error { - _, ok := out.(*lokiv1.RulerConfig) - if ok { - return apierrors.NewNotFound(schema.GroupResource{}, "no ruler config") - } - if rulesCM.Name == name.Name { - k.SetClientObject(out, &rulesCM) - return nil - } - if rulerSS.Name == name.Name { - k.SetClientObject(out, &rulerSS) - return nil - } - - _, isLokiStack := out.(*lokiv1.LokiStack) - if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack { - k.SetClientObject(out, &stack) - return nil - } - if defaultSecret.Name == name.Name { - k.SetClientObject(out, &defaultSecret) - return nil - } - if defaultGatewaySecret.Name == name.Name { - k.SetClientObject(out, &defaultGatewaySecret) - return nil - } - return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") - } - err = CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates) - require.NoError(t, err) - - // make sure delete was called twice (delete rules configmap and ruler statefulset) - require.Equal(t, 2, k.DeleteCallCount()) -} diff --git a/operator/internal/handlers/lokistack_rotate_certs_test.go b/operator/internal/handlers/lokistack_rotate_certs_test.go index 1ac48b5ebb13..7ace87de85cd 100644 --- a/operator/internal/handlers/lokistack_rotate_certs_test.go +++ b/operator/internal/handlers/lokistack_rotate_certs_test.go @@ -12,7 +12,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" 
"k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -230,8 +230,8 @@ func TestCreateOrRotateCertificates_SetsOwnerRefOnAllObjects(t *testing.T) { Kind: stack.Kind, Name: stack.Name, UID: stack.UID, - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), } k.CreateStub = func(_ context.Context, o client.Object, _ ...client.CreateOption) error { @@ -376,8 +376,8 @@ func TestCreateOrRotateCertificates_WhenGetReturnsNoError_UpdateObjects(t *testi Kind: "LokiStack", Name: "my-stack", UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, }, }, @@ -530,8 +530,8 @@ func TestCreateOrRotateCertificates_WhenUpdateReturnsError_ContinueWithOtherObje Kind: "LokiStack", Name: "someStack", UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, }, }, diff --git a/operator/internal/manifests/compactor.go b/operator/internal/manifests/compactor.go index fc2d9cb602f7..0c5c6b038a1c 100644 --- a/operator/internal/manifests/compactor.go +++ b/operator/internal/manifests/compactor.go @@ -10,7 +10,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/grafana/loki/operator/internal/manifests/internal/config" @@ -147,8 +147,8 @@ func NewCompactorStatefulSet(opts Options) *appsv1.StatefulSet { }, Spec: appsv1.StatefulSetSpec{ PodManagementPolicy: appsv1.OrderedReadyPodManagement, - RevisionHistoryLimit: pointer.Int32(10), - Replicas: pointer.Int32(opts.Stack.Template.Compactor.Replicas), + RevisionHistoryLimit: ptr.To(defaultRevHistoryLimit), + Replicas: ptr.To(opts.Stack.Template.Compactor.Replicas), Selector: &metav1.LabelSelector{ MatchLabels: labels.Merge(l, GossipLabels()), }, @@ -177,7 +177,7 @@ func NewCompactorStatefulSet(opts Options) *appsv1.StatefulSet { }, }, VolumeMode: &volumeFileSystemMode, - StorageClassName: pointer.String(opts.Stack.StorageClassName), + StorageClassName: ptr.To(opts.Stack.StorageClassName), }, }, }, diff --git a/operator/internal/manifests/config_test.go b/operator/internal/manifests/config_test.go index 2a5df6f135cd..77ced809da6e 100644 --- a/operator/internal/manifests/config_test.go +++ b/operator/internal/manifests/config_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/manifests/internal/config" @@ -118,7 +118,7 @@ func randomConfigOptions() Options { Operator: corev1.TolerationOpEqual, Value: uuid.New().String(), Effect: corev1.TaintEffectNoExecute, - TolerationSeconds: pointer.Int64(rand.Int63()), + TolerationSeconds: ptr.To[int64](rand.Int63()), }, }, }, @@ -133,7 +133,7 @@ func randomConfigOptions() Options { Operator: corev1.TolerationOpEqual, Value: uuid.New().String(), Effect: corev1.TaintEffectNoExecute, - TolerationSeconds: pointer.Int64(rand.Int63()), + TolerationSeconds: ptr.To[int64](rand.Int63()), }, }, }, 
@@ -148,7 +148,7 @@ func randomConfigOptions() Options { Operator: corev1.TolerationOpEqual, Value: uuid.New().String(), Effect: corev1.TaintEffectNoExecute, - TolerationSeconds: pointer.Int64(rand.Int63()), + TolerationSeconds: ptr.To[int64](rand.Int63()), }, }, }, @@ -163,7 +163,7 @@ func randomConfigOptions() Options { Operator: corev1.TolerationOpEqual, Value: uuid.New().String(), Effect: corev1.TaintEffectNoExecute, - TolerationSeconds: pointer.Int64(rand.Int63()), + TolerationSeconds: ptr.To[int64](rand.Int63()), }, }, }, @@ -178,7 +178,7 @@ func randomConfigOptions() Options { Operator: corev1.TolerationOpEqual, Value: uuid.New().String(), Effect: corev1.TaintEffectNoExecute, - TolerationSeconds: pointer.Int64(rand.Int63()), + TolerationSeconds: ptr.To[int64](rand.Int63()), }, }, }, @@ -193,7 +193,7 @@ func randomConfigOptions() Options { Operator: corev1.TolerationOpEqual, Value: uuid.New().String(), Effect: corev1.TaintEffectNoExecute, - TolerationSeconds: pointer.Int64(rand.Int63()), + TolerationSeconds: ptr.To[int64](rand.Int63()), }, }, }, @@ -723,12 +723,12 @@ func TestConfigOptions_RulerOverrides_OCPApplicationTenant(t *testing.T) { RefreshInterval: "1m", Notifier: &config.NotifierConfig{ TLS: config.TLSConfig{ - ServerName: pointer.String("alertmanager-user-workload.openshift-user-workload-monitoring.svc.cluster.local"), - CAPath: pointer.String("/var/run/ca/alertmanager/service-ca.crt"), + ServerName: ptr.To("alertmanager-user-workload.openshift-user-workload-monitoring.svc.cluster.local"), + CAPath: ptr.To("/var/run/ca/alertmanager/service-ca.crt"), }, HeaderAuth: config.HeaderAuth{ - Type: pointer.String("Bearer"), - CredentialsFile: pointer.String("/var/run/secrets/kubernetes.io/serviceaccount/token"), + Type: ptr.To("Bearer"), + CredentialsFile: ptr.To("/var/run/secrets/kubernetes.io/serviceaccount/token"), }, }, }, @@ -848,14 +848,14 @@ func TestConfigOptions_RulerOverrides(t *testing.T) { }, Client: &lokiv1.AlertManagerClientConfig{ TLS: &lokiv1.AlertManagerClientTLSConfig{ - ServerName: pointer.String("application.svc"), - CAPath: pointer.String("/tenant/application/alertmanager/ca.crt"), - CertPath: pointer.String("/tenant/application/alertmanager/cert.crt"), - KeyPath: pointer.String("/tenant/application/alertmanager/cert.key"), + ServerName: ptr.To("application.svc"), + CAPath: ptr.To("/tenant/application/alertmanager/ca.crt"), + CertPath: ptr.To("/tenant/application/alertmanager/cert.crt"), + KeyPath: ptr.To("/tenant/application/alertmanager/cert.key"), }, HeaderAuth: &lokiv1.AlertManagerClientHeaderAuth{ - Type: pointer.String("Bearer"), - Credentials: pointer.String("letmeinplz"), + Type: ptr.To("Bearer"), + Credentials: ptr.To("letmeinplz"), }, }, }, @@ -872,14 +872,14 @@ func TestConfigOptions_RulerOverrides(t *testing.T) { }, Client: &lokiv1.AlertManagerClientConfig{ TLS: &lokiv1.AlertManagerClientTLSConfig{ - ServerName: pointer.String("other.svc"), - CAPath: pointer.String("/tenant/other/alertmanager/ca.crt"), - CertPath: pointer.String("/tenant/other/alertmanager/cert.crt"), - KeyPath: pointer.String("/tenant/other/alertmanager/cert.key"), + ServerName: ptr.To("other.svc"), + CAPath: ptr.To("/tenant/other/alertmanager/ca.crt"), + CertPath: ptr.To("/tenant/other/alertmanager/cert.crt"), + KeyPath: ptr.To("/tenant/other/alertmanager/cert.key"), }, BasicAuth: &lokiv1.AlertManagerClientBasicAuth{ - Username: pointer.String("user"), - Password: pointer.String("pass"), + Username: ptr.To("user"), + Password: ptr.To("pass"), }, }, }, @@ -906,14 +906,14 @@ func 
TestConfigOptions_RulerOverrides(t *testing.T) { ExternalLabels: map[string]string{"external": "label"}, Notifier: &config.NotifierConfig{ TLS: config.TLSConfig{ - ServerName: pointer.String("application.svc"), - CAPath: pointer.String("/tenant/application/alertmanager/ca.crt"), - CertPath: pointer.String("/tenant/application/alertmanager/cert.crt"), - KeyPath: pointer.String("/tenant/application/alertmanager/cert.key"), + ServerName: ptr.To("application.svc"), + CAPath: ptr.To("/tenant/application/alertmanager/ca.crt"), + CertPath: ptr.To("/tenant/application/alertmanager/cert.crt"), + KeyPath: ptr.To("/tenant/application/alertmanager/cert.key"), }, HeaderAuth: config.HeaderAuth{ - Type: pointer.String("Bearer"), - Credentials: pointer.String("letmeinplz"), + Type: ptr.To("Bearer"), + Credentials: ptr.To("letmeinplz"), }, }, }, @@ -930,14 +930,14 @@ func TestConfigOptions_RulerOverrides(t *testing.T) { ExternalLabels: map[string]string{"external1": "label1"}, Notifier: &config.NotifierConfig{ TLS: config.TLSConfig{ - ServerName: pointer.String("other.svc"), - CAPath: pointer.String("/tenant/other/alertmanager/ca.crt"), - CertPath: pointer.String("/tenant/other/alertmanager/cert.crt"), - KeyPath: pointer.String("/tenant/other/alertmanager/cert.key"), + ServerName: ptr.To("other.svc"), + CAPath: ptr.To("/tenant/other/alertmanager/ca.crt"), + CertPath: ptr.To("/tenant/other/alertmanager/cert.crt"), + KeyPath: ptr.To("/tenant/other/alertmanager/cert.key"), }, BasicAuth: config.BasicAuth{ - Username: pointer.String("user"), - Password: pointer.String("pass"), + Username: ptr.To("user"), + Password: ptr.To("pass"), }, }, }, @@ -1072,12 +1072,12 @@ func TestConfigOptions_RulerOverrides_OCPUserWorkloadOnlyEnabled(t *testing.T) { RefreshInterval: "1m", Notifier: &config.NotifierConfig{ TLS: config.TLSConfig{ - ServerName: pointer.String("alertmanager-user-workload.openshift-user-workload-monitoring.svc.cluster.local"), - CAPath: pointer.String("/var/run/ca/alertmanager/service-ca.crt"), + ServerName: ptr.To("alertmanager-user-workload.openshift-user-workload-monitoring.svc.cluster.local"), + CAPath: ptr.To("/var/run/ca/alertmanager/service-ca.crt"), }, HeaderAuth: config.HeaderAuth{ - Type: pointer.String("Bearer"), - CredentialsFile: pointer.String("/var/run/secrets/kubernetes.io/serviceaccount/token"), + Type: ptr.To("Bearer"), + CredentialsFile: ptr.To("/var/run/secrets/kubernetes.io/serviceaccount/token"), }, }, }, @@ -1150,12 +1150,12 @@ func TestConfigOptions_RulerOverrides_OCPUserWorkloadOnlyEnabled(t *testing.T) { RefreshInterval: "1m", Notifier: &config.NotifierConfig{ TLS: config.TLSConfig{ - ServerName: pointer.String("alertmanager-user-workload.openshift-user-workload-monitoring.svc.cluster.local"), - CAPath: pointer.String("/var/run/ca/alertmanager/service-ca.crt"), + ServerName: ptr.To("alertmanager-user-workload.openshift-user-workload-monitoring.svc.cluster.local"), + CAPath: ptr.To("/var/run/ca/alertmanager/service-ca.crt"), }, HeaderAuth: config.HeaderAuth{ - Type: pointer.String("Bearer"), - CredentialsFile: pointer.String("/var/run/secrets/kubernetes.io/serviceaccount/token"), + Type: ptr.To("Bearer"), + CredentialsFile: ptr.To("/var/run/secrets/kubernetes.io/serviceaccount/token"), }, }, }, diff --git a/operator/internal/manifests/distributor.go b/operator/internal/manifests/distributor.go index e92b09c5ba9d..7b5a0a033f19 100644 --- a/operator/internal/manifests/distributor.go +++ b/operator/internal/manifests/distributor.go @@ -10,7 +10,7 @@ import ( metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/grafana/loki/operator/internal/manifests/internal/config" @@ -146,7 +146,7 @@ func NewDistributorDeployment(opts Options) *appsv1.Deployment { Labels: l, }, Spec: appsv1.DeploymentSpec{ - Replicas: pointer.Int32(opts.Stack.Template.Distributor.Replicas), + Replicas: ptr.To(opts.Stack.Template.Distributor.Replicas), Selector: &metav1.LabelSelector{ MatchLabels: labels.Merge(l, GossipLabels()), }, diff --git a/operator/internal/manifests/gateway.go b/operator/internal/manifests/gateway.go index 03bce6453f36..1ba3a9905e57 100644 --- a/operator/internal/manifests/gateway.go +++ b/operator/internal/manifests/gateway.go @@ -15,7 +15,7 @@ import ( policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/grafana/loki/operator/internal/manifests/internal/gateway" @@ -244,7 +244,7 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment { Labels: l, }, Spec: appsv1.DeploymentSpec{ - Replicas: pointer.Int32(opts.Stack.Template.Gateway.Replicas), + Replicas: ptr.To(opts.Stack.Template.Gateway.Replicas), Selector: &metav1.LabelSelector{ MatchLabels: l, }, @@ -354,7 +354,7 @@ func NewServiceAccount(opts Options) client.Object { Name: GatewayName(opts.Name), Namespace: opts.Namespace, }, - AutomountServiceAccountToken: pointer.Bool(true), + AutomountServiceAccountToken: ptr.To(true), } } diff --git a/operator/internal/manifests/indexgateway.go b/operator/internal/manifests/indexgateway.go index 18ffc6cdc32a..f4dbbe8f6f24 100644 --- a/operator/internal/manifests/indexgateway.go +++ b/operator/internal/manifests/indexgateway.go @@ -11,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/grafana/loki/operator/internal/manifests/internal/config" @@ -153,8 +153,8 @@ func NewIndexGatewayStatefulSet(opts Options) *appsv1.StatefulSet { }, Spec: appsv1.StatefulSetSpec{ PodManagementPolicy: appsv1.OrderedReadyPodManagement, - RevisionHistoryLimit: pointer.Int32(10), - Replicas: pointer.Int32(opts.Stack.Template.IndexGateway.Replicas), + RevisionHistoryLimit: ptr.To(defaultRevHistoryLimit), + Replicas: ptr.To(opts.Stack.Template.IndexGateway.Replicas), Selector: &metav1.LabelSelector{ MatchLabels: labels.Merge(l, GossipLabels()), }, @@ -182,7 +182,7 @@ func NewIndexGatewayStatefulSet(opts Options) *appsv1.StatefulSet { corev1.ResourceStorage: opts.ResourceRequirements.IndexGateway.PVCSize, }, }, - StorageClassName: pointer.String(opts.Stack.StorageClassName), + StorageClassName: ptr.To(opts.Stack.StorageClassName), VolumeMode: &volumeFileSystemMode, }, }, diff --git a/operator/internal/manifests/ingester.go b/operator/internal/manifests/ingester.go index 50740a6a640d..6e7a50af4806 100644 --- a/operator/internal/manifests/ingester.go +++ b/operator/internal/manifests/ingester.go @@ -11,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" 
"github.com/grafana/loki/operator/internal/manifests/internal/config" @@ -163,8 +163,8 @@ func NewIngesterStatefulSet(opts Options) *appsv1.StatefulSet { }, Spec: appsv1.StatefulSetSpec{ PodManagementPolicy: appsv1.OrderedReadyPodManagement, - RevisionHistoryLimit: pointer.Int32(10), - Replicas: pointer.Int32(opts.Stack.Template.Ingester.Replicas), + RevisionHistoryLimit: ptr.To(defaultRevHistoryLimit), + Replicas: ptr.To(opts.Stack.Template.Ingester.Replicas), Selector: &metav1.LabelSelector{ MatchLabels: labels.Merge(l, GossipLabels()), }, @@ -192,7 +192,7 @@ func NewIngesterStatefulSet(opts Options) *appsv1.StatefulSet { corev1.ResourceStorage: opts.ResourceRequirements.Ingester.PVCSize, }, }, - StorageClassName: pointer.String(opts.Stack.StorageClassName), + StorageClassName: ptr.To(opts.Stack.StorageClassName), VolumeMode: &volumeFileSystemMode, }, }, @@ -211,7 +211,7 @@ func NewIngesterStatefulSet(opts Options) *appsv1.StatefulSet { corev1.ResourceStorage: opts.ResourceRequirements.WALStorage.PVCSize, }, }, - StorageClassName: pointer.String(opts.Stack.StorageClassName), + StorageClassName: ptr.To(opts.Stack.StorageClassName), VolumeMode: &volumeFileSystemMode, }, }, diff --git a/operator/internal/manifests/internal/alerts/prometheus-alerts.yaml b/operator/internal/manifests/internal/alerts/prometheus-alerts.yaml index f378c49fd78c..6d2d978843dd 100644 --- a/operator/internal/manifests/internal/alerts/prometheus-alerts.yaml +++ b/operator/internal/manifests/internal/alerts/prometheus-alerts.yaml @@ -175,3 +175,13 @@ groups: for: 15m labels: severity: warning + - alert: LokistackSchemaUpgradesRequired + annotations: + message: |- + Object storage schema needs upgrade. + summary: "The applied storage schema config is old and should be upgraded." 
+ runbook_url: "[[ .RunbookURL ]]#Lokistack-Schema-Upgrades-Required" + expr: sum by(stack_id) (lokistack_warnings_count) > 0 + labels: + severity: warning + resource: '{{ $labels.stack_id}}' diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go index 27f7b7ed02cb..187dc6514202 100644 --- a/operator/internal/manifests/internal/config/build_test.go +++ b/operator/internal/manifests/internal/config/build_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/stretchr/testify/require" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" configv1 "github.com/grafana/loki/operator/apis/config/v1" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" @@ -3247,20 +3247,20 @@ overrides: }, Notifier: &NotifierConfig{ TLS: TLSConfig{ - ServerName: pointer.String("custom-servername"), - CertPath: pointer.String("custom/path"), - KeyPath: pointer.String("custom/key"), - CAPath: pointer.String("custom/CA"), - InsecureSkipVerify: pointer.Bool(false), + ServerName: ptr.To("custom-servername"), + CertPath: ptr.To("custom/path"), + KeyPath: ptr.To("custom/key"), + CAPath: ptr.To("custom/CA"), + InsecureSkipVerify: ptr.To(false), }, BasicAuth: BasicAuth{ - Username: pointer.String("user"), - Password: pointer.String("pass"), + Username: ptr.To("user"), + Password: ptr.To("pass"), }, HeaderAuth: HeaderAuth{ - CredentialsFile: pointer.String("cred/file"), - Type: pointer.String("auth"), - Credentials: pointer.String("creds"), + CredentialsFile: ptr.To("cred/file"), + Type: ptr.To("auth"), + Credentials: ptr.To("creds"), }, }, }, diff --git a/operator/internal/manifests/mutate_test.go b/operator/internal/manifests/mutate_test.go index 18407c3c23da..e62acd6d25c7 100644 --- a/operator/internal/manifests/mutate_test.go +++ b/operator/internal/manifests/mutate_test.go @@ -13,7 +13,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestGetMutateFunc_MutateObjectMeta(t *testing.T) { @@ -28,8 +28,8 @@ func TestGetMutateFunc_MutateObjectMeta(t *testing.T) { OwnerReferences: []metav1.OwnerReference{ { APIVersion: "loki.grafana.com/v1", - BlockOwnerDeletion: pointer.Bool(true), - Controller: pointer.Bool(true), + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), Kind: "LokiStack", Name: "lokistack-testing", UID: "6128aa83-de7f-47c0-abf2-4a380713b599", @@ -504,7 +504,7 @@ func TestMutateFuncFor_MutateDeploymentSpec(t *testing.T) { "test": "test", }, }, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -525,7 +525,7 @@ func TestMutateFuncFor_MutateDeploymentSpec(t *testing.T) { "and": "another", }, }, - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -552,7 +552,7 @@ func TestMutateFuncFor_MutateDeploymentSpec(t *testing.T) { "test": "test", }, }, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -574,7 +574,7 @@ func TestMutateFuncFor_MutateDeploymentSpec(t *testing.T) { "and": "another", }, }, - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -665,7 +665,7 @@ func TestMutateFuncFor_MutateStatefulSetSpec(t *testing.T) { 
"test": "test", }, }, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -693,7 +693,7 @@ func TestMutateFuncFor_MutateStatefulSetSpec(t *testing.T) { "and": "another", }, }, - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -728,7 +728,7 @@ func TestMutateFuncFor_MutateStatefulSetSpec(t *testing.T) { "test": "test", }, }, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -757,7 +757,7 @@ func TestMutateFuncFor_MutateStatefulSetSpec(t *testing.T) { "and": "another", }, }, - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -1108,7 +1108,7 @@ func TestGetMutateFunc_MutateRoute(t *testing.T) { To: routev1.RouteTargetReference{ Kind: "Service", Name: "a-service", - Weight: pointer.Int32(100), + Weight: ptr.To[int32](100), }, TLS: &routev1.TLSConfig{ Termination: routev1.TLSTerminationReencrypt, diff --git a/operator/internal/manifests/openshift/configure.go b/operator/internal/manifests/openshift/configure.go index 29087b744b6a..e83e6cec0aed 100644 --- a/operator/internal/manifests/openshift/configure.go +++ b/operator/internal/manifests/openshift/configure.go @@ -8,7 +8,7 @@ import ( monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/manifests/internal/config" @@ -289,12 +289,12 @@ func configureUserWorkloadAM(configOpt *config.Options, token, caPath, monitorSe RefreshInterval: "1m", Notifier: &config.NotifierConfig{ TLS: config.TLSConfig{ - ServerName: pointer.String(monitorServerName), + ServerName: ptr.To(monitorServerName), CAPath: &caPath, }, HeaderAuth: config.HeaderAuth{ CredentialsFile: &token, - Type: pointer.String("Bearer"), + Type: ptr.To("Bearer"), }, }, } diff --git a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json index e1adb4dd6cc0..df5ea66e6d2a 100644 --- a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json +++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json @@ -217,9 +217,9 @@ "group": "A", "mode": "normal" } - } - }, - "unit": "s" + }, + "unit": "s" + } }, "fill": 1, "id": 3, @@ -493,9 +493,9 @@ "group": "A", "mode": "normal" } - } - }, - "unit": "s" + }, + "unit": "s" + } }, "fill": 1, "id": 6, @@ -769,9 +769,9 @@ "group": "A", "mode": "normal" } - } - }, - "unit": "s" + }, + "unit": "s" + } }, "fill": 1, "id": 9, @@ -1045,9 +1045,9 @@ "group": "A", "mode": "normal" } - } - }, - "unit": "s" + }, + "unit": "s" + } }, "fill": 1, "id": 15, diff --git a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json index 58107485d370..8053d353b113 100644 --- 
a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json +++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json @@ -66,7 +66,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\", route=~\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n", + "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-distributor-http\", route=~\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{status}}", @@ -142,7 +142,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum by (le) (namespace_job_route:loki_request_duration_seconds_bucket:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"loki_api_v1_push\"})) * 1e3", + "expr": "histogram_quantile(0.99, sum by (le) (namespace_job_route:loki_request_duration_seconds_bucket:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})) * 1e3", "format": "time_series", "intervalFactor": 2, "legendFormat": "99th Percentile", @@ -150,7 +150,7 @@ "step": 10 }, { - "expr": "histogram_quantile(0.50, sum by (le) (namespace_job_route:loki_request_duration_seconds_bucket:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"loki_api_v1_push\"})) * 1e3", + "expr": "histogram_quantile(0.50, sum by (le) (namespace_job_route:loki_request_duration_seconds_bucket:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})) * 1e3", "format": "time_series", "intervalFactor": 2, "legendFormat": "50th Percentile", @@ -158,7 +158,7 @@ "step": 10 }, { - "expr": "1e3 * sum(namespace_job_route:loki_request_duration_seconds_sum:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"loki_api_v1_push\"}) / sum(namespace_job_route:loki_request_duration_seconds_count:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"loki_api_v1_push\"})", + "expr": "1e3 * sum(namespace_job_route:loki_request_duration_seconds_sum:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"}) / sum(namespace_job_route:loki_request_duration_seconds_count:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "Average", @@ -246,7 +246,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum (rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\",}[$__rate_interval])) / sum(rate(loki_distributor_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\",}[$__rate_interval]))", + "expr": "sum 
(rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",}[$__rate_interval])) / sum(rate(loki_distributor_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "bytes", @@ -322,7 +322,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (tenant) (rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\",}[$__rate_interval])) / ignoring(tenant) group_left sum(rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\",}[$__rate_interval]))", + "expr": "sum by (tenant) (rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",}[$__rate_interval])) / ignoring(tenant) group_left sum(rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{tenant}}", diff --git a/operator/internal/manifests/openshift/route.go b/operator/internal/manifests/openshift/route.go index 140595b9bed6..b621fbf9a36e 100644 --- a/operator/internal/manifests/openshift/route.go +++ b/operator/internal/manifests/openshift/route.go @@ -6,7 +6,7 @@ import ( routev1 "github.com/openshift/api/route/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -29,7 +29,7 @@ func BuildRoute(opts Options) client.Object { To: routev1.RouteTargetReference{ Kind: "Service", Name: opts.BuildOpts.GatewaySvcName, - Weight: pointer.Int32(100), + Weight: ptr.To[int32](100), }, Port: &routev1.RoutePort{ TargetPort: intstr.FromString(opts.BuildOpts.GatewaySvcTargetPort), diff --git a/operator/internal/manifests/openshift/serviceaccount.go b/operator/internal/manifests/openshift/serviceaccount.go index 216331cf13a2..74ace7a89a6e 100644 --- a/operator/internal/manifests/openshift/serviceaccount.go +++ b/operator/internal/manifests/openshift/serviceaccount.go @@ -3,7 +3,7 @@ package openshift import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -21,6 +21,6 @@ func BuildRulerServiceAccount(opts Options) client.Object { Name: rulerServiceAccountName(opts), Namespace: opts.BuildOpts.LokiStackNamespace, }, - AutomountServiceAccountToken: pointer.Bool(true), + AutomountServiceAccountToken: ptr.To(true), } } diff --git a/operator/internal/manifests/querier.go b/operator/internal/manifests/querier.go index ad5bd3cdda34..c807fe8ed1f0 100644 --- a/operator/internal/manifests/querier.go +++ b/operator/internal/manifests/querier.go @@ -11,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/grafana/loki/operator/internal/manifests/internal/config" @@ -152,7 +152,7 @@ func NewQuerierDeployment(opts Options) *appsv1.Deployment { Labels: l, }, Spec: appsv1.DeploymentSpec{ - Replicas: pointer.Int32(opts.Stack.Template.Querier.Replicas), + Replicas: 
ptr.To(opts.Stack.Template.Querier.Replicas), Selector: &metav1.LabelSelector{ MatchLabels: labels.Merge(l, GossipLabels()), }, diff --git a/operator/internal/manifests/query-frontend.go b/operator/internal/manifests/query-frontend.go index 0cacb3076aae..e1023e872371 100644 --- a/operator/internal/manifests/query-frontend.go +++ b/operator/internal/manifests/query-frontend.go @@ -10,7 +10,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/grafana/loki/operator/internal/manifests/internal/config" @@ -158,7 +158,7 @@ func NewQueryFrontendDeployment(opts Options) *appsv1.Deployment { Labels: l, }, Spec: appsv1.DeploymentSpec{ - Replicas: pointer.Int32(opts.Stack.Template.QueryFrontend.Replicas), + Replicas: ptr.To(opts.Stack.Template.QueryFrontend.Replicas), Selector: &metav1.LabelSelector{ MatchLabels: labels.Merge(l, GossipLabels()), }, diff --git a/operator/internal/manifests/ruler.go b/operator/internal/manifests/ruler.go index 8e44f5834fef..c34adb765ee7 100644 --- a/operator/internal/manifests/ruler.go +++ b/operator/internal/manifests/ruler.go @@ -11,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" @@ -202,8 +202,8 @@ func NewRulerStatefulSet(opts Options) *appsv1.StatefulSet { UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ Type: appsv1.RollingUpdateStatefulSetStrategyType, }, - RevisionHistoryLimit: pointer.Int32(10), - Replicas: pointer.Int32(opts.Stack.Template.Ruler.Replicas), + RevisionHistoryLimit: ptr.To(defaultRevHistoryLimit), + Replicas: ptr.To(opts.Stack.Template.Ruler.Replicas), Selector: &metav1.LabelSelector{ MatchLabels: labels.Merge(l, GossipLabels()), }, @@ -231,7 +231,7 @@ func NewRulerStatefulSet(opts Options) *appsv1.StatefulSet { corev1.ResourceStorage: opts.ResourceRequirements.Ruler.PVCSize, }, }, - StorageClassName: pointer.String(opts.Stack.StorageClassName), + StorageClassName: ptr.To(opts.Stack.StorageClassName), VolumeMode: &volumeFileSystemMode, }, }, @@ -250,7 +250,7 @@ func NewRulerStatefulSet(opts Options) *appsv1.StatefulSet { corev1.ResourceStorage: opts.ResourceRequirements.WALStorage.PVCSize, }, }, - StorageClassName: pointer.String(opts.Stack.StorageClassName), + StorageClassName: ptr.To(opts.Stack.StorageClassName), VolumeMode: &volumeFileSystemMode, }, }, diff --git a/operator/internal/manifests/securitycontext.go b/operator/internal/manifests/securitycontext.go index 833f8bf009ee..5de7cf2d2d36 100644 --- a/operator/internal/manifests/securitycontext.go +++ b/operator/internal/manifests/securitycontext.go @@ -4,13 +4,13 @@ import ( "github.com/ViaQ/logerr/v2/kverrors" "github.com/imdario/mergo" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func configurePodSpecForRestrictedStandard(podSpec *corev1.PodSpec) error { podSecurityContext := corev1.PodSpec{ SecurityContext: &corev1.PodSecurityContext{ - RunAsNonRoot: pointer.Bool(true), + RunAsNonRoot: ptr.To(true), SeccompProfile: &corev1.SeccompProfile{ Type: corev1.SeccompProfileTypeRuntimeDefault, }, @@ -19,7 +19,7 @@ func configurePodSpecForRestrictedStandard(podSpec *corev1.PodSpec) error { containerSecurityContext := corev1.Container{ SecurityContext: 
&corev1.SecurityContext{ - AllowPrivilegeEscalation: pointer.Bool(false), + AllowPrivilegeEscalation: ptr.To(false), Capabilities: &corev1.Capabilities{ Drop: []corev1.Capability{"ALL"}, }, diff --git a/operator/internal/manifests/serviceaccount.go b/operator/internal/manifests/serviceaccount.go index 114af1ab3740..c692e1c67696 100644 --- a/operator/internal/manifests/serviceaccount.go +++ b/operator/internal/manifests/serviceaccount.go @@ -3,7 +3,7 @@ package manifests import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -20,6 +20,6 @@ func BuildServiceAccount(opts Options) client.Object { Namespace: opts.Namespace, Labels: commonLabels(opts.Name), }, - AutomountServiceAccountToken: pointer.Bool(true), + AutomountServiceAccountToken: ptr.To(true), } } diff --git a/operator/internal/manifests/storage/configure.go b/operator/internal/manifests/storage/configure.go index a48f07ff5b46..06956827db42 100644 --- a/operator/internal/manifests/storage/configure.go +++ b/operator/internal/manifests/storage/configure.go @@ -8,7 +8,7 @@ import ( "github.com/imdario/mergo" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" ) @@ -308,7 +308,7 @@ func saTokenVolume(opts Options) corev1.Volume { Sources: []corev1.VolumeProjection{ { ServiceAccountToken: &corev1.ServiceAccountTokenProjection{ - ExpirationSeconds: pointer.Int64(saTokenExpiration), + ExpirationSeconds: ptr.To(saTokenExpiration), Path: corev1.ServiceAccountTokenKey, Audience: audience, }, diff --git a/operator/internal/manifests/storage/configure_test.go b/operator/internal/manifests/storage/configure_test.go index 8cf82d8c65db..220d0c6c701a 100644 --- a/operator/internal/manifests/storage/configure_test.go +++ b/operator/internal/manifests/storage/configure_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" ) @@ -376,7 +376,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) { { ServiceAccountToken: &corev1.ServiceAccountTokenProjection{ Audience: "test", - ExpirationSeconds: pointer.Int64(3600), + ExpirationSeconds: ptr.To[int64](3600), Path: corev1.ServiceAccountTokenKey, }, }, @@ -469,7 +469,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) { { ServiceAccountToken: &corev1.ServiceAccountTokenProjection{ Audience: "openshift", - ExpirationSeconds: pointer.Int64(3600), + ExpirationSeconds: ptr.To[int64](3600), Path: corev1.ServiceAccountTokenKey, }, }, diff --git a/operator/internal/manifests/storage/var.go b/operator/internal/manifests/storage/var.go index bbd377a43f46..16b7e10d3d1b 100644 --- a/operator/internal/manifests/storage/var.go +++ b/operator/internal/manifests/storage/var.go @@ -101,10 +101,10 @@ const ( // KeySwiftPassword is the secret data key for the OpenStack Swift password. 
KeySwiftUsername = "username" - saTokenVolumeK8sDirectory = "/var/run/secrets/kubernetes.io/serviceaccount" - saTokenVolumeOcpDirectory = "/var/run/secrets/openshift/serviceaccount" - saTokenVolumeName = "bound-sa-token" - saTokenExpiration = 3600 + saTokenVolumeK8sDirectory = "/var/run/secrets/kubernetes.io/serviceaccount" + saTokenVolumeOcpDirectory = "/var/run/secrets/openshift/serviceaccount" + saTokenVolumeName = "bound-sa-token" + saTokenExpiration int64 = 3600 secretDirectory = "/etc/storage/secrets" storageTLSVolume = "storage-tls" diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go index 6468e4426bf0..9bbe8a12c014 100644 --- a/operator/internal/manifests/var.go +++ b/operator/internal/manifests/var.go @@ -128,8 +128,9 @@ const ( var ( defaultTimeoutConfig = calculateHTTPTimeouts(lokiDefaultQueryTimeout) - defaultConfigMapMode = int32(420) - volumeFileSystemMode = corev1.PersistentVolumeFilesystem + defaultRevHistoryLimit int32 = 10 + defaultConfigMapMode int32 = 420 + volumeFileSystemMode = corev1.PersistentVolumeFilesystem ) func commonAnnotations(configHash, objStoreHash, rotationRequiredAt string) map[string]string { diff --git a/operator/internal/metrics/metrics.go b/operator/internal/metrics/metrics.go index 3c994f13c61e..fc87b76c6fa9 100644 --- a/operator/internal/metrics/metrics.go +++ b/operator/internal/metrics/metrics.go @@ -51,6 +51,14 @@ var ( }, []string{"size", "stack_id"}, ) + + lokistackWarningsCount = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "lokistack_warnings_count", + Help: "Counts the number of warnings set on a LokiStack.", + }, + []string{"reason", "stack_id"}, + ) ) // RegisterMetricCollectors registers the prometheus collectors with the k8 default metrics @@ -60,6 +68,7 @@ func RegisterMetricCollectors() { userDefinedLimitsMetric, globalStreamLimitMetric, averageTenantStreamLimitMetric, + lokistackWarningsCount, } for _, collector := range metricCollectors { @@ -104,6 +113,17 @@ func Collect(spec *lokiv1.LokiStackSpec, stackName string) { setGlobalStreamLimitMetric(size, stackName, globalRate) setAverageTenantStreamLimitMetric(size, stackName, tenantRate) } + + if len(spec.Storage.Schemas) > 0 && spec.Storage.Schemas[len(spec.Storage.Schemas)-1].Version != lokiv1.ObjectStorageSchemaV13 { + setLokistackSchemaUpgradesRequired(stackName, true) + } +} + +func setLokistackSchemaUpgradesRequired(identifier string, active bool) { + lokistackWarningsCount.With(prometheus.Labels{ + "reason": string(lokiv1.ReasonStorageNeedsSchemaUpdate), + "stack_id": identifier, + }).Set(boolValue(active)) } func setDeploymentMetric(size lokiv1.LokiStackSizeType, identifier string, active bool) { diff --git a/operator/internal/validation/rulerconfig_test.go b/operator/internal/validation/rulerconfig_test.go index 158ad9042f3a..60cc93033a61 100644 --- a/operator/internal/validation/rulerconfig_test.go +++ b/operator/internal/validation/rulerconfig_test.go @@ -9,7 +9,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/validation" @@ -26,8 +26,8 @@ var rctt = []struct { AlertManagerSpec: &lokiv1.AlertManagerSpec{ Client: &lokiv1.AlertManagerClientConfig{ BasicAuth: &lokiv1.AlertManagerClientBasicAuth{ - Username: pointer.String("user"), - Password: pointer.String("pass"), + Username: ptr.To("user"), 
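The `metrics.go` hunk above is what backs the new `LokistackSchemaUpgradesRequired` alert: `Collect` inspects the last entry of `spec.Storage.Schemas` and raises the `lokistack_warnings_count` gauge when it is not v13, which the alert expression `sum by(stack_id) (lokistack_warnings_count) > 0` then fires on. A condensed, self-contained sketch of that wiring; the `lokiv1` types and the operator's `boolValue` helper are stubbed here:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// Stand-in for the operator's schema type.
type schema struct{ Version string }

const schemaV13 = "v13"

var warnings = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	Name: "lokistack_warnings_count",
	Help: "Counts the number of warnings set on a LokiStack.",
}, []string{"reason", "stack_id"})

// boolValue mirrors the helper the diff calls: 1 for true, 0 for false.
func boolValue(b bool) float64 {
	if b {
		return 1
	}
	return 0
}

// collect flags a stack whose newest schema entry is older than v13.
func collect(schemas []schema, stackID string) {
	if len(schemas) > 0 && schemas[len(schemas)-1].Version != schemaV13 {
		warnings.WithLabelValues("StorageNeedsSchemaUpdate", stackID).Set(boolValue(true))
	}
}

func main() {
	prometheus.MustRegister(warnings)
	collect([]schema{{Version: "v12"}}, "my-stack")
}
```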
+ Password: ptr.To("pass"), }, }, }, @@ -36,8 +36,8 @@ var rctt = []struct { AlertManagerOverrides: &lokiv1.AlertManagerSpec{ Client: &lokiv1.AlertManagerClientConfig{ BasicAuth: &lokiv1.AlertManagerClientBasicAuth{ - Username: pointer.String("user1"), - Password: pointer.String("pass1"), + Username: ptr.To("user1"), + Password: ptr.To("pass1"), }, }, }, @@ -51,7 +51,7 @@ var rctt = []struct { AlertManagerSpec: &lokiv1.AlertManagerSpec{ Client: &lokiv1.AlertManagerClientConfig{ HeaderAuth: &lokiv1.AlertManagerClientHeaderAuth{ - Credentials: pointer.String("creds"), + Credentials: ptr.To("creds"), }, }, }, @@ -60,7 +60,7 @@ var rctt = []struct { AlertManagerOverrides: &lokiv1.AlertManagerSpec{ Client: &lokiv1.AlertManagerClientConfig{ HeaderAuth: &lokiv1.AlertManagerClientHeaderAuth{ - Credentials: pointer.String("creds1"), + Credentials: ptr.To("creds1"), }, }, }, @@ -74,7 +74,7 @@ var rctt = []struct { AlertManagerSpec: &lokiv1.AlertManagerSpec{ Client: &lokiv1.AlertManagerClientConfig{ HeaderAuth: &lokiv1.AlertManagerClientHeaderAuth{ - CredentialsFile: pointer.String("creds-file"), + CredentialsFile: ptr.To("creds-file"), }, }, }, @@ -83,7 +83,7 @@ var rctt = []struct { AlertManagerOverrides: &lokiv1.AlertManagerSpec{ Client: &lokiv1.AlertManagerClientConfig{ HeaderAuth: &lokiv1.AlertManagerClientHeaderAuth{ - CredentialsFile: pointer.String("creds-file1"), + CredentialsFile: ptr.To("creds-file1"), }, }, }, @@ -97,7 +97,7 @@ var rctt = []struct { AlertManagerSpec: &lokiv1.AlertManagerSpec{ Client: &lokiv1.AlertManagerClientConfig{ HeaderAuth: &lokiv1.AlertManagerClientHeaderAuth{ - Credentials: pointer.String("creds"), + Credentials: ptr.To("creds"), }, }, }, @@ -106,7 +106,7 @@ var rctt = []struct { AlertManagerOverrides: &lokiv1.AlertManagerSpec{ Client: &lokiv1.AlertManagerClientConfig{ HeaderAuth: &lokiv1.AlertManagerClientHeaderAuth{ - CredentialsFile: pointer.String("creds-file1"), + CredentialsFile: ptr.To("creds-file1"), }, }, }, @@ -120,8 +120,8 @@ var rctt = []struct { AlertManagerSpec: &lokiv1.AlertManagerSpec{ Client: &lokiv1.AlertManagerClientConfig{ HeaderAuth: &lokiv1.AlertManagerClientHeaderAuth{ - Credentials: pointer.String("creds"), - CredentialsFile: pointer.String("creds-file"), + Credentials: ptr.To("creds"), + CredentialsFile: ptr.To("creds-file"), }, }, }, @@ -130,8 +130,8 @@ var rctt = []struct { AlertManagerOverrides: &lokiv1.AlertManagerSpec{ Client: &lokiv1.AlertManagerClientConfig{ HeaderAuth: &lokiv1.AlertManagerClientHeaderAuth{ - Credentials: pointer.String("creds1"), - CredentialsFile: pointer.String("creds-file1"), + Credentials: ptr.To("creds1"), + CredentialsFile: ptr.To("creds-file1"), }, }, }, diff --git a/operator/jsonnet/config.libsonnet b/operator/jsonnet/config.libsonnet index efdc1c6103d5..82dc625da2a4 100644 --- a/operator/jsonnet/config.libsonnet +++ b/operator/jsonnet/config.libsonnet @@ -238,7 +238,6 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn distributor:: [ utils.selector.eq('namespace', '$namespace'), utils.selector.re('job', '.+-distributor-http'), - utils.selector.eq('route', 'loki_api_v1_push'), ], ingester:: [ utils.selector.eq('namespace', '$namespace'), diff --git a/operator/jsonnet/jsonnetfile.json b/operator/jsonnet/jsonnetfile.json index 4b25fb159b3d..2bc2549a3c60 100644 --- a/operator/jsonnet/jsonnetfile.json +++ b/operator/jsonnet/jsonnetfile.json @@ -8,7 +8,7 @@ "subdir": "production/loki-mixin" } }, - "version": "bd505f8e2d37172ff35a89f4ac42efec9566a263" + "version": 
"0694d797dec010393567704211638219c1971b46" } ], "legacyImports": true diff --git a/operator/jsonnet/jsonnetfile.lock.json b/operator/jsonnet/jsonnetfile.lock.json index 27d2e6e8756c..3a0710db7565 100644 --- a/operator/jsonnet/jsonnetfile.lock.json +++ b/operator/jsonnet/jsonnetfile.lock.json @@ -38,8 +38,8 @@ "subdir": "production/loki-mixin" } }, - "version": "bd505f8e2d37172ff35a89f4ac42efec9566a263", - "sum": "yiXXBAcWfMkYSJthU2OZSgHHmveWvmRT6aM1V0MaAjs=" + "version": "0694d797dec010393567704211638219c1971b46", + "sum": "Pw/9T/ZRjXLqTivU5xkJnrP5kFdET2FDUjjG1G96GmQ=" }, { "source": { diff --git a/operator/main.go b/operator/main.go index 6b101175407e..654cd11d9f0a 100644 --- a/operator/main.go +++ b/operator/main.go @@ -2,8 +2,6 @@ package main import ( "flag" - "net/http" - "net/http/pprof" "os" "github.com/ViaQ/logerr/v2/kverrors" @@ -21,6 +19,7 @@ import ( lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1" lokictrl "github.com/grafana/loki/operator/controllers/loki" + "github.com/grafana/loki/operator/internal/config" "github.com/grafana/loki/operator/internal/metrics" "github.com/grafana/loki/operator/internal/operator" "github.com/grafana/loki/operator/internal/validation" @@ -59,14 +58,10 @@ func main() { var err error - ctrlCfg := ctrlconfigv1.ProjectConfig{} - options := ctrl.Options{Scheme: scheme} - if configFile != "" { - options, err = options.AndFrom(ctrl.ConfigFile().AtPath(configFile).OfKind(&ctrlCfg)) //nolint:staticcheck - if err != nil { - logger.Error(err, "failed to parse controller manager config file") - os.Exit(1) - } + ctrlCfg, options, err := config.LoadConfig(scheme, configFile) + if err != nil { + logger.Error(err, "failed to load operator configuration") + os.Exit(1) } if ctrlCfg.Gates.LokiStackAlerts && !ctrlCfg.Gates.ServiceMonitors { @@ -218,35 +213,9 @@ func main() { logger.Info("registering metrics") metrics.RegisterMetricCollectors() - logger.Info("Registering profiling endpoints.") - err = registerProfiler(mgr) - if err != nil { - logger.Error(err, "failed to register extra pprof handler") - os.Exit(1) - } - logger.Info("starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { logger.Error(err, "problem running manager") os.Exit(1) } } - -func registerProfiler(m ctrl.Manager) error { - endpoints := map[string]http.HandlerFunc{ - "/debug/pprof/": pprof.Index, - "/debug/pprof/cmdline": pprof.Cmdline, - "/debug/pprof/profile": pprof.Profile, - "/debug/pprof/symbol": pprof.Symbol, - "/debug/pprof/trace": pprof.Trace, - } - - for path, handler := range endpoints { - err := m.AddMetricsExtraHandler(path, handler) - if err != nil { - return err - } - } - - return nil -} diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go index 40e4646c7044..dbe307ff1882 100644 --- a/pkg/bloomcompactor/bloomcompactor.go +++ b/pkg/bloomcompactor/bloomcompactor.go @@ -51,6 +51,7 @@ import ( "github.com/grafana/loki/pkg/storage" v1 "github.com/grafana/loki/pkg/storage/bloom/v1" chunk_client "github.com/grafana/loki/pkg/storage/chunk/client" + "github.com/grafana/loki/pkg/storage/chunk/client/local" "github.com/grafana/loki/pkg/storage/config" "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper" @@ -166,10 +167,18 @@ func New( return nil, errors.Wrap(err, "create index shipper") } + // The ObjectClient does not expose the key encoder it uses, + // so check the concrete type and 
set the FSEncoder if needed. + var keyEncoder chunk_client.KeyEncoder + switch objectClient.(type) { + case *local.FSObjectClient: + keyEncoder = chunk_client.FSEncoder + } + c.storeClients[periodicConfig.From] = storeClient{ object: objectClient, index: index_storage.NewIndexStorageClient(objectClient, periodicConfig.IndexTables.PathPrefix), - chunk: chunk_client.NewClient(objectClient, nil, schemaConfig), + chunk: chunk_client.NewClient(objectClient, keyEncoder, schemaConfig), indexShipper: indexShipper, } } @@ -275,7 +284,7 @@ func (c *Compactor) compactTable(ctx context.Context, logger log.Logger, tableNa return fmt.Errorf("index store client not found for period starting at %s", schemaCfg.From.String()) } - _, tenants, err := sc.index.ListFiles(ctx, tableName, false) + _, tenants, err := sc.index.ListFiles(ctx, tableName, true) if err != nil { return fmt.Errorf("failed to list files for table %s: %w", tableName, err) } @@ -497,13 +506,12 @@ func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, localDst := createLocalDirName(c.cfg.WorkingDirectory, job) blockOptions := v1.NewBlockOptions(bt.GetNGramLength(), bt.GetNGramSkip()) - // TODO(poyzannur) enable once debugging is over - //defer func() { - // //clean up the bloom directory - // if err := os.RemoveAll(localDst); err != nil { - // level.Error(logger).Log("msg", "failed to remove block directory", "dir", localDst, "err", err) - // } - //}() + defer func() { + //clean up the bloom directory + if err := os.RemoveAll(localDst); err != nil { + level.Error(logger).Log("msg", "failed to remove block directory", "dir", localDst, "err", err) + } + }() var resultingBlock bloomshipper.Block defer func() { @@ -527,8 +535,7 @@ func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, return err } - fpRate := c.limits.BloomFalsePositiveRate(job.tenantID) - resultingBlock, err = compactNewChunks(ctx, logger, job, fpRate, bt, storeClient.chunk, builder) + resultingBlock, err = compactNewChunks(ctx, logger, job, bt, storeClient.chunk, builder, c.limits) if err != nil { return level.Error(logger).Log("msg", "failed compacting new chunks", "err", err) } @@ -537,7 +544,7 @@ func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, // When already compacted metas exists, we need to merge all blocks with amending blooms with new series level.Info(logger).Log("msg", "already compacted metas exists, use mergeBlockBuilder") - var populate = createPopulateFunc(ctx, logger, job, storeClient, bt) + var populate = createPopulateFunc(ctx, job, storeClient, bt, c.limits) seriesIter := makeSeriesIterFromSeriesMeta(job) @@ -551,6 +558,7 @@ func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, }() if err != nil { + level.Error(logger).Log("err", err) return err } @@ -565,6 +573,7 @@ func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, level.Error(logger).Log("msg", "failed merging existing blocks with new chunks", "err", err) return err } + } archivePath := filepath.Join(c.cfg.WorkingDirectory, uuid.New().String()) @@ -575,13 +584,12 @@ func (c *Compactor) runCompact(ctx context.Context, logger log.Logger, job Job, return err } - // TODO(poyzannur) enable once debugging is over - //defer func() { - // err = os.Remove(archivePath) - // if err != nil { - // level.Error(logger).Log("msg", "failed removing archive file", "err", err, "file", archivePath) - // } - //}() + defer func() { + err = os.Remove(archivePath) + if err != nil { + 
level.Error(logger).Log("msg", "failed removing archive file", "err", err, "file", archivePath) + } + }() // Do not change the signature of PutBlocks yet. // Once block size is limited potentially, compactNewChunks will return multiple blocks, hence a list is appropriate. diff --git a/pkg/bloomcompactor/chunkcompactor.go b/pkg/bloomcompactor/chunkcompactor.go index f0f59882b31c..c4993ccc62a5 100644 --- a/pkg/bloomcompactor/chunkcompactor.go +++ b/pkg/bloomcompactor/chunkcompactor.go @@ -22,7 +22,7 @@ import ( ) type compactorTokenizer interface { - PopulateSeriesWithBloom(bloom *v1.SeriesWithBloom, chunks []chunk.Chunk) error + PopulateSeriesWithBloom(bloom *v1.SeriesWithBloom, chunkBatchesIterator v1.Iterator[[]chunk.Chunk]) error } type chunkClient interface { @@ -86,7 +86,7 @@ func makeChunkRefs(chksMetas []tsdbindex.ChunkMeta, tenant string, fp model.Fing return chunkRefs } -func buildBloomFromSeries(seriesMeta seriesMeta, fpRate float64, tokenizer compactorTokenizer, chunks []chunk.Chunk) (v1.SeriesWithBloom, error) { +func buildBloomFromSeries(seriesMeta seriesMeta, fpRate float64, tokenizer compactorTokenizer, chunks v1.Iterator[[]chunk.Chunk]) (v1.SeriesWithBloom, error) { // Create a bloom for this series bloomForChks := v1.SeriesWithBloom{ Series: &v1.Series{ @@ -155,21 +155,20 @@ func createLocalDirName(workingDir string, job Job) string { } // Compacts given list of chunks, uploads them to storage and returns a list of bloomBlocks -func compactNewChunks( - ctx context.Context, +func compactNewChunks(ctx context.Context, logger log.Logger, job Job, - fpRate float64, bt compactorTokenizer, storeClient chunkClient, builder blockBuilder, + limits Limits, ) (bloomshipper.Block, error) { // Ensure the context has not been canceled (ie. compactor shutdown has been triggered). if err := ctx.Err(); err != nil { return bloomshipper.Block{}, err } - bloomIter := newLazyBloomBuilder(ctx, job, storeClient, bt, fpRate, logger) + bloomIter := newLazyBloomBuilder(ctx, job, storeClient, bt, logger, limits) // Build and upload bloomBlock to storage block, err := buildBlockFromBlooms(ctx, logger, builder, bloomIter, job) @@ -182,13 +181,14 @@ func compactNewChunks( } type lazyBloomBuilder struct { - ctx context.Context - metas v1.Iterator[seriesMeta] - tenant string - client chunkClient - bt compactorTokenizer - fpRate float64 - logger log.Logger + ctx context.Context + metas v1.Iterator[seriesMeta] + tenant string + client chunkClient + bt compactorTokenizer + fpRate float64 + logger log.Logger + chunksBatchSize int cur v1.SeriesWithBloom // retured by At() err error // returned by Err() @@ -198,15 +198,16 @@ type lazyBloomBuilder struct { // which are used by the blockBuilder to write a bloom block. // We use an interator to avoid loading all blooms into memory first, before // building the block. 
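Rather than resolving the false-positive rate once in `runCompact` and threading a bare `float64` downward, `compactNewChunks` and `newLazyBloomBuilder` now take the `Limits` interface and resolve per-tenant settings at construction, which is also how the new chunk batch size arrives without yet another parameter. A runnable stand-in, trimmed to the two methods that matter here (the real interface in `config.go` embeds more):

```go
package main

import "fmt"

// Trimmed stand-in for the compactor's Limits interface.
type limits interface {
	BloomFalsePositiveRate(tenantID string) float64
	BloomCompactorChunksBatchSize(tenantID string) int
}

type staticLimits struct{}

func (staticLimits) BloomFalsePositiveRate(string) float64    { return 0.01 }
func (staticLimits) BloomCompactorChunksBatchSize(string) int { return 10 }

type builder struct {
	fpRate          float64
	chunksBatchSize int
}

// newBuilder mirrors newLazyBloomBuilder below: per-tenant settings are
// resolved once, at construction, from the shared limits object.
func newBuilder(tenantID string, l limits) *builder {
	return &builder{
		fpRate:          l.BloomFalsePositiveRate(tenantID),
		chunksBatchSize: l.BloomCompactorChunksBatchSize(tenantID),
	}
}

func main() {
	b := newBuilder("tenant-a", staticLimits{})
	fmt.Println(b.fpRate, b.chunksBatchSize)
}
```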
-func newLazyBloomBuilder(ctx context.Context, job Job, client chunkClient, bt compactorTokenizer, fpRate float64, logger log.Logger) *lazyBloomBuilder { +func newLazyBloomBuilder(ctx context.Context, job Job, client chunkClient, bt compactorTokenizer, logger log.Logger, limits Limits) *lazyBloomBuilder { return &lazyBloomBuilder{ - ctx: ctx, - metas: v1.NewSliceIter(job.seriesMetas), - client: client, - tenant: job.tenantID, - bt: bt, - fpRate: fpRate, - logger: logger, + ctx: ctx, + metas: v1.NewSliceIter(job.seriesMetas), + client: client, + tenant: job.tenantID, + bt: bt, + fpRate: limits.BloomFalsePositiveRate(job.tenantID), + logger: logger, + chunksBatchSize: limits.BloomCompactorChunksBatchSize(job.tenantID), } } @@ -218,20 +219,18 @@ func (it *lazyBloomBuilder) Next() bool { } meta := it.metas.At() - // Get chunks data from list of chunkRefs - chks, err := it.client.GetChunks(it.ctx, makeChunkRefs(meta.chunkRefs, it.tenant, meta.seriesFP)) + batchesIterator, err := newChunkBatchesIterator(it.ctx, it.client, makeChunkRefs(meta.chunkRefs, it.tenant, meta.seriesFP), it.chunksBatchSize) if err != nil { it.err = err it.cur = v1.SeriesWithBloom{} - level.Debug(it.logger).Log("err in getChunks", err) + level.Debug(it.logger).Log("msg", "err creating chunks batches iterator", "err", err) return false } - - it.cur, err = buildBloomFromSeries(meta, it.fpRate, it.bt, chks) + it.cur, err = buildBloomFromSeries(meta, it.fpRate, it.bt, batchesIterator) if err != nil { it.err = err it.cur = v1.SeriesWithBloom{} - level.Debug(it.logger).Log("err in buildBloomFromSeries", err) + level.Debug(it.logger).Log("msg", "err in buildBloomFromSeries", "err", err) return false } return true diff --git a/pkg/bloomcompactor/chunkcompactor_test.go b/pkg/bloomcompactor/chunkcompactor_test.go index 2d31e05f18f8..8bc94fd26537 100644 --- a/pkg/bloomcompactor/chunkcompactor_test.go +++ b/pkg/bloomcompactor/chunkcompactor_test.go @@ -59,7 +59,7 @@ func TestChunkCompactor_BuildBloomFromSeries(t *testing.T) { chunks := []chunk.Chunk{createTestChunk(fp, label)} mbt := mockBloomTokenizer{} - bloom, err := buildBloomFromSeries(seriesMeta, fpRate, &mbt, chunks) + bloom, err := buildBloomFromSeries(seriesMeta, fpRate, &mbt, v1.NewSliceIter([][]chunk.Chunk{chunks})) require.NoError(t, err) require.Equal(t, seriesMeta.seriesFP, bloom.Series.Fingerprint) require.Equal(t, chunks, mbt.chunks) @@ -110,7 +110,7 @@ func TestChunkCompactor_CompactNewChunks(t *testing.T) { pbb := mockPersistentBlockBuilder{} // Run Compaction - compactedBlock, err := compactNewChunks(context.Background(), logger, job, fpRate, &mbt, &mcc, &pbb) + compactedBlock, err := compactNewChunks(context.Background(), logger, job, &mbt, &mcc, &pbb, mockLimits{fpRate: fpRate}) // Validate Compaction Succeeds require.NoError(t, err) @@ -169,7 +169,7 @@ func TestLazyBloomBuilder(t *testing.T) { mbt := &mockBloomTokenizer{} mcc := &mockChunkClient{} - it := newLazyBloomBuilder(context.Background(), job, mcc, mbt, fpRate, logger) + it := newLazyBloomBuilder(context.Background(), job, mcc, mbt, logger, mockLimits{chunksDownloadingBatchSize: 10, fpRate: fpRate}) // first seriesMeta has 1 chunks require.True(t, it.Next()) @@ -199,8 +199,10 @@ type mockBloomTokenizer struct { chunks []chunk.Chunk } -func (mbt *mockBloomTokenizer) PopulateSeriesWithBloom(_ *v1.SeriesWithBloom, c []chunk.Chunk) error { - mbt.chunks = append(mbt.chunks, c...) 
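Alongside the iterator change, this hunk also repairs two go-kit log calls: `Log` takes alternating key/value pairs, and the old `Log("err in getChunks", err)` used the message text as a key with the error as its value. A small demonstration of the difference in the emitted logfmt:

```go
package main

import (
	"errors"
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)
	err := errors.New("boom")

	// Old style: the message becomes a key, yielding awkward logfmt:
	//   level=debug "err in getChunks"=boom
	_ = level.Debug(logger).Log("err in getChunks", err)

	// Fixed style: message and error are keyed separately:
	//   level=debug msg="err creating chunks batches iterator" err=boom
	_ = level.Debug(logger).Log("msg", "err creating chunks batches iterator", "err", err)
}
```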
+func (mbt *mockBloomTokenizer) PopulateSeriesWithBloom(_ *v1.SeriesWithBloom, c v1.Iterator[[]chunk.Chunk]) error { + for c.Next() { + mbt.chunks = append(mbt.chunks, c.At()...) + } return nil } diff --git a/pkg/bloomcompactor/chunksbatchesiterator.go b/pkg/bloomcompactor/chunksbatchesiterator.go new file mode 100644 index 000000000000..a4494b02b7e4 --- /dev/null +++ b/pkg/bloomcompactor/chunksbatchesiterator.go @@ -0,0 +1,48 @@ +package bloomcompactor + +import ( + "context" + "errors" + + "github.com/grafana/loki/pkg/storage/chunk" +) + +type chunksBatchesIterator struct { + context context.Context + client chunkClient + chunksToDownload []chunk.Chunk + batchSize int + + currentBatch []chunk.Chunk + err error +} + +func newChunkBatchesIterator(context context.Context, client chunkClient, chunksToDownload []chunk.Chunk, batchSize int) (*chunksBatchesIterator, error) { + if batchSize <= 0 { + return nil, errors.New("batchSize must be greater than 0") + } + return &chunksBatchesIterator{context: context, client: client, chunksToDownload: chunksToDownload, batchSize: batchSize}, nil +} + +func (c *chunksBatchesIterator) Next() bool { + if len(c.chunksToDownload) == 0 { + return false + } + batchSize := c.batchSize + chunksToDownloadCount := len(c.chunksToDownload) + if chunksToDownloadCount < batchSize { + batchSize = chunksToDownloadCount + } + chunksToDownload := c.chunksToDownload[:batchSize] + c.chunksToDownload = c.chunksToDownload[batchSize:] + c.currentBatch, c.err = c.client.GetChunks(c.context, chunksToDownload) + return c.err == nil +} + +func (c *chunksBatchesIterator) Err() error { + return c.err +} + +func (c *chunksBatchesIterator) At() []chunk.Chunk { + return c.currentBatch +} diff --git a/pkg/bloomcompactor/chunksbatchesiterator_test.go b/pkg/bloomcompactor/chunksbatchesiterator_test.go new file mode 100644 index 000000000000..170f2662b508 --- /dev/null +++ b/pkg/bloomcompactor/chunksbatchesiterator_test.go @@ -0,0 +1,96 @@ +package bloomcompactor + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/storage/chunk" + tsdbindex "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb/index" +) + +func Test_chunksBatchesIterator(t *testing.T) { + tests := map[string]struct { + batchSize int + chunksToDownload []chunk.Chunk + constructorError error + + hadNextCount int + }{ + "expected error if batch size is set to 0": { + batchSize: 0, + constructorError: errors.New("batchSize must be greater than 0"), + }, + "expected no error if there are no chunks": { + hadNextCount: 0, + batchSize: 10, + }, + "expected 1 call to the client": { + chunksToDownload: createFakeChunks(10), + hadNextCount: 1, + batchSize: 20, + }, + "expected 1 call to the client(2)": { + chunksToDownload: createFakeChunks(10), + hadNextCount: 1, + batchSize: 10, + }, + "expected 2 calls to the client": { + chunksToDownload: createFakeChunks(10), + hadNextCount: 2, + batchSize: 6, + }, + "expected 10 calls to the client": { + chunksToDownload: createFakeChunks(10), + hadNextCount: 10, + batchSize: 1, + }, + } + for name, data := range tests { + t.Run(name, func(t *testing.T) { + client := &fakeClient{} + iterator, err := newChunkBatchesIterator(context.Background(), client, data.chunksToDownload, data.batchSize) + if data.constructorError != nil { + require.Equal(t, err, data.constructorError) + return + } + hadNextCount := 0 + var downloadedChunks []chunk.Chunk + for iterator.Next() { + hadNextCount++ + downloaded := iterator.At() 
+ downloadedChunks = append(downloadedChunks, downloaded...) + require.LessOrEqual(t, len(downloaded), data.batchSize) + } + require.NoError(t, iterator.Err()) + require.Equal(t, data.chunksToDownload, downloadedChunks) + require.Equal(t, data.hadNextCount, client.callsCount) + require.Equal(t, data.hadNextCount, hadNextCount) + }) + } +} + +func createFakeChunks(count int) []chunk.Chunk { + metas := make([]tsdbindex.ChunkMeta, 0, count) + for i := 0; i < count; i++ { + metas = append(metas, tsdbindex.ChunkMeta{ + Checksum: uint32(i), + MinTime: int64(i), + MaxTime: int64(i + 100), + KB: uint32(i * 100), + Entries: uint32(i * 10), + }) + } + return makeChunkRefs(metas, "fake", 0xFFFF) +} + +type fakeClient struct { + callsCount int +} + +func (f *fakeClient) GetChunks(_ context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) { + f.callsCount++ + return chunks, nil +} diff --git a/pkg/bloomcompactor/config.go b/pkg/bloomcompactor/config.go index c3969ac6af38..884034fdd043 100644 --- a/pkg/bloomcompactor/config.go +++ b/pkg/bloomcompactor/config.go @@ -41,6 +41,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { type Limits interface { downloads.Limits BloomCompactorShardSize(tenantID string) int + BloomCompactorChunksBatchSize(userID string) int BloomCompactorMaxTableAge(tenantID string) time.Duration BloomCompactorEnabled(tenantID string) bool BloomNGramLength(tenantID string) int diff --git a/pkg/bloomcompactor/mergecompactor.go b/pkg/bloomcompactor/mergecompactor.go index 0cf55cef86a7..6e2143f75135 100644 --- a/pkg/bloomcompactor/mergecompactor.go +++ b/pkg/bloomcompactor/mergecompactor.go @@ -2,6 +2,7 @@ package bloomcompactor import ( "context" + "fmt" "github.com/grafana/dskit/concurrency" @@ -74,7 +75,7 @@ func makeBlockIterFromBlocks(ctx context.Context, logger log.Logger, return blockIters, blockPaths, nil } -func createPopulateFunc(ctx context.Context, logger log.Logger, job Job, storeClient storeClient, bt *v1.BloomTokenizer) func(series *v1.Series, bloom *v1.Bloom) error { +func createPopulateFunc(ctx context.Context, job Job, storeClient storeClient, bt *v1.BloomTokenizer, limits Limits) func(series *v1.Series, bloom *v1.Bloom) error { return func(series *v1.Series, bloom *v1.Bloom) error { bloomForChks := v1.SeriesWithBloom{ Series: series, @@ -95,12 +96,11 @@ func createPopulateFunc(ctx context.Context, logger log.Logger, job Job, storeCl } } - chks, err := storeClient.chunk.GetChunks(ctx, chunkRefs) + batchesIterator, err := newChunkBatchesIterator(ctx, storeClient.chunk, chunkRefs, limits.BloomCompactorChunksBatchSize(job.tenantID)) if err != nil { - level.Error(logger).Log("msg", "failed downloading chunks", "err", err) - return err + return fmt.Errorf("error creating chunks batches iterator: %w", err) } - err = bt.PopulateSeriesWithBloom(&bloomForChks, chks) + err = bt.PopulateSeriesWithBloom(&bloomForChks, batchesIterator) if err != nil { return err } diff --git a/pkg/bloomcompactor/sharding_test.go b/pkg/bloomcompactor/sharding_test.go index fc77536f6061..4e79752279fb 100644 --- a/pkg/bloomcompactor/sharding_test.go +++ b/pkg/bloomcompactor/sharding_test.go @@ -128,9 +128,22 @@ func TestShuffleSharding(t *testing.T) { type mockLimits struct { *validation.Overrides - bloomCompactorShardSize int + bloomCompactorShardSize int + chunksDownloadingBatchSize int + fpRate float64 +} + +func (m mockLimits) BloomFalsePositiveRate(_ string) float64 { + return m.fpRate } func (m mockLimits) BloomCompactorShardSize(_ string) int { return m.bloomCompactorShardSize } + 
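The new `chunksBatchesIterator` above bounds memory by downloading chunks in fixed-size slices of the ref list instead of fetching every chunk of a series at once; its `Next` also returns false on a fetch error, leaving the error for `Err()`, which is the same contract other `v1.Iterator` consumers follow. A generic rendition of the slicing logic, with the chunk client dropped so it runs standalone:

```go
package main

import (
	"errors"
	"fmt"
)

// batchIterator yields fixed-size batches of items: Next advances,
// At returns the current batch, Err reports the first failure.
type batchIterator[T any] struct {
	remaining []T
	batchSize int
	current   []T
	err       error
}

func newBatchIterator[T any](items []T, batchSize int) (*batchIterator[T], error) {
	if batchSize <= 0 {
		return nil, errors.New("batchSize must be greater than 0")
	}
	return &batchIterator[T]{remaining: items, batchSize: batchSize}, nil
}

func (it *batchIterator[T]) Next() bool {
	if len(it.remaining) == 0 {
		return false
	}
	n := it.batchSize
	if len(it.remaining) < n {
		n = len(it.remaining)
	}
	// In the real iterator this is where the batch is fetched from the
	// chunk client, and a fetch error makes Next return false.
	it.current, it.remaining = it.remaining[:n], it.remaining[n:]
	return true
}

func (it *batchIterator[T]) At() []T    { return it.current }
func (it *batchIterator[T]) Err() error { return it.err }

func main() {
	it, _ := newBatchIterator([]int{1, 2, 3, 4, 5}, 2)
	for it.Next() {
		fmt.Println(it.At()) // [1 2] then [3 4] then [5]
	}
}
```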
+func (m mockLimits) BloomCompactorChunksBatchSize(_ string) int { + if m.chunksDownloadingBatchSize != 0 { + return m.chunksDownloadingBatchSize + } + return 1 +} diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go index 403378e016a9..5c2cc9dad003 100644 --- a/pkg/bloomgateway/bloomgateway.go +++ b/pkg/bloomgateway/bloomgateway.go @@ -27,8 +27,6 @@ of line filter expressions. | bloomgateway.Worker | - bloomshipper.Store - | bloomshipper.Shipper | bloomshipper.BloomFileClient @@ -80,8 +78,10 @@ var ( ) type metrics struct { - queueDuration prometheus.Histogram - inflightRequests prometheus.Summary + queueDuration prometheus.Histogram + inflightRequests prometheus.Summary + chunkRefsUnfiltered prometheus.Counter + chunkRefsFiltered prometheus.Counter } func newMetrics(registerer prometheus.Registerer, namespace, subsystem string) *metrics { @@ -102,9 +102,29 @@ func newMetrics(registerer prometheus.Registerer, namespace, subsystem string) * MaxAge: time.Minute, AgeBuckets: 6, }), + chunkRefsUnfiltered: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "chunkrefs_pre_filtering", + Help: "Total amount of chunk refs pre filtering. Does not count chunk refs in failed requests.", + }), + chunkRefsFiltered: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "chunkrefs_post_filtering", + Help: "Total amount of chunk refs post filtering.", + }), } } +func (m *metrics) addUnfilteredCount(n int) { + m.chunkRefsUnfiltered.Add(float64(n)) +} + +func (m *metrics) addFilteredCount(n int) { + m.chunkRefsFiltered.Add(float64(n)) +} + // SyncMap is a map structure which can be synchronized using the RWMutex type SyncMap[k comparable, v any] struct { sync.RWMutex @@ -149,9 +169,9 @@ type Gateway struct { workerMetrics *workerMetrics queueMetrics *queue.Metrics - queue *queue.RequestQueue - activeUsers *util.ActiveUsersCleanupService - bloomStore bloomshipper.Store + queue *queue.RequestQueue + activeUsers *util.ActiveUsersCleanupService + bloomShipper bloomshipper.Interface sharding ShardingStrategy @@ -180,9 +200,8 @@ func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, o sharding: shardingStrategy, pendingTasks: makePendingTasks(pendingTasksInitialCap), workerConfig: workerConfig{ - maxWaitTime: 200 * time.Millisecond, - maxItems: 100, - processBlocksSequentially: false, + maxWaitTime: 200 * time.Millisecond, + maxItems: 100, }, workerMetrics: newWorkerMetrics(reg, constants.Loki, metricsSubsystem), queueMetrics: queue.NewMetrics(reg, constants.Loki, metricsSubsystem), @@ -201,13 +220,8 @@ func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, o return nil, err } - bloomStore, err := bloomshipper.NewBloomStore(bloomShipper) - if err != nil { - return nil, err - } - // We need to keep a reference to be able to call Stop() on shutdown of the gateway. 
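The `FilterChunkRefs` hunk that follows restructures the response loop: partial results are only accumulated inside the `select`, a labeled `break` exits once all of them have arrived, and the merge plus the new pre-/post-filtering counters run exactly once afterwards. A stripped-down version of that control flow, trimmed to the success path (the real loop also selects on `ctx.Done()` and an error channel):

```go
package main

import "fmt"

// gather collects exactly `want` partial results, then merges outside
// the receive loop — the pattern the labeled break enables.
func gather(resCh <-chan int, want int) []int {
	var responses []int
outer:
	for {
		select {
		case res := <-resCh:
			responses = append(responses, res)
			if len(responses) == want {
				break outer // a plain break would only leave the select
			}
		}
	}
	// Merge/post-process here, once, with the complete set in hand.
	return responses
}

func main() {
	ch := make(chan int, 3)
	for i := 0; i < 3; i++ {
		ch <- i
	}
	fmt.Println(gather(ch, 3))
}
```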
-	g.bloomStore = bloomStore
+	g.bloomShipper = bloomShipper

 	if err := g.initServices(); err != nil {
 		return nil, err
@@ -222,7 +236,7 @@ func (g *Gateway) initServices() error {
 	svcs := []services.Service{g.queue, g.activeUsers}
 	for i := 0; i < g.cfg.WorkerConcurrency; i++ {
 		id := fmt.Sprintf("bloom-query-worker-%d", i)
-		w := newWorker(id, g.workerConfig, g.queue, g.bloomStore, g.pendingTasks, g.logger, g.workerMetrics)
+		w := newWorker(id, g.workerConfig, g.queue, g.bloomShipper, g.pendingTasks, g.logger, g.workerMetrics)
 		svcs = append(svcs, w)
 	}
 	g.serviceMngr, err = services.NewManager(svcs...)
@@ -274,7 +288,7 @@ func (g *Gateway) running(ctx context.Context) error {
 }

 func (g *Gateway) stopping(_ error) error {
-	g.bloomStore.Stop()
+	g.bloomShipper.Stop()
 	return services.StopManagerAndAwaitStopped(context.Background(), g.serviceMngr)
 }

@@ -285,8 +299,12 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
 		return nil, err
 	}

+	numChunksUnfiltered := len(req.Refs)
+
 	// Shortcut if request does not contain filters
 	if len(req.Filters) == 0 {
+		g.metrics.addUnfilteredCount(numChunksUnfiltered)
+		g.metrics.addFilteredCount(len(req.Refs))
 		return &logproto.FilterChunkRefResponse{
 			ChunkRefs: req.Refs,
 		}, nil
@@ -314,6 +332,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
 	responses := responsesPool.Get(requestCount)
 	defer responsesPool.Put(responses)

+outer:
 	for {
 		select {
 		case <-ctx.Done():
@@ -323,20 +342,27 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
 		case res := <-resCh:
 			responses = append(responses, res)
 			// log line is helpful for debugging tests
-			// level.Debug(g.logger).Log("msg", "got partial result", "task", task.ID, "tenant", tenantID, "fp", uint64(res.Fp), "chunks", res.Removals.Len(), "progress", fmt.Sprintf("%d/%d", len(responses), requestCount))
+			level.Debug(g.logger).Log("msg", "got partial result", "task", task.ID, "tenant", tenantID, "fp_int", uint64(res.Fp), "fp_hex", res.Fp, "chunks_to_remove", res.Removals.Len(), "progress", fmt.Sprintf("%d/%d", len(responses), requestCount))
 			// wait for all parts of the full response
 			if len(responses) == requestCount {
-				for _, o := range responses {
-					if res.Removals.Len() == 0 {
-						continue
-					}
-					// we must not remove items from req.Refs as long as the worker may iterater over them
-					g.removeNotMatchingChunks(req, o)
-				}
-				return &logproto.FilterChunkRefResponse{ChunkRefs: req.Refs}, nil
+				break outer
 			}
 		}
 	}
+
+	for _, o := range responses {
+		if o.Removals.Len() == 0 {
+			continue
+		}
+		// we must not remove items from req.Refs as long as the worker may iterate over them
+		g.removeNotMatchingChunks(req, o)
+	}
+
+	g.metrics.addUnfilteredCount(numChunksUnfiltered)
+	g.metrics.addFilteredCount(len(req.Refs))
+
+	level.Debug(g.logger).Log("msg", "return filtered chunk refs", "unfiltered", numChunksUnfiltered, "filtered", len(req.Refs))
+	return &logproto.FilterChunkRefResponse{ChunkRefs: req.Refs}, nil
 }

 func (g *Gateway) removeNotMatchingChunks(req *logproto.FilterChunkRefRequest, res v1.Output) {
diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go
index b34e3d55852a..1e85e7d2089c 100644
--- a/pkg/bloomgateway/bloomgateway_test.go
+++ b/pkg/bloomgateway/bloomgateway_test.go
@@ -269,89 +269,73 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
 	})

 	t.Run("use fuse queriers to filter chunks", func(t *testing.T) {
-		for _, tc := range []struct {
-			name  string
-			value bool
-		}{
-			{"sequentially", true},
-			{"callback", false},
-		} {
-			t.Run(tc.name, func(t *testing.T) {
-
-				reg := prometheus.NewRegistry()
-				gw, err := New(cfg, schemaCfg, storageCfg, limits, ss, cm, logger, reg)
-				require.NoError(t, err)
-
-				now := mktime("2023-10-03 10:00")
-
-				// replace store implementation and re-initialize workers and sub-services
-				bqs, data := createBlockQueriers(t, 5, now.Add(-8*time.Hour), now, 0, 1024)
-				gw.bloomStore = newMockBloomStore(bqs)
-				gw.workerConfig.processBlocksSequentially = tc.value
-				err = gw.initServices()
-				require.NoError(t, err)
-
-				t.Log("process blocks in worker sequentially", gw.workerConfig.processBlocksSequentially)
-
-				err = services.StartAndAwaitRunning(context.Background(), gw)
-				require.NoError(t, err)
-				t.Cleanup(func() {
-					err = services.StopAndAwaitTerminated(context.Background(), gw)
-					require.NoError(t, err)
-				})
+		reg := prometheus.NewRegistry()
+		gw, err := New(cfg, schemaCfg, storageCfg, limits, ss, cm, logger, reg)
+		require.NoError(t, err)

-				chunkRefs := createQueryInputFromBlockData(t, tenantID, data, 100)
-
-				t.Run("no match - return empty response", func(t *testing.T) {
-					inputChunkRefs := groupRefs(t, chunkRefs)
-					req := &logproto.FilterChunkRefRequest{
-						From:    now.Add(-8 * time.Hour),
-						Through: now,
-						Refs:    inputChunkRefs,
-						Filters: []syntax.LineFilter{
-							{Ty: labels.MatchEqual, Match: "does not match"},
-						},
-					}
-					ctx := user.InjectOrgID(context.Background(), tenantID)
-					res, err := gw.FilterChunkRefs(ctx, req)
-					require.NoError(t, err)
-
-					expectedResponse := &logproto.FilterChunkRefResponse{
-						ChunkRefs: []*logproto.GroupedChunkRefs{},
-					}
-					require.Equal(t, expectedResponse, res)
-				})
+		now := mktime("2023-10-03 10:00")

-				t.Run("match - return filtered", func(t *testing.T) {
-					inputChunkRefs := groupRefs(t, chunkRefs)
-					// hack to get indexed key for a specific series
-					// the indexed key range for a series is defined as
-					// i * keysPerSeries ... i * keysPerSeries + keysPerSeries - 1
-					// where i is the nth series in a block
-					// fortunately, i is also used as Checksum for the single chunk of a series
-					// see mkBasicSeriesWithBlooms() in pkg/storage/bloom/v1/test_util.go
-					key := inputChunkRefs[0].Refs[0].Checksum*1000 + 500
-
-					req := &logproto.FilterChunkRefRequest{
-						From:    now.Add(-8 * time.Hour),
-						Through: now,
-						Refs:    inputChunkRefs,
-						Filters: []syntax.LineFilter{
-							{Ty: labels.MatchEqual, Match: fmt.Sprintf("series %d", key)},
-						},
-					}
-					ctx := user.InjectOrgID(context.Background(), tenantID)
-					res, err := gw.FilterChunkRefs(ctx, req)
-					require.NoError(t, err)
-
-					expectedResponse := &logproto.FilterChunkRefResponse{
-						ChunkRefs: inputChunkRefs[:1],
-					}
-					require.Equal(t, expectedResponse, res)
-				})
+		// replace store implementation and re-initialize workers and sub-services
+		bqs, data := createBlockQueriers(t, 5, now.Add(-8*time.Hour), now, 0, 1024)
+		gw.bloomShipper = newMockBloomStore(bqs)
+		err = gw.initServices()
+		require.NoError(t, err)

-			})
-		}
+		err = services.StartAndAwaitRunning(context.Background(), gw)
+		require.NoError(t, err)
+		t.Cleanup(func() {
+			err = services.StopAndAwaitTerminated(context.Background(), gw)
+			require.NoError(t, err)
+		})
+
+		chunkRefs := createQueryInputFromBlockData(t, tenantID, data, 100)
+
+		t.Run("no match - return empty response", func(t *testing.T) {
+			inputChunkRefs := groupRefs(t, chunkRefs)
+			req := &logproto.FilterChunkRefRequest{
+				From:    now.Add(-8 * time.Hour),
+				Through: now,
+				Refs:    inputChunkRefs,
+				Filters: []syntax.LineFilter{
+					{Ty: labels.MatchEqual, Match: "does not match"},
+				},
+			}
+			ctx := user.InjectOrgID(context.Background(), tenantID)
+			res, err := gw.FilterChunkRefs(ctx, req)
+			require.NoError(t, err)
+
+			expectedResponse := &logproto.FilterChunkRefResponse{
+				ChunkRefs: []*logproto.GroupedChunkRefs{},
+			}
+			require.Equal(t, expectedResponse, res)
+		})
+
+		t.Run("match - return filtered", func(t *testing.T) {
+			inputChunkRefs := groupRefs(t, chunkRefs)
+			// hack to get indexed key for a specific series
+			// the indexed key range for a series is defined as
+			// i * keysPerSeries ... i * keysPerSeries + keysPerSeries - 1
+			// where i is the nth series in a block
+			// fortunately, i is also used as Checksum for the single chunk of a series
+			// see mkBasicSeriesWithBlooms() in pkg/storage/bloom/v1/test_util.go
+			key := inputChunkRefs[0].Refs[0].Checksum*1000 + 500
+
+			req := &logproto.FilterChunkRefRequest{
+				From:    now.Add(-8 * time.Hour),
+				Through: now,
+				Refs:    inputChunkRefs,
+				Filters: []syntax.LineFilter{
+					{Ty: labels.MatchEqual, Match: fmt.Sprintf("series %d", key)},
+				},
+			}
+			ctx := user.InjectOrgID(context.Background(), tenantID)
+			res, err := gw.FilterChunkRefs(ctx, req)
+			require.NoError(t, err)
+			expectedResponse := &logproto.FilterChunkRefResponse{
+				ChunkRefs: inputChunkRefs[:1],
+			}
+			require.Equal(t, expectedResponse, res)
+		})
 	})
 }

@@ -388,15 +372,10 @@ type mockBloomStore struct {
 	bqs []bloomshipper.BlockQuerierWithFingerprintRange
 }

-var _ bloomshipper.Store = &mockBloomStore{}
-
-// GetBlockQueriersForBlockRefs implements bloomshipper.Store.
-func (s *mockBloomStore) GetBlockQueriersForBlockRefs(_ context.Context, _ string, _ []bloomshipper.BlockRef) ([]bloomshipper.BlockQuerierWithFingerprintRange, error) {
-	return s.bqs, nil
-}
+var _ bloomshipper.Interface = &mockBloomStore{}

-// GetBlockRefs implements bloomshipper.Store.
-func (s *mockBloomStore) GetBlockRefs(_ context.Context, tenant string, _, _ time.Time) ([]bloomshipper.BlockRef, error) {
+// GetBlockRefs implements bloomshipper.Interface.
+func (s *mockBloomStore) GetBlockRefs(_ context.Context, tenant string, _, _ model.Time) ([]bloomshipper.BlockRef, error) {
 	blocks := make([]bloomshipper.BlockRef, 0, len(s.bqs))
 	for i := range s.bqs {
 		blocks = append(blocks, bloomshipper.BlockRef{
@@ -410,15 +389,11 @@ func (s *mockBloomStore) GetBlockRefs(_ context.Context, tenant string, _, _ tim
 	return blocks, nil
 }

-// GetBlockQueriers implements bloomshipper.Store.
-func (s *mockBloomStore) GetBlockQueriers(_ context.Context, _ string, _, _ time.Time, _ []uint64) ([]bloomshipper.BlockQuerierWithFingerprintRange, error) {
-	return s.bqs, nil
-}
-
+// Stop implements bloomshipper.Interface.
 func (s *mockBloomStore) Stop() {}

-// ForEach implements bloomshipper.Store.
-func (s *mockBloomStore) ForEach(_ context.Context, _ string, _ []bloomshipper.BlockRef, callback bloomshipper.ForEachBlockCallback) error {
+// Fetch implements bloomshipper.Interface.
+func (s *mockBloomStore) Fetch(_ context.Context, _ string, _ []bloomshipper.BlockRef, callback bloomshipper.ForEachBlockCallback) error {
 	shuffled := make([]bloomshipper.BlockQuerierWithFingerprintRange, len(s.bqs))
 	_ = copy(shuffled, s.bqs)
diff --git a/pkg/bloomgateway/worker.go b/pkg/bloomgateway/worker.go
index e82a0daea63c..73100025a743 100644
--- a/pkg/bloomgateway/worker.go
+++ b/pkg/bloomgateway/worker.go
@@ -20,8 +20,6 @@ import (
 type workerConfig struct {
 	maxWaitTime time.Duration
 	maxItems    int
-
-	processBlocksSequentially bool
 }

 type workerMetrics struct {
@@ -29,6 +27,7 @@ type workerMetrics struct {
 	dequeueErrors      *prometheus.CounterVec
 	dequeueWaitTime    *prometheus.SummaryVec
 	storeAccessLatency *prometheus.HistogramVec
+	bloomQueryLatency  *prometheus.HistogramVec
 }

 func newWorkerMetrics(registerer prometheus.Registerer, namespace, subsystem string) *workerMetrics {
@@ -52,6 +51,13 @@ func newWorkerMetrics(registerer prometheus.Registerer, namespace, subsystem str
 			Name:      "dequeue_wait_time",
 			Help:      "Time spent waiting for dequeuing tasks from queue",
 		}, labels),
+		bloomQueryLatency: promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "bloom_query_latency",
+			Help:      "Latency in seconds of processing bloom blocks",
+		}, append(labels, "status")),
+		// TODO(chaudum): Move this metric into the bloomshipper
 		storeAccessLatency: promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{
 			Namespace: namespace,
 			Subsystem: subsystem,
@@ -72,18 +78,18 @@ type worker struct {
 	id      string
 	cfg     workerConfig
 	queue   *queue.RequestQueue
-	store   bloomshipper.Store
+	shipper bloomshipper.Interface
 	tasks   *pendingTasks
 	logger  log.Logger
 	metrics *workerMetrics
 }

-func newWorker(id string, cfg workerConfig, queue *queue.RequestQueue, store bloomshipper.Store, tasks *pendingTasks, logger log.Logger, metrics *workerMetrics) *worker {
+func newWorker(id string, cfg workerConfig, queue *queue.RequestQueue, shipper bloomshipper.Interface, tasks *pendingTasks, logger log.Logger, metrics *workerMetrics) *worker {
 	w := &worker{
 		id:      id,
 		cfg:     cfg,
 		queue:   queue,
-		store:   store,
+		shipper: shipper,
 		tasks:   tasks,
 		logger:  log.With(logger, "worker", id),
 		metrics: metrics,
@@ -156,7 +162,7 @@ func (w *worker) running(ctx context.Context) error {
 			level.Debug(logger).Log("msg", "process tasks", "tasks", len(tasks))

 			storeFetchStart := time.Now()
-			blockRefs, err := w.store.GetBlockRefs(taskCtx, tasks[0].Tenant, day, day.Add(Day).Add(-1*time.Nanosecond))
+			blockRefs, err := w.shipper.GetBlockRefs(taskCtx, tasks[0].Tenant, toModelTime(day), toModelTime(day.Add(Day).Add(-1*time.Nanosecond)))
 			w.metrics.storeAccessLatency.WithLabelValues(w.id, "GetBlockRefs").Observe(time.Since(storeFetchStart).Seconds())
 			if err != nil {
 				for _, t := range tasks {
@@ -188,11 +194,7 @@ func (w *worker) running(ctx context.Context) error {
 				blockRefs = append(blockRefs, b.blockRef)
 			}

-			if w.cfg.processBlocksSequentially {
-				err = w.processBlocksSequentially(taskCtx, tasks[0].Tenant, day, blockRefs, boundedRefs)
-			} else {
-				err = w.processBlocksWithCallback(taskCtx, tasks[0].Tenant, day, blockRefs, boundedRefs)
-			}
+			err = w.processBlocksWithCallback(taskCtx, tasks[0].Tenant, day, blockRefs, boundedRefs)
 			if err != nil {
 				for _, t := range tasks {
 					t.ErrCh <- err
@@ -216,46 +218,39 @@ func (w *worker) stopping(err error) error {
 }

 func (w *worker) processBlocksWithCallback(taskCtx context.Context, tenant string, day time.Time, blockRefs []bloomshipper.BlockRef, boundedRefs []boundedTasks) error {
-	return w.store.ForEach(taskCtx, tenant, blockRefs, func(bq *v1.BlockQuerier, minFp, maxFp uint64) error {
+	return w.shipper.Fetch(taskCtx, tenant, blockRefs, func(bq *v1.BlockQuerier, minFp, maxFp uint64) error {
 		for _, b := range boundedRefs {
 			if b.blockRef.MinFingerprint == minFp && b.blockRef.MaxFingerprint == maxFp {
-				processBlock(bq, day, b.tasks)
-				return nil
+				return w.processBlock(bq, day, b.tasks)
 			}
 		}
 		return nil
 	})
 }

-func (w *worker) processBlocksSequentially(taskCtx context.Context, tenant string, day time.Time, blockRefs []bloomshipper.BlockRef, boundedRefs []boundedTasks) error {
-	storeFetchStart := time.Now()
-	blockQueriers, err := w.store.GetBlockQueriersForBlockRefs(taskCtx, tenant, blockRefs)
-	w.metrics.storeAccessLatency.WithLabelValues(w.id, "GetBlockQueriersForBlockRefs").Observe(time.Since(storeFetchStart).Seconds())
-	if err != nil {
-		return err
-	}
-
-	for i := range blockQueriers {
-		processBlock(blockQueriers[i].BlockQuerier, day, boundedRefs[i].tasks)
-	}
-	return nil
-}
-
-func processBlock(blockQuerier *v1.BlockQuerier, day time.Time, tasks []Task) {
+func (w *worker) processBlock(blockQuerier *v1.BlockQuerier, day time.Time, tasks []Task) error {
 	schema, err := blockQuerier.Schema()
 	if err != nil {
-		for _, t := range tasks {
-			t.ErrCh <- errors.Wrap(err, "failed to get block schema")
-		}
+		return err
 	}

 	tokenizer := v1.NewNGramTokenizer(schema.NGramLen(), 0)
 	it := newTaskMergeIterator(day, tokenizer, tasks...)
 	fq := blockQuerier.Fuse([]v1.PeekingIterator[v1.Request]{it})
+
+	start := time.Now()
 	err = fq.Run()
+	duration := time.Since(start).Seconds()
+
 	if err != nil {
-		for _, t := range tasks {
-			t.ErrCh <- errors.Wrap(err, "failed to run chunk check")
-		}
+		w.metrics.bloomQueryLatency.WithLabelValues(w.id, "failure").Observe(duration)
+		return err
 	}
+
+	w.metrics.bloomQueryLatency.WithLabelValues(w.id, "success").Observe(duration)
+	return nil
+}
+
+func toModelTime(t time.Time) model.Time {
+	return model.TimeFromUnixNano(t.UnixNano())
 }
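The new `bloom_query_latency` histogram above records per-block processing time partitioned by a success/failure status label. A minimal, self-contained sketch of that timing pattern (metric and operation names here are illustrative, not Loki's actual ones):

package main

import (
	"errors"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// queryLatency mirrors the shape of bloom_query_latency: one histogram,
// partitioned by worker id and a success/failure status label.
var queryLatency = promauto.NewHistogramVec(prometheus.HistogramOpts{
	Name: "example_query_latency_seconds",
	Help: "Latency in seconds of processing a block (illustrative).",
}, []string{"worker", "status"})

// timeOperation runs fn, then observes the duration under the right status,
// so failures and successes land in separate series.
func timeOperation(workerID string, fn func() error) error {
	start := time.Now()
	err := fn()
	duration := time.Since(start).Seconds()

	if err != nil {
		queryLatency.WithLabelValues(workerID, "failure").Observe(duration)
		return err
	}
	queryLatency.WithLabelValues(workerID, "success").Observe(duration)
	return nil
}

func main() {
	_ = timeOperation("bloom-query-worker-0", func() error { return nil })
	_ = timeOperation("bloom-query-worker-0", func() error { return errors.New("boom") })
}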
diff --git a/pkg/logql/log/labels.go b/pkg/logql/log/labels.go
index 2144abdf5d7e..ea4570e652df 100644
--- a/pkg/logql/log/labels.go
+++ b/pkg/logql/log/labels.go
@@ -157,7 +157,7 @@ type LabelsBuilder struct {
 // NewBaseLabelsBuilderWithGrouping creates a new base labels builder with grouping to compute results.
 func NewBaseLabelsBuilderWithGrouping(groups []string, parserKeyHints ParserHint, without, noLabels bool) *BaseLabelsBuilder {
 	if parserKeyHints == nil {
-		parserKeyHints = noParserHints
+		parserKeyHints = NoParserHints()
 	}

 	const labelsCapacity = 16
@@ -179,7 +179,7 @@ func NewBaseLabelsBuilderWithGrouping(groups []string, parserKeyHints ParserHint

 // NewBaseLabelsBuilder creates a new base labels builder.
 func NewBaseLabelsBuilder() *BaseLabelsBuilder {
-	return NewBaseLabelsBuilderWithGrouping(nil, noParserHints, false, false)
+	return NewBaseLabelsBuilderWithGrouping(nil, NoParserHints(), false, false)
 }

 // ForLabels creates a labels builder for a given labels set as base.
diff --git a/pkg/logql/log/parser.go b/pkg/logql/log/parser.go
index be059a283156..c03e7c91cb96 100644
--- a/pkg/logql/log/parser.go
+++ b/pkg/logql/log/parser.go
@@ -493,11 +493,13 @@ func (l *LogfmtExpressionParser) Process(_ int64, line []byte, lbs *LabelsBuilde
 				return "", false
 			}

-			if !lbs.ParserLabelHints().ShouldExtract(sanitized) {
+			_, alwaysExtract := keys[sanitized]
+			if !alwaysExtract && !lbs.ParserLabelHints().ShouldExtract(sanitized) {
 				return "", false
 			}
 			return sanitized, true
 		})
+
 		if !ok {
 			continue
 		}
@@ -530,6 +532,7 @@ func (l *LogfmtExpressionParser) Process(_ int64, line []byte, lbs *LabelsBuilde
 			}
 		}
 	}
+
 	if l.strict && l.dec.Err() != nil {
 		addErrLabel(errLogfmt, l.dec.Err(), lbs)
 		return line, true
diff --git a/pkg/logql/log/parser_hints.go b/pkg/logql/log/parser_hints.go
index cdb61015dd4d..3fd4cff2b332 100644
--- a/pkg/logql/log/parser_hints.go
+++ b/pkg/logql/log/parser_hints.go
@@ -6,7 +6,9 @@ import (
 	"github.com/grafana/loki/pkg/logqlmodel"
 )

-var noParserHints = &Hints{}
+func NoParserHints() ParserHint {
+	return &Hints{}
+}

 // ParserHint are hints given to LogQL parsers.
 // This is specially useful for parser that extract implicitly all possible label keys.
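Replacing the shared package-level `noParserHints` value with a `NoParserHints()` constructor matters because `RecordExtracted` (changed below) now appends to the hints' `extracted` slice unconditionally: a single shared instance would leak extracted labels across unrelated queries. A minimal sketch of the difference, using toy types rather than the actual Loki ones:

package main

import "fmt"

// hints carries per-query state, like the real Hints.extracted slice.
type hints struct{ extracted []string }

func (h *hints) recordExtracted(key string) { h.extracted = append(h.extracted, key) }

// Shared instance: every caller mutates the same value.
var sharedHints = &hints{}

// Constructor: each caller gets a fresh, isolated value.
func noParserHints() *hints { return &hints{} }

func main() {
	q1, q2 := sharedHints, sharedHints
	q1.recordExtracted("foo")
	fmt.Println(len(q2.extracted)) // 1 -- q2 observes q1's state

	q3, q4 := noParserHints(), noParserHints()
	q3.recordExtracted("foo")
	fmt.Println(len(q4.extracted)) // 0 -- isolated
}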
@@ -58,10 +60,6 @@ type Hints struct {
 }

 func (p *Hints) ShouldExtract(key string) bool {
-	if len(p.requiredLabels) == 0 {
-		return true
-	}
-
 	for _, l := range p.extracted {
 		if l == key {
 			return false
@@ -74,7 +72,7 @@ func (p *Hints) ShouldExtract(key string) bool {
 		}
 	}

-	return false
+	return len(p.requiredLabels) == 0
 }

 func (p *Hints) ShouldExtractPrefix(prefix string) bool {
@@ -95,19 +93,25 @@ func (p *Hints) NoLabels() bool {
 }

 func (p *Hints) RecordExtracted(key string) {
-	for _, l := range p.requiredLabels {
-		if l == key {
-			p.extracted = append(p.extracted, key)
-			return
-		}
-	}
+	p.extracted = append(p.extracted, key)
 }

 func (p *Hints) AllRequiredExtracted() bool {
-	if len(p.requiredLabels) == 0 {
+	if len(p.requiredLabels) == 0 || len(p.extracted) < len(p.requiredLabels) {
 		return false
 	}
-	return len(p.extracted) == len(p.requiredLabels)
+
+	found := 0
+	for _, l := range p.requiredLabels {
+		for _, e := range p.extracted {
+			if l == e {
+				found++
+				break
+			}
+		}
+	}
+
+	return len(p.requiredLabels) == found
 }

 func (p *Hints) Reset() {
@@ -172,9 +176,6 @@ func NewParserHint(requiredLabelNames, groups []string, without, noLabels bool,
 		return ph
 	}

-	ph.requiredLabels = hints
-	ph.shouldPreserveError = containsError(hints)
-
 	return &Hints{requiredLabels: hints, extracted: extracted, shouldPreserveError: containsError(hints)}
 }
diff --git a/pkg/logql/log/parser_hints_test.go b/pkg/logql/log/parser_hints_test.go
index ac232bfd871b..42d0134bc1d8 100644
--- a/pkg/logql/log/parser_hints_test.go
+++ b/pkg/logql/log/parser_hints_test.go
@@ -28,7 +28,10 @@ var (
 		"response": {
 			"status": 204,
 			"latency_seconds": "30.001"
-		}
+		},
+		"message": {
+			"message": "foo",
+		}
 	}`)

 	packedLine = []byte(`{
@@ -58,14 +61,14 @@ func Test_ParserHints(t *testing.T) {
 			jsonLine,
 			true,
 			1.0,
-			`{app="nginx", cluster="us-central-west", cluster_extracted="us-east-west", protocol="HTTP/2.0", remote_user="foo", request_host="foo.grafana.net", request_method="POST", request_size="101", request_time="30.001", request_uri="/rpc/v2/stage", response_latency_seconds="30.001", response_status="204", upstream_addr="10.0.0.1:80"}`,
+			`{app="nginx", cluster="us-central-west", cluster_extracted="us-east-west", message_message="foo", protocol="HTTP/2.0", remote_user="foo", request_host="foo.grafana.net", request_method="POST", request_size="101", request_time="30.001", request_uri="/rpc/v2/stage", response_latency_seconds="30.001", response_status="204", upstream_addr="10.0.0.1:80"}`,
 		},
 		{
 			`sum without (request_host,app,cluster) (rate({app="nginx"} | json | __error__="" | response_status = 204 [1m]))`,
 			jsonLine,
 			true,
 			1.0,
-			`{cluster_extracted="us-east-west", protocol="HTTP/2.0", remote_user="foo", request_method="POST", request_size="101", request_time="30.001", request_uri="/rpc/v2/stage", response_latency_seconds="30.001", response_status="204", upstream_addr="10.0.0.1:80"}`,
+			`{cluster_extracted="us-east-west", message_message="foo", protocol="HTTP/2.0", remote_user="foo", request_method="POST", request_size="101", request_time="30.001", request_uri="/rpc/v2/stage", response_latency_seconds="30.001", response_status="204", upstream_addr="10.0.0.1:80"}`,
 		},
 		{
 			`sum by (request_host,app) (rate({app="nginx"} | json | __error__="" | response_status = 204 [1m]))`,
@@ -114,14 +117,14 @@ func Test_ParserHints(t *testing.T) {
 			jsonLine,
 			true,
 			30.001,
-			`{app="nginx", cluster="us-central-west", cluster_extracted="us-east-west", protocol="HTTP/2.0", remote_user="foo", request_host="foo.grafana.net", request_method="POST", request_size="101", request_time="30.001", request_uri="/rpc/v2/stage", response_status="204", upstream_addr="10.0.0.1:80"}`,
+			`{app="nginx", cluster="us-central-west", cluster_extracted="us-east-west", message_message="foo", protocol="HTTP/2.0", remote_user="foo", request_host="foo.grafana.net", request_method="POST", request_size="101", request_time="30.001", request_uri="/rpc/v2/stage", response_status="204", upstream_addr="10.0.0.1:80"}`,
 		},
 		{
 			`sum without (request_host,app,cluster)(rate({app="nginx"} | json | response_status = 204 | unwrap response_latency_seconds [1m]))`,
 			jsonLine,
 			true,
 			30.001,
-			`{cluster_extracted="us-east-west", protocol="HTTP/2.0", remote_user="foo", request_method="POST", request_size="101", request_time="30.001", request_uri="/rpc/v2/stage", response_status="204", upstream_addr="10.0.0.1:80"}`,
+			`{cluster_extracted="us-east-west", message_message="foo", protocol="HTTP/2.0", remote_user="foo", request_method="POST", request_size="101", request_time="30.001", request_uri="/rpc/v2/stage", response_status="204", upstream_addr="10.0.0.1:80"}`,
 		},
 		{
 			`sum(rate({app="nginx"} | logfmt | org_id=3677 | unwrap Ingester_TotalReached[1m]))`,
@@ -214,6 +217,13 @@ func Test_ParserHints(t *testing.T) {
 			0,
 			``,
 		},
+		{
+			`sum by (message_message,app)(count_over_time({app="nginx"} | json | response_status = 204 and remote_user = "foo"[1m]))`,
+			jsonLine,
+			true,
+			1,
+			`{app="nginx", message_message="foo"}`,
+		},
 	} {
 		tt := tt
 		t.Run(tt.expr, func(t *testing.T) {
diff --git a/pkg/logql/log/parser_test.go b/pkg/logql/log/parser_test.go
index 246dbed499c9..bd57603ab808 100644
--- a/pkg/logql/log/parser_test.go
+++ b/pkg/logql/log/parser_test.go
@@ -28,7 +28,7 @@ func Test_jsonParser_Parse(t *testing.T) {
 				"pod_uuid", "foo",
 				"pod_deployment_ref", "foobar",
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"numeric",
@@ -37,7 +37,7 @@ func Test_jsonParser_Parse(t *testing.T) {
 			labels.FromStrings("counter", "1",
 				"price__net_", "5.56909",
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"escaped",
@@ -47,7 +47,7 @@ func Test_jsonParser_Parse(t *testing.T) {
 				"price__net_", "5.56909",
 				"foo", `foo\"bar`,
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"utf8 error rune",
@@ -57,21 +57,21 @@ func Test_jsonParser_Parse(t *testing.T) {
 				"price__net_", "5.56909",
 				"foo", "",
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"skip arrays",
 			[]byte(`{"counter":1, "price": {"net_":["10","20"]}}`),
 			labels.EmptyLabels(),
 			labels.FromStrings("counter", "1"),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"bad key replaced",
 			[]byte(`{"cou-nter":1}`),
 			labels.EmptyLabels(),
 			labels.FromStrings("cou_nter", "1"),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"errors",
@@ -80,7 +80,7 @@ func Test_jsonParser_Parse(t *testing.T) {
 			labels.FromStrings("__error__", "JSONParserErr",
 				"__error_details__", "Value looks like object, but can't find closing '}' symbol",
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"errors hints",
@@ -103,7 +103,7 @@ func Test_jsonParser_Parse(t *testing.T) {
 				"next_err", "false",
 				"pod_deployment_ref", "foobar",
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 	}
 	for _, tt := range tests {
@@ -255,7 +255,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("app", "foo"),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"alternate syntax",
@@ -265,7 +265,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("test", "value"),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"multiple fields",
@@ -278,7 +278,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			labels.FromStrings("app", "foo",
 				"namespace", "prod",
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"utf8",
@@ -288,7 +288,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("utf8", "value"),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"nested field",
@@ -298,7 +298,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("uuid", "foo"),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"nested field alternate syntax",
@@ -308,7 +308,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("uuid", "foo"),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"nested field alternate syntax 2",
@@ -318,7 +318,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("uuid", "foo"),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"nested field alternate syntax 3",
@@ -328,7 +328,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("uuid", "foo"),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"array element",
@@ -338,7 +338,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("param", "1"),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"full array",
@@ -348,7 +348,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("params", "[1,2,3]"),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"full object",
@@ -358,7 +358,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("deployment", `{"ref":"foobar", "params": [1,2,3]}`),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"expression matching nothing",
@@ -368,7 +368,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("nope", ""),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"null field",
@@ -379,7 +379,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			labels.EmptyLabels(),
 			labels.FromStrings("nf", ""), // null is coerced to an empty string
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"boolean field",
@@ -389,7 +389,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("bool", `false`),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"label override",
@@ -401,7 +401,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			labels.FromStrings("uuid", "bar",
 				"uuid_extracted", "foo",
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"non-matching expression",
@@ -413,7 +413,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			labels.FromStrings("uuid", "bar",
 				"request_size", "",
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"empty line",
@@ -423,7 +423,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.EmptyLabels(),
 			labels.FromStrings("uuid", ""),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"existing labels are not affected",
@@ -435,7 +435,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			labels.FromStrings("foo", "bar",
 				"uuid", "",
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"invalid JSON line",
@@ -447,7 +447,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			labels.FromStrings("foo", "bar",
 				logqlmodel.ErrorLabel, errJSON,
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"invalid JSON line with hints",
@@ -470,7 +470,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			},
 			labels.FromStrings("foo", "bar"),
 			labels.FromStrings("foo", "bar"),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"nested escaped object",
@@ -482,7 +482,7 @@ func TestJSONExpressionParser(t *testing.T) {
 			labels.FromStrings("foo", "bar",
 				"app", `{ "key": "value", "key2":"value2"}`,
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 	}
 	for _, tt := range tests {
@@ -746,7 +746,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"__error__", "LogfmtParserErr",
 				"__error_details__", "logfmt syntax error at pos 8 : unexpected '='",
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"not logfmt with hints",
@@ -766,7 +766,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 			labels.EmptyLabels(),
 			labels.FromStrings("buzz", "foo"),
 			nil,
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"key alone logfmt",
@@ -775,7 +775,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 			labels.FromStrings("foo", "bar",
 				"bar", "foo"),
 			nil,
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"quoted logfmt",
@@ -785,7 +785,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"foobar", "foo bar",
 			),
 			nil,
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"escaped control chars in logfmt",
@@ -795,7 +795,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"foobar", "foo\nbar\tbaz",
 			),
 			nil,
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"literal control chars in logfmt",
@@ -805,7 +805,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"foobar", "foo\nbar\tbaz",
 			),
 			nil,
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"escaped slash logfmt",
@@ -815,7 +815,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"foobar", `foo ba\r baz`,
 			),
 			nil,
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"literal newline and escaped slash logfmt",
@@ -825,7 +825,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"foobar", "foo bar\nb\\az",
 			),
 			nil,
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"double property logfmt",
@@ -836,7 +836,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"latency", "10ms",
 			),
 			nil,
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"duplicate from line property",
@@ -846,7 +846,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"foobar", "10ms",
 			),
 			nil,
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"duplicate property",
@@ -857,7 +857,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"foobar", "10ms",
 			),
 			nil,
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"invalid key names",
@@ -869,7 +869,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"test_dash", "foo",
 			),
 			nil,
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"nil",
@@ -877,7 +877,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 			labels.FromStrings("foo", "bar"),
 			labels.FromStrings("foo", "bar"),
 			nil,
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"empty key",
@@ -892,7 +892,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"__error__", "LogfmtParserErr",
 				"__error_details__", "logfmt syntax error at pos 15 : unexpected '='",
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"error rune in key",
@@ -906,7 +906,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"__error__", "LogfmtParserErr",
 				"__error_details__", "logfmt syntax error at pos 20 : invalid key",
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"double quote in key",
@@ -920,7 +920,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"__error__", "LogfmtParserErr",
 				"__error_details__", `logfmt syntax error at pos 17 : unexpected '"'`,
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"= in value",
@@ -933,7 +933,7 @@ func TestLogfmtParser_parse(t *testing.T) {
 				"__error__", "LogfmtParserErr",
 				"__error_details__", `logfmt syntax error at pos 7 : unexpected '='`,
 			),
-			noParserHints,
+			NoParserHints(),
 		},
 	}
@@ -1200,7 +1200,7 @@ func Test_unpackParser_Parse(t *testing.T) {
 				"cluster", "us-central1",
 			),
 			[]byte(`some message`),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"wrong json",
@@ -1210,7 +1210,7 @@ func Test_unpackParser_Parse(t *testing.T) {
 				"__error_details__", "expecting json object(6), but it is not",
 			),
 			[]byte(`"app":"foo","namespace":"prod","_entry":"some message","pod":{"uid":"1"}`),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"empty line",
@@ -1218,7 +1218,7 @@ func Test_unpackParser_Parse(t *testing.T) {
 			labels.FromStrings("cluster", "us-central1"),
 			labels.FromStrings("cluster", "us-central1"),
 			[]byte(``),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"wrong json with hints",
@@ -1240,7 +1240,7 @@ func Test_unpackParser_Parse(t *testing.T) {
 				"cluster", "us-central1",
 			),
 			[]byte(`["foo","bar"]`),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"should rename",
@@ -1254,7 +1254,7 @@ func Test_unpackParser_Parse(t *testing.T) {
 				"cluster", "us-central1",
 			),
 			[]byte(`some message`),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"should not change log and labels if no packed entry",
@@ -1266,7 +1266,7 @@ func Test_unpackParser_Parse(t *testing.T) {
 				"cluster", "us-central1",
 			),
 			[]byte(`{"bar":1,"app":"foo","namespace":"prod","pod":{"uid":"1"}}`),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"non json with escaped quotes",
@@ -1278,7 +1278,7 @@ func Test_unpackParser_Parse(t *testing.T) {
 				"cluster", "us-central1",
 			),
 			[]byte(`I0303 17:49:45.976518 1526 kubelet_getters.go:178] "Pod status updated" pod="openshift-etcd/etcd-ip-10-0-150-50.us-east-2.compute.internal" status=Running`),
-			noParserHints,
+			NoParserHints(),
 		},
 		{
 			"invalid key names",
@@ -1289,7 +1289,7 @@ func Test_unpackParser_Parse(t *testing.T) {
 				"test_dash", "foo",
 			),
 			[]byte(`some message`),
-			noParserHints,
+			NoParserHints(),
 		},
 	}
 	for _, tt := range tests {
diff --git a/pkg/logql/optimize.go b/pkg/logql/optimize.go
index 867f2691d46c..2ea6dea919ec 100644
--- a/pkg/logql/optimize.go
+++ b/pkg/logql/optimize.go
@@ -8,7 +8,7 @@ func optimizeSampleExpr(expr syntax.SampleExpr) (syntax.SampleExpr, error) {
 	// we skip sharding AST for now, it's not easy to clone them since they are not part of the language.
 	expr.Walk(func(e syntax.Expr) {
 		switch e.(type) {
-		case *ConcatSampleExpr, *DownstreamSampleExpr, *QuantileSketchEvalExpr, *QuantileSketchMergeExpr, *MergeFirstOverTimeExpr, *MergeLastOverTimeExpr:
+		case *ConcatSampleExpr, DownstreamSampleExpr, *QuantileSketchEvalExpr, *QuantileSketchMergeExpr, *MergeFirstOverTimeExpr, *MergeLastOverTimeExpr:
 			skip = true
 			return
 		}
diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go
index 4222e820539a..a09790f61ed3 100644
--- a/pkg/logql/shardmapper.go
+++ b/pkg/logql/shardmapper.go
@@ -128,7 +128,7 @@ func (m ShardMapper) mapBinOpExpr(e *syntax.BinOpExpr, r *downstreamRecorder) (*
 	if err != nil {
 		return nil, 0, err
 	}
-	if isNoOp(e.SampleExpr, rhsMapped) && !isLiteralOrVector(rhsMapped) {
+	if isNoOp(e.RHS, rhsMapped) && !isLiteralOrVector(rhsMapped) {
 		// TODO: check if literal or vector
 		rhsMapped = DownstreamSampleExpr{
 			shard: nil,
diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go
index 7a02640c8149..96955109a941 100644
--- a/pkg/logql/shardmapper_test.go
+++ b/pkg/logql/shardmapper_test.go
@@ -1446,16 +1446,84 @@ func TestMapping(t *testing.T) {
 				},
 			},
 		},
+		{
+			in: `quantile_over_time(0.99, {a="foo"} | unwrap bytes [1s]) by (a, b) > 1`,
+			expr: &syntax.BinOpExpr{
+				SampleExpr: DownstreamSampleExpr{
+					SampleExpr: &syntax.RangeAggregationExpr{
+						Operation: syntax.OpRangeTypeQuantile,
+						Params:    float64p(0.99),
+						Left: &syntax.LogRange{
+							Left: &syntax.MatchersExpr{
+								Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "a", "foo")},
+							},
+							Unwrap: &syntax.UnwrapExpr{
+								Identifier: "bytes",
+							},
+							Interval: 1 * time.Second,
+						},
+						Grouping: &syntax.Grouping{
+							Groups: []string{"a", "b"},
+						},
+					},
+				},
+				RHS: &syntax.LiteralExpr{
+					Val: 1,
+				},
+				Op: syntax.OpTypeGT,
+				Opts: &syntax.BinOpOptions{
+					ReturnBool:     false,
+					VectorMatching: &syntax.VectorMatching{},
+				},
+			},
+		},
+		{
+			in: `1 < quantile_over_time(0.99, {a="foo"} | unwrap bytes [1s]) by (a, b)`,
+			expr: &syntax.BinOpExpr{
+				SampleExpr: &syntax.LiteralExpr{
+					Val: 1,
+				},
+				RHS: DownstreamSampleExpr{
+					SampleExpr: &syntax.RangeAggregationExpr{
+						Operation: syntax.OpRangeTypeQuantile,
+						Params:    float64p(0.99),
+						Left: &syntax.LogRange{
+							Left: &syntax.MatchersExpr{
+								Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "a", "foo")},
+							},
+							Unwrap: &syntax.UnwrapExpr{
+								Identifier: "bytes",
+							},
+							Interval: 1 * time.Second,
+						},
+						Grouping: &syntax.Grouping{
+							Groups: []string{"a", "b"},
+						},
+					},
+				},
+				Op: syntax.OpTypeLT,
+				Opts: &syntax.BinOpOptions{
+					ReturnBool:     false,
+					VectorMatching: &syntax.VectorMatching{},
+				},
+			},
+		},
 	} {
 		t.Run(tc.in, func(t *testing.T) {
 			ast, err := syntax.ParseExpr(tc.in)
 			require.Equal(t, tc.err, err)

 			mapped, _, err := m.Map(ast, nilShardMetrics.downstreamRecorder())
+			switch e := mapped.(type) {
+			case syntax.SampleExpr:
+				optimized, err := optimizeSampleExpr(e)
+				require.NoError(t, err)
+				require.Equal(t, mapped.String(), optimized.String())
+			}
 			require.Equal(t, tc.err, err)

-			require.Equal(t, mapped.String(), tc.expr.String())
-			require.Equal(t, mapped, tc.expr)
+			require.Equal(t, tc.expr.String(), mapped.String())
+			require.Equal(t, tc.expr, mapped)
 		})
 	}
 }
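The shardmapper fix above is subtle: when deciding whether the mapped right-hand side of a binary operation still needs to be delegated downstream, the code compared against `e.SampleExpr` (the binop's left-hand side) instead of `e.RHS`. A hedged sketch of the corrected pattern, with toy stand-ins for Loki's expression types:

package main

import "fmt"

// expr is a toy stand-in for syntax.SampleExpr.
type expr interface{ String() string }

type rangeAgg string

func (r rangeAgg) String() string { return string(r) }

type downstream struct{ inner expr }

func (d downstream) String() string { return "downstream<" + d.inner.String() + ">" }

// isNoOp reports whether mapping left the expression unchanged.
func isNoOp(before, after expr) bool { return before.String() == after.String() }

// mapRHS wraps an unshardable right-hand side so it runs downstream as one
// query. The fix is to compare the mapped RHS against the *original RHS*,
// not against the whole binop's left-hand side. (The real code also skips
// wrapping for trivial literals and vectors.)
func mapRHS(rhs, rhsMapped expr) expr {
	if isNoOp(rhs, rhsMapped) {
		return downstream{inner: rhsMapped}
	}
	return rhsMapped
}

func main() {
	rhs := rangeAgg(`quantile_over_time(0.99, {a="foo"} | unwrap bytes [1s]) by (a, b)`)
	fmt.Println(mapRHS(rhs, rhs)) // unshardable: delegated downstream as a whole
}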
diff --git a/pkg/logql/syntax/parser.go b/pkg/logql/syntax/parser.go
index 710bf7132c4c..79213049f376 100644
--- a/pkg/logql/syntax/parser.go
+++ b/pkg/logql/syntax/parser.go
@@ -31,7 +31,13 @@ var parserPool = sync.Pool{
 	},
 }

-const maxInputSize = 5120
+// (E.Welch) We originally added this limit after fuzz testing, realizing there should be some maximum limit to an allowed query size.
+// The original limit was 5120, based on some internet searching and a best estimate of what a reasonable limit would be.
+// We have seen use cases with queries containing a lot of filter expressions or long expanded variable names where this limit was too small.
+// Apparently the spec does not specify a limit, and more internet searching suggests almost all browsers will handle 100k+ length URLs without issue.
+// Some limit here still seems prudent, however, so the new limit is now 128k.
+// Also note this is used to allocate the buffer for reading the query string, so there is some memory cost to making this larger.
+const maxInputSize = 131072

 func init() {
 	// Improve the error messages coming out of yacc.
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 0c4a50f813ed..6ef0572d6bad 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -101,7 +101,7 @@ type Config struct {
 	Tracing   tracing.Config   `yaml:"tracing"`
 	Analytics analytics.Config `yaml:"analytics"`

-	LegacyReadTarget bool `yaml:"legacy_read_target,omitempty" doc:"hidden"`
+	LegacyReadTarget bool `yaml:"legacy_read_target,omitempty" doc:"hidden|deprecated"`

 	Common common.Config `yaml:"common,omitempty"`

@@ -136,9 +136,8 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) {
 		"It will, however, distort metrics, because it is counted as live memory. ",
 	)

-	//TODO(trevorwhitney): flip this to false with Loki 3.0
-	f.BoolVar(&c.LegacyReadTarget, "legacy-read-mode", true, "Set to false to disable the legacy read mode and use new scalable mode with 3rd backend target. "+
-		"The default will be flipped to false in the next Loki release.")
+	f.BoolVar(&c.LegacyReadTarget, "legacy-read-mode", false, "Deprecated. Set to true to enable the legacy read mode which includes the components from the backend target. "+
+		"This setting is deprecated and will be removed in the next minor release.")

 	f.DurationVar(&c.ShutdownDelay, "shutdown-delay", 0, "How long to wait between SIGTERM and shutdown. After receiving SIGTERM, Loki will report 503 Service Unavailable status via /ready endpoint.")

@@ -647,7 +646,7 @@ func (t *Loki) setupModuleManager() error {
 		Read:  {QueryFrontend, Querier},
 		Write: {Ingester, Distributor},

-		Backend: {QueryScheduler, Ruler, Compactor, IndexGateway},
+		Backend: {QueryScheduler, Ruler, Compactor, IndexGateway, BloomGateway, BloomCompactor},

 		All: {QueryScheduler, QueryFrontend, Querier, Ingester, Distributor, Ruler, Compactor},
 	}

@@ -694,13 +693,12 @@ func (t *Loki) setupModuleManager() error {
 	}

 	// Add bloom gateway ring in client mode to IndexGateway service dependencies if bloom filtering is enabled.
-	if t.Cfg.isModuleEnabled(IndexGateway) && t.Cfg.BloomGateway.Enabled {
+	if t.Cfg.BloomGateway.Enabled {
 		deps[IndexGateway] = append(deps[IndexGateway], BloomGatewayRing)
 	}

-	//TODO(poyzannur) not sure this is needed for BloomCompactor
 	if t.Cfg.LegacyReadTarget {
-		deps[Read] = append(deps[Read], QueryScheduler, Ruler, Compactor, IndexGateway, BloomGateway, BloomCompactor)
+		deps[Read] = append(deps[Read], deps[Backend]...)
 	}
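With this change the legacy read target simply inherits whatever the Backend target lists, so the two dependency lists can no longer drift apart. A tiny sketch of why copying the list beats a hand-maintained duplicate (module names illustrative):

package main

import "fmt"

func main() {
	// Module dependency table, as in setupModuleManager (names illustrative).
	deps := map[string][]string{
		"read":    {"query-frontend", "querier"},
		"backend": {"query-scheduler", "ruler", "compactor", "index-gateway", "bloom-gateway", "bloom-compactor"},
	}

	// Legacy read mode: the read target must also run everything the backend
	// target runs. Appending deps["backend"]... instead of spelling the
	// modules out again means a module added to "backend" later is picked up
	// here automatically.
	legacyReadMode := true
	if legacyReadMode {
		deps["read"] = append(deps["read"], deps["backend"]...)
	}

	fmt.Println(deps["read"])
}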
 	}

 	if t.Cfg.InternalServer.Enable {
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 1342a105f34b..8282098c85ae 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -800,12 +800,25 @@ func (disabledShuffleShardingLimits) MaxQueriersPerUser(_ string) uint { return
 func (disabledShuffleShardingLimits) MaxQueryCapacity(_ string) float64 { return 0 }

+// ingesterQueryOptions exists simply to avoid dependency cycles when using querier.Config directly in queryrange.NewMiddleware
+type ingesterQueryOptions struct {
+	querier.Config
+}
+
+func (i ingesterQueryOptions) QueryStoreOnly() bool {
+	return i.Config.QueryStoreOnly
+}
+
+func (i ingesterQueryOptions) QueryIngestersWithin() time.Duration {
+	return i.Config.QueryIngestersWithin
+}
+
 func (t *Loki) initQueryFrontendMiddleware() (_ services.Service, err error) {
 	level.Debug(util_log.Logger).Log("msg", "initializing query frontend tripperware")

 	middleware, stopper, err := queryrange.NewMiddleware(
 		t.Cfg.QueryRange,
 		t.Cfg.Querier.Engine,
+		ingesterQueryOptions{t.Cfg.Querier},
 		util_log.Logger,
 		t.Overrides,
 		t.Cfg.SchemaConfig,
diff --git a/pkg/loki/modules_test.go b/pkg/loki/modules_test.go
index 19980e294412..0d07242b7537 100644
--- a/pkg/loki/modules_test.go
+++ b/pkg/loki/modules_test.go
@@ -175,6 +175,9 @@ func TestIndexGatewayRingMode_when_TargetIsLegacyReadOrBackend(t *testing.T) {
 		{
 			name:   "leagcy read",
 			target: Read,
+			transformer: func(cfg *Config) {
+				cfg.LegacyReadTarget = true
+			},
 		},
 		{
 			name:   "backend",
diff --git a/pkg/lokifrontend/frontend/transport/handler.go b/pkg/lokifrontend/frontend/transport/handler.go
index 03332ee04677..1c271805bbda 100644
--- a/pkg/lokifrontend/frontend/transport/handler.go
+++ b/pkg/lokifrontend/frontend/transport/handler.go
@@ -12,6 +12,8 @@ import (
 	"strings"
 	"time"

+	"github.com/grafana/dskit/flagext"
+
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/grafana/dskit/httpgrpc"
@@ -42,13 +44,15 @@ var (

 // Config for a Handler.
 type HandlerConfig struct {
-	LogQueriesLongerThan time.Duration `yaml:"log_queries_longer_than"`
-	MaxBodySize          int64         `yaml:"max_body_size"`
-	QueryStatsEnabled    bool          `yaml:"query_stats_enabled"`
+	LogQueriesLongerThan   time.Duration          `yaml:"log_queries_longer_than"`
+	LogQueryRequestHeaders flagext.StringSliceCSV `yaml:"log_query_request_headers"`
+	MaxBodySize            int64                  `yaml:"max_body_size"`
+	QueryStatsEnabled      bool                   `yaml:"query_stats_enabled"`
 }

 func (cfg *HandlerConfig) RegisterFlags(f *flag.FlagSet) {
 	f.DurationVar(&cfg.LogQueriesLongerThan, "frontend.log-queries-longer-than", 0, "Log queries that are slower than the specified duration. Set to 0 to disable. Set to < 0 to enable on all queries.")
+	f.Var(&cfg.LogQueryRequestHeaders, "frontend.log-query-request-headers", "Comma-separated list of request header names to include in query logs. Applies to both query stats and slow queries logs.")
 	f.Int64Var(&cfg.MaxBodySize, "frontend.max-body-size", 10*1024*1024, "Max body size for downstream prometheus.")
 	f.BoolVar(&cfg.QueryStatsEnabled, "frontend.query-stats-enabled", false, "True to enable query statistics tracking. When enabled, a message with some statistics is logged for every query.")
 }
@@ -206,9 +210,22 @@ func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, quer
 		"fetched_chunks_bytes", numBytes,
 	}, formatQueryString(queryString)...)

+	if len(f.cfg.LogQueryRequestHeaders) != 0 {
+		logMessage = append(logMessage, formatRequestHeaders(&r.Header, f.cfg.LogQueryRequestHeaders)...)
+	}
+
 	level.Info(util_log.WithContext(r.Context(), f.log)).Log(logMessage...)
 }

+func formatRequestHeaders(h *http.Header, headersToLog []string) (fields []interface{}) {
+	for _, s := range headersToLog {
+		if v := h.Get(s); v != "" {
+			fields = append(fields, fmt.Sprintf("header_%s", strings.ReplaceAll(strings.ToLower(s), "-", "_")), v)
+		}
+	}
+	return fields
+}
+
 func (f *Handler) parseRequestQueryString(r *http.Request, bodyBuf bytes.Buffer) url.Values {
 	// Use previously buffered body.
 	r.Body = io.NopCloser(&bodyBuf)
diff --git a/pkg/lokifrontend/frontend/transport/handler_test.go b/pkg/lokifrontend/frontend/transport/handler_test.go
new file mode 100644
index 000000000000..19709168c1fd
--- /dev/null
+++ b/pkg/lokifrontend/frontend/transport/handler_test.go
@@ -0,0 +1,23 @@
+package transport
+
+import (
+	"net/http"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestFormatRequestHeaders(t *testing.T) {
+	h := http.Header{}
+	h.Add("X-Header-To-Log", "i should be logged!")
+	h.Add("X-Header-To-Not-Log", "i shouldn't be logged!")
+
+	fields := formatRequestHeaders(&h, []string{"X-Header-To-Log", "X-Header-Not-Present"})
+
+	expected := []interface{}{
+		"header_x_header_to_log",
+		"i should be logged!",
+	}
+
+	require.Equal(t, expected, fields)
+}
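For illustration, this is how the header normalization behaves end to end. A standalone sketch re-implementing the same transformation (header names here are examples, not defaults):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// headerFields mirrors formatRequestHeaders: each configured header that is
// present becomes a key/value pair, with the key lower-cased, dashes replaced
// by underscores, and a "header_" prefix.
func headerFields(h http.Header, names []string) []interface{} {
	var fields []interface{}
	for _, name := range names {
		if v := h.Get(name); v != "" {
			key := "header_" + strings.ReplaceAll(strings.ToLower(name), "-", "_")
			fields = append(fields, key, v)
		}
	}
	return fields
}

func main() {
	h := http.Header{}
	h.Set("User-Agent", "logcli/2.9")
	h.Set("X-Query-Tags", "source=grafana")

	// -> [header_user_agent logcli/2.9 header_x_query_tags source=grafana]
	fmt.Println(headerFields(h, []string{"User-Agent", "X-Query-Tags", "X-Not-Present"}))
}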
diff --git a/pkg/querier/queryrange/index_stats_cache.go b/pkg/querier/queryrange/index_stats_cache.go
index a985167456a7..d52f2e22323f 100644
--- a/pkg/querier/queryrange/index_stats_cache.go
+++ b/pkg/querier/queryrange/index_stats_cache.go
@@ -93,6 +93,7 @@ func NewIndexStatsCacheMiddleware(
 	merger queryrangebase.Merger,
 	c cache.Cache,
 	cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
+	iqo util.IngesterQueryOptions,
 	shouldCache queryrangebase.ShouldCacheFn,
 	parallelismForReq queryrangebase.ParallelismForReqFn,
 	retentionEnabled bool,
@@ -102,7 +103,7 @@ func NewIndexStatsCacheMiddleware(
 	return queryrangebase.NewResultsCacheMiddleware(
 		log,
 		c,
-		IndexStatsSplitter{cacheKeyLimits{limits, transformer}},
+		IndexStatsSplitter{cacheKeyLimits{limits, transformer, iqo}},
 		limits,
 		merger,
 		IndexStatsExtractor{},
diff --git a/pkg/querier/queryrange/index_stats_cache_test.go b/pkg/querier/queryrange/index_stats_cache_test.go
index c8119c6b9fe2..1127b88576e1 100644
--- a/pkg/querier/queryrange/index_stats_cache_test.go
+++ b/pkg/querier/queryrange/index_stats_cache_test.go
@@ -37,6 +37,7 @@ func TestIndexStatsCache(t *testing.T) {
 		c,
 		nil,
 		nil,
+		nil,
 		func(_ context.Context, _ []string, _ queryrangebase.Request) int {
 			return 1
 		},
@@ -180,6 +181,7 @@ func TestIndexStatsCache_RecentData(t *testing.T) {
 		c,
 		nil,
 		nil,
+		nil,
 		func(_ context.Context, _ []string, _ queryrangebase.Request) int {
 			return 1
 		},
diff --git a/pkg/querier/queryrange/ingester_query_window.go b/pkg/querier/queryrange/ingester_query_window.go
new file mode 100644
index 000000000000..7a161f40c007
--- /dev/null
+++ b/pkg/querier/queryrange/ingester_query_window.go
@@ -0,0 +1,33 @@
+package queryrange
+
+import (
+	"time"
+
+	"github.com/grafana/loki/pkg/util"
+	"github.com/grafana/loki/pkg/util/validation"
+)
+
+// SplitIntervalForTimeRange returns the correct split interval to use. It accounts for the given upperBound value being
+// within the ingester query window, in which case it returns the ingester query split (unless it's not set, then the default
+// split interval will be used).
+func SplitIntervalForTimeRange(iqo util.IngesterQueryOptions, limits Limits, defaultSplitFn func(string) time.Duration, tenantIDs []string, ref, upperBound time.Time) time.Duration {
+	split := validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, defaultSplitFn)
+
+	if iqo == nil {
+		return split
+	}
+
+	// if the query is within the ingester query window, choose the ingester split duration (if configured), otherwise
+	// revert to the default split duration
+	ingesterQueryWindowStart := ref.Add(-iqo.QueryIngestersWithin())
+
+	// query is (even partially) within the ingester query window
+	if upperBound.After(ingesterQueryWindowStart) {
+		ingesterSplit := validation.MaxDurationOrZeroPerTenant(tenantIDs, limits.IngesterQuerySplitDuration)
+		if !iqo.QueryStoreOnly() && ingesterSplit > 0 {
+			split = ingesterSplit
+		}
+	}
+
+	return split
+}
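The behaviour of SplitIntervalForTimeRange is easiest to see with concrete numbers. A self-contained sketch of the same decision logic, with toy types standing in for util.IngesterQueryOptions and the per-tenant limits (the values match the test cases below: 1h default split, 90m ingester split, 3h ingester window):

package main

import (
	"fmt"
	"time"
)

// ingesterOpts is a toy stand-in for util.IngesterQueryOptions.
type ingesterOpts struct {
	queryIngestersWithin time.Duration
	queryStoreOnly       bool
}

// splitFor mirrors SplitIntervalForTimeRange for a single tenant: a query
// whose upper bound falls (even partially) inside the ingester query window
// uses the ingester split, if one is configured and ingesters are queried.
func splitFor(opts *ingesterOpts, defaultSplit, ingesterSplit time.Duration, now, upperBound time.Time) time.Duration {
	split := defaultSplit
	if opts == nil {
		return split
	}
	windowStart := now.Add(-opts.queryIngestersWithin)
	if upperBound.After(windowStart) && !opts.queryStoreOnly && ingesterSplit > 0 {
		split = ingesterSplit
	}
	return split
}

func main() {
	now := time.Now()
	opts := &ingesterOpts{queryIngestersWithin: 3 * time.Hour}

	// Ends 5h ago: outside the 3h ingester window -> default split (1h0m0s).
	fmt.Println(splitFor(opts, time.Hour, 90*time.Minute, now, now.Add(-5*time.Hour)))

	// Ends 1h ago: inside the window -> ingester split (1h30m0s).
	fmt.Println(splitFor(opts, time.Hour, 90*time.Minute, now, now.Add(-time.Hour)))
}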
diff --git a/pkg/querier/queryrange/labels_cache.go b/pkg/querier/queryrange/labels_cache.go
index 1e0dd225fa7b..66c811490403 100644
--- a/pkg/querier/queryrange/labels_cache.go
+++ b/pkg/querier/queryrange/labels_cache.go
@@ -11,18 +11,21 @@ import (
 	"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
 	"github.com/grafana/loki/pkg/storage/chunk/cache"
 	"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
+	"github.com/grafana/loki/pkg/util"
 )

 type cacheKeyLabels struct {
 	Limits
 	transformer UserIDTransformer
+	iqo         util.IngesterQueryOptions
 }

 // GenerateCacheKey generates a cache key based on the userID, split duration and the interval of the request.
 // It also includes the label name and the provided query for label values request.
 func (i cacheKeyLabels) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
 	lr := r.(*LabelRequest)
-	split := i.MetadataQuerySplitDuration(userID)
+
+	split := SplitIntervalForTimeRange(i.iqo, i.Limits, i.MetadataQuerySplitDuration, []string{userID}, time.Now().UTC(), r.GetEnd().UTC())

 	var currentInterval int64
 	if denominator := int64(split / time.Millisecond); denominator > 0 {
@@ -77,6 +80,7 @@ func NewLabelsCacheMiddleware(
 	merger queryrangebase.Merger,
 	c cache.Cache,
 	cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
+	iqo util.IngesterQueryOptions,
 	shouldCache queryrangebase.ShouldCacheFn,
 	parallelismForReq queryrangebase.ParallelismForReqFn,
 	retentionEnabled bool,
@@ -86,12 +90,14 @@ func NewLabelsCacheMiddleware(
 	return queryrangebase.NewResultsCacheMiddleware(
 		logger,
 		c,
-		cacheKeyLabels{limits, transformer},
+		cacheKeyLabels{limits, transformer, iqo},
 		limits,
 		merger,
 		labelsExtractor{},
 		cacheGenNumberLoader,
-		shouldCache,
+		func(ctx context.Context, r queryrangebase.Request) bool {
+			return shouldCacheMetadataReq(ctx, logger, shouldCache, r, limits)
+		},
 		parallelismForReq,
 		retentionEnabled,
 		metrics,
diff --git a/pkg/querier/queryrange/labels_cache_test.go b/pkg/querier/queryrange/labels_cache_test.go
index 73ab9ad8f4f8..4c645b8d19ce 100644
--- a/pkg/querier/queryrange/labels_cache_test.go
+++ b/pkg/querier/queryrange/labels_cache_test.go
@@ -3,6 +3,7 @@ package queryrange
 import (
 	"context"
 	"fmt"
+	"regexp"
 	"testing"
 	"time"

@@ -69,6 +70,7 @@ func TestLabelsCache(t *testing.T) {
 			cache.NewMockCache(),
 			nil,
 			nil,
+			nil,
 			func(_ context.Context, _ []string, _ queryrangebase.Request) int {
 				return 1
 			},
@@ -249,3 +251,215 @@ func TestLabelsCache(t *testing.T) {
 		})
 	}
 }
+
+func TestLabelCache_freshness(t *testing.T) {
+	testTime := time.Now().Add(-1 * time.Hour)
+	from, through := util.RoundToMilliseconds(testTime.Add(-1*time.Hour), testTime)
+	start, end := from.Time(), through.Time()
+	nonOverlappingStart, nonOverlappingEnd := from.Add(-24*time.Hour).Time(), through.Add(-24*time.Hour).Time()
+
+	for _, tt := range []struct {
+		name                      string
+		req                       *LabelRequest
+		shouldCache               bool
+		maxMetadataCacheFreshness time.Duration
+	}{
+		{
+			name: "max metadata freshness not set",
+			req: &LabelRequest{
+				LabelRequest: logproto.LabelRequest{
+					Start: &start,
+					End:   &end,
+				},
+			},
+			shouldCache: true,
+		},
+		{
+			name: "req overlaps with max cache freshness window",
+			req: &LabelRequest{
+				LabelRequest: logproto.LabelRequest{
+					Start: &start,
+					End:   &end,
+				},
+			},
+			maxMetadataCacheFreshness: 24 * time.Hour,
+			shouldCache:               false,
+		},
+		{
+			name: "req does not overlap max cache freshness window",
+			req: &LabelRequest{
+				LabelRequest: logproto.LabelRequest{
+					Start: &nonOverlappingStart,
+					End:   &nonOverlappingEnd,
+				},
+			},
+			maxMetadataCacheFreshness: 24 * time.Hour,
+			shouldCache:               true,
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			cacheMiddleware, err := NewLabelsCacheMiddleware(
+				log.NewNopLogger(),
+				fakeLimits{
+					metadataSplitDuration: map[string]time.Duration{
+						"fake": 24 * time.Hour,
+					},
+					maxMetadataCacheFreshness: tt.maxMetadataCacheFreshness,
+				},
+				DefaultCodec,
+				cache.NewMockCache(),
+				nil,
+				nil,
+				nil,
+				func(_ context.Context, _ []string, _ queryrangebase.Request) int {
+					return 1
+				},
+				false,
+				nil,
+				nil,
+			)
+			require.NoError(t, err)
+
+			labelsResp := &LokiLabelNamesResponse{
+				Status:  "success",
+				Version: uint32(loghttp.VersionV1),
+				Data:    []string{"bar", "buzz"},
+				Statistics: stats.Result{
+					Summary: stats.Summary{
+						Splits: 1,
+					},
+				},
+			}
+
+			called := 0
+			handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
+				called++
+
+				// should request the entire length with no partitioning as nothing is cached yet.
+				require.Equal(t, tt.req.GetStart(), r.GetStart())
+				require.Equal(t, tt.req.GetEnd(), r.GetEnd())
+
+				return labelsResp, nil
+			}))
+
+			ctx := user.InjectOrgID(context.Background(), "fake")
+			got, err := handler.Do(ctx, tt.req)
+			require.NoError(t, err)
+			require.Equal(t, 1, called) // called actual handler, as not cached.
+			require.Equal(t, labelsResp, got)
+
+			called = 0
+			got, err = handler.Do(ctx, tt.req)
+			require.NoError(t, err)
+			if !tt.shouldCache {
+				require.Equal(t, 1, called)
+			} else {
+				require.Equal(t, 0, called)
+			}
+			require.Equal(t, labelsResp, got)
+		})
+	}
+}
+
+func TestLabelQueryCacheKey(t *testing.T) {
+	const (
+		defaultTenant       = "a"
+		alternateTenant     = "b"
+		defaultSplit        = time.Hour
+		ingesterSplit       = 90 * time.Minute
+		ingesterQueryWindow = defaultSplit * 3
+	)
+
+	l := fakeLimits{
+		metadataSplitDuration: map[string]time.Duration{defaultTenant: defaultSplit, alternateTenant: defaultSplit},
+		ingesterSplitDuration: map[string]time.Duration{defaultTenant: ingesterSplit},
+	}
+
+	cases := []struct {
+		name, tenantID string
+		start, end     time.Time
+		expectedSplit  time.Duration
+		iqo            util.IngesterQueryOptions
+		values         bool
+	}{
+		{
+			name:          "outside ingester query window",
+			tenantID:      defaultTenant,
+			start:         time.Now().Add(-6 * time.Hour),
+			end:           time.Now().Add(-5 * time.Hour),
+			expectedSplit: defaultSplit,
+			iqo: ingesterQueryOpts{
+				queryIngestersWithin: ingesterQueryWindow,
+				queryStoreOnly:       false,
+			},
+		},
+		{
+			name:          "within ingester query window",
+			tenantID:      defaultTenant,
+			start:         time.Now().Add(-6 * time.Hour),
+			end:           time.Now().Add(-ingesterQueryWindow / 2),
+			expectedSplit: ingesterSplit,
+			iqo: ingesterQueryOpts{
+				queryIngestersWithin: ingesterQueryWindow,
+				queryStoreOnly:       false,
+			},
+		},
+		{
+			name:          "within ingester query window, but query store only",
+			tenantID:      defaultTenant,
+			start:         time.Now().Add(-6 * time.Hour),
+			end:           time.Now().Add(-ingesterQueryWindow / 2),
+			expectedSplit: defaultSplit,
+			iqo: ingesterQueryOpts{
+				queryIngestersWithin: ingesterQueryWindow,
+				queryStoreOnly:       true,
+			},
+		},
+		{
+			name:          "within ingester query window, but no ingester split duration configured",
+			tenantID:      alternateTenant,
+			start:         time.Now().Add(-6 * time.Hour),
+			end:           time.Now().Add(-ingesterQueryWindow / 2),
+			expectedSplit: defaultSplit,
+			iqo: ingesterQueryOpts{
+				queryIngestersWithin: ingesterQueryWindow,
+				queryStoreOnly:       false,
+			},
+		},
+	}
+
+	for _, values := range []bool{true, false} {
+		for _, tc := range cases {
+			t.Run(fmt.Sprintf("%s (values: %v)", tc.name, values), func(t *testing.T) {
+				keyGen := cacheKeyLabels{l, nil, tc.iqo}
+
+				r := &LabelRequest{
+					LabelRequest: logproto.LabelRequest{
+						Start: &tc.start,
+						End:   &tc.end,
+					},
+				}
+
+				const labelName = "foo"
+				const query = `{cluster="eu-west1"}`
+
+				if values {
+					r.LabelRequest.Values = true
+					r.LabelRequest.Name = labelName
+					r.LabelRequest.Query = query
+				}
+
+				// we use regex here because the cache key always refers to the current time to get the ingester query window,
+				// and therefore we can't know the current interval a priori without duplicating the logic
+				var pattern *regexp.Regexp
+				if values {
+					pattern = regexp.MustCompile(fmt.Sprintf(`labelvalues:%s:%s:%s:(\d+):%d`, tc.tenantID, labelName, regexp.QuoteMeta(query), tc.expectedSplit))
+				} else {
+					pattern = regexp.MustCompile(fmt.Sprintf(`labels:%s:(\d+):%d`, tc.tenantID, tc.expectedSplit))
+				}
+
+				require.Regexp(t, pattern, keyGen.GenerateCacheKey(context.Background(), tc.tenantID, r))
+			})
+		}
+	}
+}
diff --git a/pkg/querier/queryrange/limits.go b/pkg/querier/queryrange/limits.go
index 673c995a600b..79cc9ad16a36 100644
--- a/pkg/querier/queryrange/limits.go
+++ b/pkg/querier/queryrange/limits.go
@@ -30,6 +30,7 @@ import (
 	"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
 	"github.com/grafana/loki/pkg/storage/config"
 	"github.com/grafana/loki/pkg/storage/stores/index/stats"
+	"github.com/grafana/loki/pkg/util"
 	util_log "github.com/grafana/loki/pkg/util/log"
 	"github.com/grafana/loki/pkg/util/spanlogger"
 	"github.com/grafana/loki/pkg/util/validation"
@@ -102,10 +103,11 @@ type UserIDTransformer func(context.Context, string) string
 type cacheKeyLimits struct {
 	Limits
 	transformer UserIDTransformer
+	iqo         util.IngesterQueryOptions
 }

 func (l cacheKeyLimits) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
-	split := l.QuerySplitDuration(userID)
+	split := SplitIntervalForTimeRange(l.iqo, l.Limits, l.QuerySplitDuration, []string{userID}, time.Now().UTC(), r.GetEnd().UTC())

 	var currentInterval int64
 	if denominator := int64(split / time.Millisecond); denominator > 0 {
diff --git a/pkg/querier/queryrange/limits/definitions.go b/pkg/querier/queryrange/limits/definitions.go
index bd84e144fa47..e12255883bf4 100644
--- a/pkg/querier/queryrange/limits/definitions.go
+++ b/pkg/querier/queryrange/limits/definitions.go
@@ -15,6 +15,7 @@ type Limits interface {
 	logql.Limits
 	QuerySplitDuration(string) time.Duration
 	MetadataQuerySplitDuration(string) time.Duration
+	IngesterQuerySplitDuration(string) time.Duration
 	MaxQuerySeries(context.Context, string) int
 	MaxEntriesLimitPerQuery(context.Context, string) int
 	MinShardingLookback(string) time.Duration
@@ -29,5 +30,6 @@ type Limits interface {
 	MaxQueryBytesRead(context.Context, string) int
 	MaxQuerierBytesRead(context.Context, string) int
 	MaxStatsCacheFreshness(context.Context, string) time.Duration
+	MaxMetadataCacheFreshness(context.Context, string) time.Duration
 	VolumeEnabled(string) bool
 }
diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go
index 3b82c1dc9eab..a80cf96dde80 100644
--- a/pkg/querier/queryrange/limits_test.go
+++ b/pkg/querier/queryrange/limits_test.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"reflect"
+	"regexp"
 	"sync"
 	"testing"
 	"time"
@@ -22,6 +23,7 @@ import (
 	"github.com/grafana/loki/pkg/querier/plan"
 	base "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
 	"github.com/grafana/loki/pkg/storage/config"
+	"github.com/grafana/loki/pkg/util"
 	"github.com/grafana/loki/pkg/util/constants"
 	util_log "github.com/grafana/loki/pkg/util/log"
 	"github.com/grafana/loki/pkg/util/math"
@@ -48,17 +50,106 @@ func TestLimits(t *testing.T) {
 	require.Equal(
 		t,
 		fmt.Sprintf("%s:%s:%d:%d:%d", "a", r.GetQuery(), r.GetStep(), r.GetStart().UnixMilli()/int64(time.Hour/time.Millisecond), int64(time.Hour)),
-		cacheKeyLimits{wrapped, nil}.GenerateCacheKey(context.Background(), "a", r),
+		cacheKeyLimits{wrapped, nil, nil}.GenerateCacheKey(context.Background(), "a", r),
 	)
 }

+func TestMetricQueryCacheKey(t *testing.T) {
+	const (
+		defaultTenant       = "a"
+		alternateTenant     = "b"
+		query               = `sum(rate({foo="bar"}[1]))`
+		defaultSplit        = time.Hour
+		ingesterSplit       = 90 * time.Minute
+		ingesterQueryWindow = defaultSplit * 3
+	)
+
+	var (
+		step = (15 * time.Second).Milliseconds()
+	)
+
+	l := fakeLimits{
+		splitDuration:         map[string]time.Duration{defaultTenant: defaultSplit, alternateTenant: defaultSplit},
+		ingesterSplitDuration: map[string]time.Duration{defaultTenant: ingesterSplit},
+	}
+
+	cases := []struct {
+		name, tenantID string
+		start, end     time.Time
+		expectedSplit  time.Duration
+		iqo            util.IngesterQueryOptions
+	}{
+		{
+			name:          "outside ingester query window",
+			tenantID:      defaultTenant,
+			start:         time.Now().Add(-6 * time.Hour),
+			end:           time.Now().Add(-5 * time.Hour),
+			expectedSplit: defaultSplit,
+			iqo: ingesterQueryOpts{
+				queryIngestersWithin: ingesterQueryWindow,
+				queryStoreOnly:       false,
+			},
+		},
+		{
+			name:          "within ingester query window",
+			tenantID:      defaultTenant,
+			start:         time.Now().Add(-6 * time.Hour),
+			end:           time.Now().Add(-ingesterQueryWindow / 2),
+			expectedSplit: ingesterSplit,
+			iqo: ingesterQueryOpts{
+				queryIngestersWithin: ingesterQueryWindow,
+				queryStoreOnly:       false,
+			},
+		},
+		{
+			name:          "within ingester query window, but query store only",
+			tenantID:      defaultTenant,
+			start:         time.Now().Add(-6 * time.Hour),
+			end:           time.Now().Add(-ingesterQueryWindow / 2),
+			expectedSplit: defaultSplit,
+			iqo: ingesterQueryOpts{
+				queryIngestersWithin: ingesterQueryWindow,
+				queryStoreOnly:       true,
+			},
+		},
+		{
+			name:          "within ingester query window, but no ingester split duration configured",
+			tenantID:      alternateTenant,
+			start:         time.Now().Add(-6 * time.Hour),
+			end:           time.Now().Add(-ingesterQueryWindow / 2),
+			expectedSplit: defaultSplit,
+			iqo: ingesterQueryOpts{
+				queryIngestersWithin: ingesterQueryWindow,
+				queryStoreOnly:       false,
+			},
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			keyGen := cacheKeyLimits{l, nil, tc.iqo}
+
+			r := &LokiRequest{
+				Query:   query,
+				StartTs: tc.start,
+				EndTs:   tc.end,
+				Step:    step,
+			}
+
+			// we use regex here because the cache key always refers to the current time to get the ingester query window,
+			// and therefore we can't know the current interval a priori without duplicating the logic
+			pattern := regexp.MustCompile(fmt.Sprintf(`%s:%s:%d:(\d+):%d`, tc.tenantID, regexp.QuoteMeta(query), step, tc.expectedSplit))
+
+			require.Regexp(t, pattern, keyGen.GenerateCacheKey(context.Background(), tc.tenantID, r))
+		})
+	}
+}
+
 func Test_seriesLimiter(t *testing.T) {
 	cfg := testConfig
 	cfg.CacheResults = false
 	cfg.CacheIndexStatsResults = false
 	// split in 7 with 2 in // max.
 	l := WithSplitByLimits(fakeLimits{maxSeries: 1, maxQueryParallelism: 2}, time.Hour)
-	tpw, stopper, err := NewMiddleware(cfg, testEngineOpts, util_log.Logger, l, config.SchemaConfig{
+	tpw, stopper, err := NewMiddleware(cfg, testEngineOpts, nil, util_log.Logger, l, config.SchemaConfig{
 		Configs: testSchemas,
 	}, nil, false, nil, constants.Loki)
 	if stopper != nil {
@@ -228,7 +319,7 @@ func Test_MaxQueryParallelismDisable(t *testing.T) {
 }

 func Test_MaxQueryLookBack(t *testing.T) {
-	tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{
+	tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, nil, util_log.Logger, fakeLimits{
 		maxQueryLookback:    1 * time.Hour,
 		maxQueryParallelism: 1,
 	}, config.SchemaConfig{
@@ -308,7 +399,7 @@ func Test_MaxQueryLookBack_Types(t *testing.T) {
 }

 func Test_GenerateCacheKey_NoDivideZero(t *testing.T) {
-	l := cacheKeyLimits{WithSplitByLimits(nil, 0), nil}
+	l := cacheKeyLimits{WithSplitByLimits(nil, 0), nil, nil}
 	start := time.Now()
 	r := &LokiRequest{
 		Query: "qry",
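The regex assertions above check keys of the shape `tenant:query:step:interval:split`. For illustration, a toy version of how that interval bucket is computed (assuming a positive split, as the real code guards against a zero denominator):

package main

import (
	"fmt"
	"time"
)

// cacheKey sketches GenerateCacheKey for metric queries: the request's start
// time is bucketed by the chosen split interval, so all sub-queries falling
// into the same split share one cache entry per tenant/query/step.
func cacheKey(tenant, query string, step int64, start time.Time, split time.Duration) string {
	currentInterval := start.UnixMilli() / int64(split/time.Millisecond)
	return fmt.Sprintf("%s:%s:%d:%d:%d", tenant, query, step, currentInterval, split)
}

func main() {
	start := time.Date(2024, 1, 15, 10, 30, 0, 0, time.UTC)

	// With a 1h split the interval bucket changes every hour; choosing the
	// 90m ingester split instead (as SplitIntervalForTimeRange may do for
	// recent data) yields a different, non-overlapping key space.
	fmt.Println(cacheKey("a", `sum(rate({foo="bar"}[1]))`, 15000, start, time.Hour))
	fmt.Println(cacheKey("a", `sum(rate({foo="bar"}[1]))`, 15000, start, 90*time.Minute))
}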
+ if interval == 0 || lokiReq.Limit == 0 { return l.next.Do(ctx, req) } // The first subquery might not be aligned. @@ -181,7 +182,7 @@ func (l *logResultCache) handleMiss(ctx context.Context, cacheKey string, req *L func (l *logResultCache) handleHit(ctx context.Context, cacheKey string, cachedRequest *LokiRequest, lokiReq *LokiRequest) (queryrangebase.Response, error) { l.metrics.CacheHit.Inc() // we start with an empty response - result := emptyResponse(cachedRequest) + result := emptyResponse(lokiReq) // if the request is the same and cover the whole time range, // we can just return the cached result. if cachedRequest.StartTs.UnixNano() <= lokiReq.StartTs.UnixNano() && cachedRequest.EndTs.UnixNano() >= lokiReq.EndTs.UnixNano() { diff --git a/pkg/querier/queryrange/log_result_cache_test.go b/pkg/querier/queryrange/log_result_cache_test.go index 5d67be33b84f..5da4aee7c4be 100644 --- a/pkg/querier/queryrange/log_result_cache_test.go +++ b/pkg/querier/queryrange/log_result_cache_test.go @@ -580,6 +580,54 @@ func Test_LogResultNonOverlappingCache(t *testing.T) { fake.AssertExpectations(t) } +func Test_LogResultCacheDifferentLimit(t *testing.T) { + var ( + ctx = user.InjectOrgID(context.Background(), "foo") + lrc = NewLogResultCache( + log.NewNopLogger(), + fakeLimits{ + splitDuration: map[string]time.Duration{"foo": time.Minute}, + }, + cache.NewMockCache(), + nil, + nil, + nil, + ) + ) + + req1 := &LokiRequest{ + StartTs: time.Unix(0, time.Minute.Nanoseconds()), + EndTs: time.Unix(0, 2*time.Minute.Nanoseconds()), + Limit: entriesLimit, + } + + req2 := &LokiRequest{ + StartTs: time.Unix(0, time.Minute.Nanoseconds()), + EndTs: time.Unix(0, 2*time.Minute.Nanoseconds()), + Limit: 10, + } + + fake := newFakeResponse([]mockResponse{ + { + RequestResponse: queryrangebase.RequestResponse{ + Request: req1, + Response: emptyResponse(req1), + }, + }, + }) + + h := lrc.Wrap(fake) + + resp, err := h.Do(ctx, req1) + require.NoError(t, err) + require.Equal(t, emptyResponse(req1), resp) + resp, err = h.Do(ctx, req2) + require.NoError(t, err) + require.Equal(t, emptyResponse(req2), resp) + + fake.AssertExpectations(t) +} + func TestExtractLokiResponse(t *testing.T) { for _, tc := range []struct { name string @@ -677,6 +725,7 @@ func newFakeResponse(responses []mockResponse) fakeResponse { for _, r := range responses { m.On("Do", mock.Anything, r.Request).Return(r.Response, r.err).Once() } + return fakeResponse{ Mock: m, } diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 5f0aef4a1ab4..8223704eea02 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -120,6 +120,7 @@ func newResultsCacheFromConfig(cfg base.ResultsCacheConfig, registerer prometheu func NewMiddleware( cfg Config, engineOpts logql.EngineOpts, + iqo util.IngesterQueryOptions, log log.Logger, limits Limits, schema config.SchemaConfig, @@ -176,36 +177,36 @@ func NewMiddleware( var codec base.Codec = DefaultCodec - indexStatsTripperware, err := NewIndexStatsTripperware(cfg, log, limits, schema, codec, statsCache, + indexStatsTripperware, err := NewIndexStatsTripperware(cfg, log, limits, schema, codec, iqo, statsCache, cacheGenNumLoader, retentionEnabled, metrics, metricsNamespace) if err != nil { return nil, nil, err } - metricsTripperware, err := NewMetricTripperware(cfg, engineOpts, log, limits, schema, codec, resultsCache, + metricsTripperware, err := NewMetricTripperware(cfg, engineOpts, log, limits, schema, codec, iqo, resultsCache, cacheGenNumLoader, 
retentionEnabled, PrometheusExtractor{}, metrics, indexStatsTripperware, metricsNamespace) if err != nil { return nil, nil, err } - limitedTripperware, err := NewLimitedTripperware(cfg, engineOpts, log, limits, schema, metrics, indexStatsTripperware, codec) + limitedTripperware, err := NewLimitedTripperware(cfg, engineOpts, log, limits, schema, metrics, indexStatsTripperware, codec, iqo) if err != nil { return nil, nil, err } // NOTE: When we would start caching response from non-metric queries we would have to consider cache gen headers as well in // MergeResponse implementation for Loki codecs same as it is done in Cortex at https://github.com/cortexproject/cortex/blob/21bad57b346c730d684d6d0205efef133422ab28/pkg/querier/queryrange/query_range.go#L170 - logFilterTripperware, err := NewLogFilterTripperware(cfg, engineOpts, log, limits, schema, codec, resultsCache, metrics, indexStatsTripperware, metricsNamespace) + logFilterTripperware, err := NewLogFilterTripperware(cfg, engineOpts, log, limits, schema, codec, iqo, resultsCache, metrics, indexStatsTripperware, metricsNamespace) if err != nil { return nil, nil, err } - seriesTripperware, err := NewSeriesTripperware(cfg, log, limits, metrics, schema, codec, seriesCache, cacheGenNumLoader, retentionEnabled, metricsNamespace) + seriesTripperware, err := NewSeriesTripperware(cfg, log, limits, metrics, schema, codec, iqo, seriesCache, cacheGenNumLoader, retentionEnabled, metricsNamespace) if err != nil { return nil, nil, err } - labelsTripperware, err := NewLabelsTripperware(cfg, log, limits, codec, labelsCache, cacheGenNumLoader, retentionEnabled, metrics, schema, metricsNamespace) + labelsTripperware, err := NewLabelsTripperware(cfg, log, limits, codec, iqo, labelsCache, cacheGenNumLoader, retentionEnabled, metrics, schema, metricsNamespace) if err != nil { return nil, nil, err } @@ -215,7 +216,7 @@ func NewMiddleware( return nil, nil, err } - seriesVolumeTripperware, err := NewVolumeTripperware(cfg, log, limits, schema, codec, volumeCache, cacheGenNumLoader, retentionEnabled, metrics, metricsNamespace) + seriesVolumeTripperware, err := NewVolumeTripperware(cfg, log, limits, schema, codec, iqo, volumeCache, cacheGenNumLoader, retentionEnabled, metrics, metricsNamespace) if err != nil { return nil, nil, err } @@ -406,18 +407,7 @@ func getOperation(path string) string { } // NewLogFilterTripperware creates a new frontend tripperware responsible for handling log requests. 
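The single-line signatures here are a mechanical reflow; the substantive change is that every constructor now threads through `iqo util.IngesterQueryOptions`. Judging from the `ingesterQueryOpts` stub added to the tests further down, the interface is presumably along these lines (the authoritative definition lives in `pkg/util`):

```go
package util // assumption: a sketch of the real interface in pkg/util

import "time"

// IngesterQueryOptions as inferred from the ingesterQueryOpts test stub in
// roundtrip_test.go; this method set is all the new splitters and cache-key
// generators appear to rely on.
type IngesterQueryOptions interface {
	// QueryStoreOnly reports whether queries skip ingesters entirely.
	QueryStoreOnly() bool
	// QueryIngestersWithin is the window, counted back from now, in which
	// queries are also sent to ingesters.
	QueryIngestersWithin() time.Duration
}
```

When `QueryStoreOnly` is true or a tenant's `IngesterQuerySplitDuration` is zero, the splitters and cache-key generators fall back to the regular split interval, which is exactly what the new test cases assert.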
-func NewLogFilterTripperware( - cfg Config, - engineOpts logql.EngineOpts, - log log.Logger, - limits Limits, - schema config.SchemaConfig, - merger base.Merger, - c cache.Cache, - metrics *Metrics, - indexStatsTripperware base.Middleware, - metricsNamespace string, -) (base.Middleware, error) { +func NewLogFilterTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Logger, limits Limits, schema config.SchemaConfig, merger base.Merger, iqo util.IngesterQueryOptions, c cache.Cache, metrics *Metrics, indexStatsTripperware base.Middleware, metricsNamespace string) (base.Middleware, error) { return base.MiddlewareFunc(func(next base.Handler) base.Handler { statsHandler := indexStatsTripperware.Wrap(next) @@ -426,7 +416,7 @@ func NewLogFilterTripperware( NewLimitsMiddleware(limits), NewQuerySizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), - SplitByIntervalMiddleware(schema.Configs, limits, merger, splitByTime, metrics.SplitByMetrics), + SplitByIntervalMiddleware(schema.Configs, limits, merger, newDefaultSplitter(limits, iqo), metrics.SplitByMetrics), } if cfg.CacheResults { @@ -481,16 +471,7 @@ func NewLogFilterTripperware( } // NewLimitedTripperware creates a new frontend tripperware responsible for handling log requests which are label matcher only, no filter expression. -func NewLimitedTripperware( - _ Config, - engineOpts logql.EngineOpts, - log log.Logger, - limits Limits, - schema config.SchemaConfig, - metrics *Metrics, - indexStatsTripperware base.Middleware, - merger base.Merger, -) (base.Middleware, error) { +func NewLimitedTripperware(_ Config, engineOpts logql.EngineOpts, log log.Logger, limits Limits, schema config.SchemaConfig, metrics *Metrics, indexStatsTripperware base.Middleware, merger base.Merger, iqo util.IngesterQueryOptions) (base.Middleware, error) { return base.MiddlewareFunc(func(next base.Handler) base.Handler { statsHandler := indexStatsTripperware.Wrap(next) @@ -499,7 +480,7 @@ func NewLimitedTripperware( NewLimitsMiddleware(limits), NewQuerySizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), - SplitByIntervalMiddleware(schema.Configs, WithMaxParallelism(limits, limitedQuerySplits), merger, splitByTime, metrics.SplitByMetrics), + SplitByIntervalMiddleware(schema.Configs, WithMaxParallelism(limits, limitedQuerySplits), merger, newDefaultSplitter(limits, iqo), metrics.SplitByMetrics), NewQuerierSizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), } @@ -518,6 +499,7 @@ func NewSeriesTripperware( metrics *Metrics, schema config.SchemaConfig, merger base.Merger, + iqo util.IngesterQueryOptions, c cache.Cache, cacheGenNumLoader base.CacheGenNumberLoader, retentionEnabled bool, @@ -532,6 +514,7 @@ func NewSeriesTripperware( merger, c, cacheGenNumLoader, + iqo, func(_ context.Context, r base.Request) bool { return !r.GetCachingOptions().Disabled }, @@ -558,7 +541,7 @@ func NewSeriesTripperware( StatsCollectorMiddleware(), NewLimitsMiddleware(limits), base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), - SplitByIntervalMiddleware(schema.Configs, limits, merger, splitByTime, metrics.SplitByMetrics), + SplitByIntervalMiddleware(schema.Configs, limits, merger, newDefaultSplitter(limits, iqo), metrics.SplitByMetrics), } if cfg.CacheSeriesResults { @@ -567,7 +550,6 @@ func NewSeriesTripperware( 
base.InstrumentMiddleware("series_results_cache", metrics.InstrumentMiddlewareMetrics), cacheMiddleware, ) - } if cfg.MaxRetries > 0 { @@ -601,6 +583,7 @@ func NewLabelsTripperware( log log.Logger, limits Limits, merger base.Merger, + iqo util.IngesterQueryOptions, c cache.Cache, cacheGenNumLoader base.CacheGenNumberLoader, retentionEnabled bool, @@ -617,6 +600,7 @@ func NewLabelsTripperware( merger, c, cacheGenNumLoader, + iqo, func(_ context.Context, r base.Request) bool { return !r.GetCachingOptions().Disabled }, @@ -643,7 +627,7 @@ func NewLabelsTripperware( StatsCollectorMiddleware(), NewLimitsMiddleware(limits), base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), - SplitByIntervalMiddleware(schema.Configs, limits, merger, splitByTime, metrics.SplitByMetrics), + SplitByIntervalMiddleware(schema.Configs, limits, merger, newDefaultSplitter(limits, iqo), metrics.SplitByMetrics), } if cfg.CacheLabelResults { @@ -652,7 +636,6 @@ func NewLabelsTripperware( base.InstrumentMiddleware("label_results_cache", metrics.InstrumentMiddlewareMetrics), cacheMiddleware, ) - } if cfg.MaxRetries > 0 { @@ -669,22 +652,8 @@ func NewLabelsTripperware( } // NewMetricTripperware creates a new frontend tripperware responsible for handling metric queries -func NewMetricTripperware( - cfg Config, - engineOpts logql.EngineOpts, - log log.Logger, - limits Limits, - schema config.SchemaConfig, - merger base.Merger, - c cache.Cache, - cacheGenNumLoader base.CacheGenNumberLoader, - retentionEnabled bool, - extractor base.Extractor, - metrics *Metrics, - indexStatsTripperware base.Middleware, - metricsNamespace string, -) (base.Middleware, error) { - cacheKey := cacheKeyLimits{limits, cfg.Transformer} +func NewMetricTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Logger, limits Limits, schema config.SchemaConfig, merger base.Merger, iqo util.IngesterQueryOptions, c cache.Cache, cacheGenNumLoader base.CacheGenNumberLoader, retentionEnabled bool, extractor base.Extractor, metrics *Metrics, indexStatsTripperware base.Middleware, metricsNamespace string) (base.Middleware, error) { + cacheKey := cacheKeyLimits{limits, cfg.Transformer, iqo} var queryCacheMiddleware base.Middleware if cfg.CacheResults { var err error @@ -737,7 +706,7 @@ func NewMetricTripperware( queryRangeMiddleware, NewQuerySizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), - SplitByIntervalMiddleware(schema.Configs, limits, merger, splitMetricByTime, metrics.SplitByMetrics), + SplitByIntervalMiddleware(schema.Configs, limits, merger, newMetricQuerySplitter(limits, iqo), metrics.SplitByMetrics), ) if cfg.CacheResults { @@ -793,16 +762,7 @@ func NewMetricTripperware( } // NewInstantMetricTripperware creates a new frontend tripperware responsible for handling metric queries -func NewInstantMetricTripperware( - cfg Config, - engineOpts logql.EngineOpts, - log log.Logger, - limits Limits, - schema config.SchemaConfig, - metrics *Metrics, - indexStatsTripperware base.Middleware, - metricsNamespace string, -) (base.Middleware, error) { +func NewInstantMetricTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Logger, limits Limits, schema config.SchemaConfig, metrics *Metrics, indexStatsTripperware base.Middleware, metricsNamespace string) (base.Middleware, error) { return base.MiddlewareFunc(func(next base.Handler) base.Handler { statsHandler := indexStatsTripperware.Wrap(next) @@ -844,21 +804,10 
@@ func NewInstantMetricTripperware( }), nil } -func NewVolumeTripperware( - cfg Config, - log log.Logger, - limits Limits, - schema config.SchemaConfig, - merger base.Merger, - c cache.Cache, - cacheGenNumLoader base.CacheGenNumberLoader, - retentionEnabled bool, - metrics *Metrics, - metricsNamespace string, -) (base.Middleware, error) { +func NewVolumeTripperware(cfg Config, log log.Logger, limits Limits, schema config.SchemaConfig, merger base.Merger, iqo util.IngesterQueryOptions, c cache.Cache, cacheGenNumLoader base.CacheGenNumberLoader, retentionEnabled bool, metrics *Metrics, metricsNamespace string) (base.Middleware, error) { // Parallelize the volume requests, so it doesn't send a huge request to a single index-gw (i.e. {app=~".+"} for 30d). // Indices are sharded by 24 hours, so we split the volume request in 24h intervals. - limits = WithSplitByLimits(limits, 24*time.Hour) + limits = WithSplitByLimits(limits, indexStatsQuerySplitInterval) var cacheMiddleware base.Middleware if cfg.CacheVolumeResults { var err error @@ -868,6 +817,7 @@ func NewVolumeTripperware( merger, c, cacheGenNumLoader, + iqo, func(_ context.Context, r base.Request) bool { return !r.GetCachingOptions().Disabled }, @@ -894,6 +844,7 @@ func NewVolumeTripperware( cacheMiddleware, cfg, merger, + newDefaultSplitter(limits, iqo), limits, log, metrics, @@ -962,18 +913,7 @@ func volumeFeatureFlagRoundTripper(nextTW base.Middleware, limits Limits) base.M }) } -func NewIndexStatsTripperware( - cfg Config, - log log.Logger, - limits Limits, - schema config.SchemaConfig, - merger base.Merger, - c cache.Cache, - cacheGenNumLoader base.CacheGenNumberLoader, - retentionEnabled bool, - metrics *Metrics, - metricsNamespace string, -) (base.Middleware, error) { +func NewIndexStatsTripperware(cfg Config, log log.Logger, limits Limits, schema config.SchemaConfig, merger base.Merger, iqo util.IngesterQueryOptions, c cache.Cache, cacheGenNumLoader base.CacheGenNumberLoader, retentionEnabled bool, metrics *Metrics, metricsNamespace string) (base.Middleware, error) { limits = WithSplitByLimits(limits, indexStatsQuerySplitInterval) var cacheMiddleware base.Middleware @@ -985,6 +925,7 @@ func NewIndexStatsTripperware( merger, c, cacheGenNumLoader, + iqo, func(_ context.Context, r base.Request) bool { return !r.GetCachingOptions().Disabled }, @@ -1011,6 +952,7 @@ func NewIndexStatsTripperware( cacheMiddleware, cfg, merger, + newDefaultSplitter(limits, iqo), limits, log, metrics, @@ -1028,6 +970,7 @@ func sharedIndexTripperware( cacheMiddleware base.Middleware, cfg Config, merger base.Merger, + split splitter, limits Limits, log log.Logger, metrics *Metrics, @@ -1038,7 +981,7 @@ func sharedIndexTripperware( middlewares := []base.Middleware{ NewLimitsMiddleware(limits), base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), - SplitByIntervalMiddleware(schema.Configs, limits, merger, splitByTime, metrics.SplitByMetrics), + SplitByIntervalMiddleware(schema.Configs, limits, merger, split, metrics.SplitByMetrics), } if cacheMiddleware != nil { diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index 883f9b14226b..c7c7cff4595a 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -189,7 +189,7 @@ func TestMetricsTripperware(t *testing.T) { noCacheTestCfg := testConfig noCacheTestCfg.CacheResults = false noCacheTestCfg.CacheIndexStatsResults = false - tpw, stopper, err := NewMiddleware(noCacheTestCfg, testEngineOpts, 
util_log.Logger, l, config.SchemaConfig{ + tpw, stopper, err := NewMiddleware(noCacheTestCfg, testEngineOpts, nil, util_log.Logger, l, config.SchemaConfig{ Configs: testSchemasTSDB, }, nil, false, nil, constants.Loki) if stopper != nil { @@ -240,7 +240,7 @@ func TestMetricsTripperware(t *testing.T) { require.Error(t, err) // Configure with cache - tpw, stopper, err = NewMiddleware(testConfig, testEngineOpts, util_log.Logger, l, config.SchemaConfig{ + tpw, stopper, err = NewMiddleware(testConfig, testEngineOpts, nil, util_log.Logger, l, config.SchemaConfig{ Configs: testSchemasTSDB, }, nil, false, nil, constants.Loki) if stopper != nil { @@ -278,7 +278,7 @@ func TestLogFilterTripperware(t *testing.T) { noCacheTestCfg := testConfig noCacheTestCfg.CacheResults = false noCacheTestCfg.CacheIndexStatsResults = false - tpw, stopper, err := NewMiddleware(noCacheTestCfg, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: testSchemasTSDB}, nil, false, nil, constants.Loki) + tpw, stopper, err := NewMiddleware(noCacheTestCfg, testEngineOpts, nil, util_log.Logger, l, config.SchemaConfig{Configs: testSchemasTSDB}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -347,7 +347,7 @@ func TestInstantQueryTripperware(t *testing.T) { queryTimeout: 1 * time.Minute, maxSeries: 1, } - tpw, stopper, err := NewMiddleware(testShardingConfigNoCache, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: testSchemasTSDB}, nil, false, nil, constants.Loki) + tpw, stopper, err := NewMiddleware(testShardingConfigNoCache, testEngineOpts, nil, util_log.Logger, l, config.SchemaConfig{Configs: testSchemasTSDB}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -403,7 +403,7 @@ func TestSeriesTripperware(t *testing.T) { "1": 24 * time.Hour, }, } - tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, nil, util_log.Logger, l, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -441,7 +441,7 @@ func TestLabelsTripperware(t *testing.T) { "1": 24 * time.Hour, }, } - tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, nil, util_log.Logger, l, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -487,7 +487,7 @@ func TestLabelsTripperware(t *testing.T) { } func TestIndexStatsTripperware(t *testing.T) { - tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, nil, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -537,7 +537,7 @@ func TestVolumeTripperware(t *testing.T) { volumeEnabled: true, maxSeries: 42, } - tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, limits, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) + tpw, stopper, err := 
NewMiddleware(testConfig, testEngineOpts, nil, util_log.Logger, limits, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -593,7 +593,7 @@ func TestVolumeTripperware(t *testing.T) { }) t.Run("range queries return a prometheus style metrics response, putting volumes in buckets based on the step", func(t *testing.T) { - tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, volumeEnabled: true}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, nil, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, volumeEnabled: true}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -784,7 +784,7 @@ func TestNewTripperware_Caches(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - _, stopper, err := NewMiddleware(tc.config, testEngineOpts, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) + _, stopper, err := NewMiddleware(tc.config, testEngineOpts, nil, util_log.Logger, fakeLimits{maxQueryLength: 48 * time.Hour, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -814,7 +814,7 @@ func TestNewTripperware_Caches(t *testing.T) { } func TestLogNoFilter(t *testing.T) { - tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, nil, util_log.Logger, fakeLimits{maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -871,7 +871,7 @@ func TestPostQueries(t *testing.T) { } func TestTripperware_EntriesLimit(t *testing.T) { - tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, fakeLimits{maxEntriesLimitPerQuery: 5000, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, nil, util_log.Logger, fakeLimits{maxEntriesLimitPerQuery: 5000, maxQueryParallelism: 1}, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -920,7 +920,7 @@ func TestTripperware_RequiredLabels(t *testing.T) { } { t.Run(test.qs, func(t *testing.T) { limits := fakeLimits{maxEntriesLimitPerQuery: 5000, maxQueryParallelism: 1, requiredLabels: []string{"app"}} - tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, limits, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, nil, util_log.Logger, limits, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -1027,7 +1027,7 @@ func TestTripperware_RequiredNumberLabels(t *testing.T) { maxQueryParallelism: 1, requiredNumberLabels: tc.requiredNumberLabels, } - tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, util_log.Logger, limits, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, 
constants.Loki) + tpw, stopper, err := NewMiddleware(testConfig, testEngineOpts, nil, util_log.Logger, limits, config.SchemaConfig{Configs: testSchemas}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -1218,7 +1218,7 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - tpw, stopper, err := NewMiddleware(statsTestCfg, testEngineOpts, util_log.Logger, l, config.SchemaConfig{Configs: statsSchemas}, nil, false, nil, constants.Loki) + tpw, stopper, err := NewMiddleware(statsTestCfg, testEngineOpts, nil, util_log.Logger, l, config.SchemaConfig{Configs: statsSchemas}, nil, false, nil, constants.Loki) if stopper != nil { defer stopper.Stop() } @@ -1237,22 +1237,24 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) { } type fakeLimits struct { - maxQueryLength time.Duration - maxQueryParallelism int - tsdbMaxQueryParallelism int - maxQueryLookback time.Duration - maxEntriesLimitPerQuery int - maxSeries int - splitDuration map[string]time.Duration - metadataSplitDuration map[string]time.Duration - minShardingLookback time.Duration - queryTimeout time.Duration - requiredLabels []string - requiredNumberLabels int - maxQueryBytesRead int - maxQuerierBytesRead int - maxStatsCacheFreshness time.Duration - volumeEnabled bool + maxQueryLength time.Duration + maxQueryParallelism int + tsdbMaxQueryParallelism int + maxQueryLookback time.Duration + maxEntriesLimitPerQuery int + maxSeries int + splitDuration map[string]time.Duration + metadataSplitDuration map[string]time.Duration + ingesterSplitDuration map[string]time.Duration + minShardingLookback time.Duration + queryTimeout time.Duration + requiredLabels []string + requiredNumberLabels int + maxQueryBytesRead int + maxQuerierBytesRead int + maxStatsCacheFreshness time.Duration + maxMetadataCacheFreshness time.Duration + volumeEnabled bool } func (f fakeLimits) QuerySplitDuration(key string) time.Duration { @@ -1269,6 +1271,13 @@ func (f fakeLimits) MetadataQuerySplitDuration(key string) time.Duration { return f.metadataSplitDuration[key] } +func (f fakeLimits) IngesterQuerySplitDuration(key string) time.Duration { + if f.ingesterSplitDuration == nil { + return 0 + } + return f.ingesterSplitDuration[key] +} + func (f fakeLimits) MaxQueryLength(context.Context, string) time.Duration { if f.maxQueryLength == 0 { return time.Hour * 7 @@ -1336,6 +1345,10 @@ func (f fakeLimits) MaxStatsCacheFreshness(_ context.Context, _ string) time.Dur return f.maxStatsCacheFreshness } +func (f fakeLimits) MaxMetadataCacheFreshness(_ context.Context, _ string) time.Duration { + return f.maxMetadataCacheFreshness +} + func (f fakeLimits) VolumeEnabled(_ string) bool { return f.volumeEnabled } @@ -1344,6 +1357,19 @@ func (f fakeLimits) TSDBMaxBytesPerShard(_ string) int { return valid.DefaultTSDBMaxBytesPerShard } +type ingesterQueryOpts struct { + queryStoreOnly bool + queryIngestersWithin time.Duration +} + +func (i ingesterQueryOpts) QueryStoreOnly() bool { + return i.queryStoreOnly +} + +func (i ingesterQueryOpts) QueryIngestersWithin() time.Duration { + return i.queryIngestersWithin +} + func counter() (*int, base.Handler) { count := 0 var lock sync.Mutex diff --git a/pkg/querier/queryrange/series_cache.go b/pkg/querier/queryrange/series_cache.go index 9ad67f70acf5..bbbf96e2dd70 100644 --- a/pkg/querier/queryrange/series_cache.go +++ b/pkg/querier/queryrange/series_cache.go @@ -9,21 +9,29 @@ import ( "time" "github.com/go-kit/log" + "github.com/go-kit/log/level" + 
"github.com/prometheus/common/model" + + "github.com/grafana/dskit/tenant" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/chunk/cache" "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache" + "github.com/grafana/loki/pkg/util" + "github.com/grafana/loki/pkg/util/validation" ) type cacheKeySeries struct { Limits transformer UserIDTransformer + iqo util.IngesterQueryOptions } // GenerateCacheKey generates a cache key based on the userID, matchers, split duration and the interval of the request. func (i cacheKeySeries) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string { sr := r.(*LokiSeriesRequest) - split := i.MetadataQuerySplitDuration(userID) + + split := SplitIntervalForTimeRange(i.iqo, i.Limits, i.MetadataQuerySplitDuration, []string{userID}, time.Now().UTC(), r.GetEnd().UTC()) var currentInterval int64 if denominator := int64(split / time.Millisecond); denominator > 0 { @@ -34,11 +42,12 @@ func (i cacheKeySeries) GenerateCacheKey(ctx context.Context, userID string, r r userID = i.transformer(ctx, userID) } - matchers := sr.GetMatch() - sort.Strings(matchers) - matcherStr := strings.Join(matchers, ",") + return fmt.Sprintf("series:%s:%s:%d:%d", userID, i.joinMatchers(sr.GetMatch()), currentInterval, split) +} - return fmt.Sprintf("series:%s:%s:%d:%d", userID, matcherStr, currentInterval, split) +func (i cacheKeySeries) joinMatchers(matchers []string) string { + sort.Strings(matchers) + return strings.Join(matchers, ",") } type seriesExtractor struct{} @@ -78,6 +87,7 @@ func NewSeriesCacheMiddleware( merger queryrangebase.Merger, c cache.Cache, cacheGenNumberLoader queryrangebase.CacheGenNumberLoader, + iqo util.IngesterQueryOptions, shouldCache queryrangebase.ShouldCacheFn, parallelismForReq queryrangebase.ParallelismForReqFn, retentionEnabled bool, @@ -87,14 +97,33 @@ func NewSeriesCacheMiddleware( return queryrangebase.NewResultsCacheMiddleware( logger, c, - cacheKeySeries{limits, transformer}, + cacheKeySeries{limits, transformer, iqo}, limits, merger, seriesExtractor{}, cacheGenNumberLoader, - shouldCache, + func(ctx context.Context, r queryrangebase.Request) bool { + return shouldCacheMetadataReq(ctx, logger, shouldCache, r, limits) + }, parallelismForReq, retentionEnabled, metrics, ) } + +func shouldCacheMetadataReq(ctx context.Context, logger log.Logger, shouldCache queryrangebase.ShouldCacheFn, req queryrangebase.Request, l Limits) bool { + if shouldCache != nil && !shouldCache(ctx, req) { + return false + } + + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + level.Error(logger).Log("msg", "failed to determine if metadata request should be cached. 
won't cache", "err", err) + return false + } + + cacheFreshnessCapture := func(id string) time.Duration { return l.MaxMetadataCacheFreshness(ctx, id) } + maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, cacheFreshnessCapture) + + return maxCacheFreshness == 0 || model.Time(req.GetEnd().UnixMilli()).Before(model.Now().Add(-maxCacheFreshness)) +} diff --git a/pkg/querier/queryrange/series_cache_test.go b/pkg/querier/queryrange/series_cache_test.go index abe992001217..d73efa9deea8 100644 --- a/pkg/querier/queryrange/series_cache_test.go +++ b/pkg/querier/queryrange/series_cache_test.go @@ -3,6 +3,7 @@ package queryrange import ( "context" "fmt" + "regexp" "testing" "time" @@ -77,6 +78,7 @@ func TestSeriesCache(t *testing.T) { cache.NewMockCache(), nil, nil, + nil, func(_ context.Context, _ []string, _ queryrangebase.Request) int { return 1 }, @@ -312,3 +314,203 @@ func TestSeriesCache(t *testing.T) { } }) } + +func TestSeriesCache_freshness(t *testing.T) { + testTime := time.Now().Add(-1 * time.Hour) + from, through := util.RoundToMilliseconds(testTime.Add(-1*time.Hour), testTime) + + for _, tt := range []struct { + name string + req *LokiSeriesRequest + shouldCache bool + maxMetadataCacheFreshness time.Duration + }{ + { + name: "max metadata freshness not set", + req: &LokiSeriesRequest{ + StartTs: from.Time(), + EndTs: through.Time(), + Match: []string{`{namespace=~".*"}`}, + Path: seriesAPIPath, + }, + shouldCache: true, + }, + { + name: "req overlaps with max cache freshness window", + req: &LokiSeriesRequest{ + StartTs: from.Time(), + EndTs: through.Time(), + Match: []string{`{namespace=~".*"}`}, + Path: seriesAPIPath, + }, + maxMetadataCacheFreshness: 24 * time.Hour, + shouldCache: false, + }, + { + name: "req does not overlap max cache freshness window", + req: &LokiSeriesRequest{ + StartTs: from.Add(-24 * time.Hour).Time(), + EndTs: through.Add(-24 * time.Hour).Time(), + Match: []string{`{namespace=~".*"}`}, + Path: seriesAPIPath, + }, + maxMetadataCacheFreshness: 24 * time.Hour, + shouldCache: true, + }, + } { + t.Run(tt.name, func(t *testing.T) { + cacheMiddleware, err := NewSeriesCacheMiddleware( + log.NewNopLogger(), + fakeLimits{ + metadataSplitDuration: map[string]time.Duration{ + "fake": 24 * time.Hour, + }, + maxMetadataCacheFreshness: tt.maxMetadataCacheFreshness, + }, + DefaultCodec, + cache.NewMockCache(), + nil, + nil, + nil, + func(_ context.Context, _ []string, _ queryrangebase.Request) int { + return 1 + }, + false, + nil, + nil, + ) + require.NoError(t, err) + + seriesResp := &LokiSeriesResponse{ + Status: "success", + Version: uint32(loghttp.VersionV1), + Data: []logproto.SeriesIdentifier{ + { + Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}}, + }, + }, + Statistics: stats.Result{ + Summary: stats.Summary{ + Splits: 1, + }, + }, + } + + called := 0 + handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + called++ + + // should request the entire length with no partitioning as nothing is cached yet. + require.Equal(t, tt.req.GetStart(), r.GetStart()) + require.Equal(t, tt.req.GetEnd(), r.GetEnd()) + + return seriesResp, nil + })) + + ctx := user.InjectOrgID(context.Background(), "fake") + got, err := handler.Do(ctx, tt.req) + require.NoError(t, err) + require.Equal(t, 1, called) // called actual handler, as not cached. 
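The assertions above exercise the freshness window introduced in `shouldCacheMetadataReq`. A small standalone sketch of the same check, using the `prometheus/common/model` calls the new code relies on:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// With frontend.max-metadata-cache-freshness set to 24h, a metadata
	// request is cacheable only if it ends before now-24h; more recent
	// results bypass the cache so they are not frozen while still changing.
	maxFreshness := 24 * time.Hour
	end := time.Now().Add(-1 * time.Hour) // request ends one hour ago

	cacheable := maxFreshness == 0 ||
		model.Time(end.UnixMilli()).Before(model.Now().Add(-maxFreshness))
	fmt.Println(cacheable) // false: the request overlaps the freshness window
}
```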
+ require.Equal(t, seriesResp, got) + + called = 0 + got, err = handler.Do(ctx, tt.req) + require.NoError(t, err) + if !tt.shouldCache { + require.Equal(t, 1, called) + } else { + require.Equal(t, 0, called) + } + require.Equal(t, seriesResp, got) + }) + } +} + +func TestSeriesQueryCacheKey(t *testing.T) { + const ( + defaultTenant = "a" + alternateTenant = "b" + defaultSplit = time.Hour + ingesterSplit = 90 * time.Minute + ingesterQueryWindow = defaultSplit * 3 + ) + + l := fakeLimits{ + metadataSplitDuration: map[string]time.Duration{defaultTenant: defaultSplit, alternateTenant: defaultSplit}, + ingesterSplitDuration: map[string]time.Duration{defaultTenant: ingesterSplit}, + } + + cases := []struct { + name, tenantID string + start, end time.Time + expectedSplit time.Duration + iqo util.IngesterQueryOptions + values bool + }{ + { + name: "outside ingester query window", + tenantID: defaultTenant, + start: time.Now().Add(-6 * time.Hour), + end: time.Now().Add(-5 * time.Hour), + expectedSplit: defaultSplit, + iqo: ingesterQueryOpts{ + queryIngestersWithin: ingesterQueryWindow, + queryStoreOnly: false, + }, + }, + { + name: "within ingester query window", + tenantID: defaultTenant, + start: time.Now().Add(-6 * time.Hour), + end: time.Now().Add(-ingesterQueryWindow / 2), + expectedSplit: ingesterSplit, + iqo: ingesterQueryOpts{ + queryIngestersWithin: ingesterQueryWindow, + queryStoreOnly: false, + }, + }, + { + name: "within ingester query window, but query store only", + tenantID: defaultTenant, + start: time.Now().Add(-6 * time.Hour), + end: time.Now().Add(-ingesterQueryWindow / 2), + expectedSplit: defaultSplit, + iqo: ingesterQueryOpts{ + queryIngestersWithin: ingesterQueryWindow, + queryStoreOnly: true, + }, + }, + { + name: "within ingester query window, but no ingester split duration configured", + tenantID: alternateTenant, + start: time.Now().Add(-6 * time.Hour), + end: time.Now().Add(-ingesterQueryWindow / 2), + expectedSplit: defaultSplit, + iqo: ingesterQueryOpts{ + queryIngestersWithin: ingesterQueryWindow, + queryStoreOnly: false, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + matchers := []string{`{namespace="prod"}`, `{service="foo"}`} + + keyGen := cacheKeySeries{l, nil, tc.iqo} + + r := &LokiSeriesRequest{ + StartTs: tc.start, + EndTs: tc.end, + Match: matchers, + Path: seriesAPIPath, + } + + // we use regex here because cache key always refers to the current time to get the ingester query window, + // and therefore we can't know the current interval a priori without duplicating the logic + pattern := regexp.MustCompile(fmt.Sprintf(`series:%s:%s:(\d+):%d`, tc.tenantID, regexp.QuoteMeta(keyGen.joinMatchers(matchers)), tc.expectedSplit)) + + require.Regexp(t, pattern, keyGen.GenerateCacheKey(context.Background(), tc.tenantID, r)) + }) + } +} diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go index 9e2eda4b1942..ef05aa969ec1 100644 --- a/pkg/querier/queryrange/split_by_interval.go +++ b/pkg/querier/queryrange/split_by_interval.go @@ -21,7 +21,6 @@ import ( "github.com/grafana/loki/pkg/logql/syntax" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/config" - "github.com/grafana/loki/pkg/util" "github.com/grafana/loki/pkg/util/validation" ) @@ -56,13 +55,11 @@ type splitByInterval struct { limits Limits merger queryrangebase.Merger metrics *SplitByMetrics - splitter Splitter + splitter splitter } -type Splitter func(req
queryrangebase.Request, interval time.Duration) ([]queryrangebase.Request, error) - // SplitByIntervalMiddleware creates a new Middleware that splits log requests by a given interval. -func SplitByIntervalMiddleware(configs []config.PeriodConfig, limits Limits, merger queryrangebase.Merger, splitter Splitter, metrics *SplitByMetrics) queryrangebase.Middleware { +func SplitByIntervalMiddleware(configs []config.PeriodConfig, limits Limits, merger queryrangebase.Merger, splitter splitter, metrics *SplitByMetrics) queryrangebase.Middleware { if metrics == nil { metrics = NewSplitByMetrics(nil) } @@ -189,7 +186,7 @@ func (h *splitByInterval) Do(ctx context.Context, r queryrangebase.Request) (que case *LokiSeriesRequest, *LabelRequest: interval = validation.MaxDurationOrZeroPerTenant(tenantIDs, h.limits.MetadataQuerySplitDuration) default: - interval = validation.MaxDurationOrZeroPerTenant(tenantIDs, h.limits.QuerySplitDuration) + interval = validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, h.limits.QuerySplitDuration) } // skip split by if unset @@ -197,7 +194,7 @@ func (h *splitByInterval) Do(ctx context.Context, r queryrangebase.Request) (que return h.next.Do(ctx, r) } - intervals, err := h.splitter(r, interval) + intervals, err := h.splitter.split(time.Now().UTC(), tenantIDs, r, interval) if err != nil { return nil, err } @@ -251,73 +248,6 @@ func (h *splitByInterval) Do(ctx context.Context, r queryrangebase.Request) (que return h.merger.MergeResponse(resps...) } -func splitByTime(req queryrangebase.Request, interval time.Duration) ([]queryrangebase.Request, error) { - var reqs []queryrangebase.Request - - switch r := req.(type) { - case *LokiRequest: - util.ForInterval(interval, r.StartTs, r.EndTs, false, func(start, end time.Time) { - reqs = append(reqs, &LokiRequest{ - Query: r.Query, - Limit: r.Limit, - Step: r.Step, - Interval: r.Interval, - Direction: r.Direction, - Path: r.Path, - StartTs: start, - EndTs: end, - Plan: r.Plan, - }) - }) - case *LokiSeriesRequest: - // metadata queries have end time inclusive. - // Set endTimeInclusive to true so that ForInterval keeps a gap of 1ms between splits to - // avoid querying duplicate data in adjacent queries. - util.ForInterval(interval, r.StartTs, r.EndTs, true, func(start, end time.Time) { - reqs = append(reqs, &LokiSeriesRequest{ - Match: r.Match, - Path: r.Path, - StartTs: start, - EndTs: end, - Shards: r.Shards, - }) - }) - case *LabelRequest: - // metadata queries have end time inclusive. - // Set endTimeInclusive to true so that ForInterval keeps a gap of 1ms between splits to - // avoid querying duplicate data in adjacent queries. 
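These removed branches carried the rationale for the 1ms gap between end-inclusive splits; the gap itself survives the refactor and is surfaced as `util.SplitGap` in the updated tests. A quick sketch of the behaviour, calling `ForInterval` with the signature visible in the removed code:

```go
package main

import (
	"fmt"
	"time"

	"github.com/grafana/loki/pkg/util"
)

func main() {
	// Mirrors the "align start" expectation in split_by_interval_test.go:
	// an end-inclusive split of [5m, 2h] at a 1h interval.
	start := time.Unix(0, (5 * time.Minute).Nanoseconds())
	end := time.Unix(0, (2 * time.Hour).Nanoseconds())

	util.ForInterval(time.Hour, start, end, true, func(s, e time.Time) {
		// each split ends 1ms before the next begins, so adjacent
		// subqueries never both include the boundary instant
		fmt.Println(s.UnixMilli(), e.UnixMilli())
	})
	// 300000 3599999  -> first split stops 1ms short of the hour boundary
	// 3600000 7200000 -> second split runs to the requested end
}
```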
- util.ForInterval(interval, *r.Start, *r.End, true, func(start, end time.Time) { - reqs = append(reqs, NewLabelRequest(start, end, r.Query, r.Name, r.Path())) - }) - case *logproto.IndexStatsRequest: - startTS := r.GetStart() - endTS := r.GetEnd() - util.ForInterval(interval, startTS, endTS, true, func(start, end time.Time) { - reqs = append(reqs, &logproto.IndexStatsRequest{ - From: model.TimeFromUnix(start.Unix()), - Through: model.TimeFromUnix(end.Unix()), - Matchers: r.GetMatchers(), - }) - }) - case *logproto.VolumeRequest: - startTS := r.GetStart() - endTS := r.GetEnd() - util.ForInterval(interval, startTS, endTS, true, func(start, end time.Time) { - reqs = append(reqs, &logproto.VolumeRequest{ - From: model.TimeFromUnix(start.Unix()), - Through: model.TimeFromUnix(end.Unix()), - Matchers: r.GetMatchers(), - Limit: r.Limit, - TargetLabels: r.TargetLabels, - AggregateBy: r.AggregateBy, - }) - }) - default: - return nil, nil - } - return reqs, nil -} - // maxRangeVectorAndOffsetDurationFromQueryString func maxRangeVectorAndOffsetDurationFromQueryString(q string) (time.Duration, time.Duration, error) { parsed, err := syntax.ParseExpr(q) @@ -346,92 +276,3 @@ func maxRangeVectorAndOffsetDuration(expr syntax.Expr) (time.Duration, time.Dura }) return maxRVDuration, maxOffset, nil } - -// reduceSplitIntervalForRangeVector reduces the split interval for a range query based on the duration of the range vector. -// Large range vector durations will not be split into smaller intervals because it can cause the queries to be slow by over-processing data. -func reduceSplitIntervalForRangeVector(r *LokiRequest, interval time.Duration) (time.Duration, error) { - maxRange, _, err := maxRangeVectorAndOffsetDuration(r.Plan.AST) - if err != nil { - return 0, err - } - if maxRange > interval { - return maxRange, nil - } - return interval, nil -} - -func splitMetricByTime(r queryrangebase.Request, interval time.Duration) ([]queryrangebase.Request, error) { - var reqs []queryrangebase.Request - - lokiReq := r.(*LokiRequest) - - interval, err := reduceSplitIntervalForRangeVector(lokiReq, interval) - if err != nil { - return nil, err - } - - // step align start and end time of the query. Start time is rounded down and end time is rounded up. 
- stepNs := r.GetStep() * 1e6 - startNs := lokiReq.StartTs.UnixNano() - start := time.Unix(0, startNs-startNs%stepNs) - - endNs := lokiReq.EndTs.UnixNano() - if mod := endNs % stepNs; mod != 0 { - endNs += stepNs - mod - } - end := time.Unix(0, endNs) - - lokiReq = lokiReq.WithStartEnd(start, end).(*LokiRequest) - - // step is >= configured split interval, let us just split the query interval by step - if lokiReq.Step >= interval.Milliseconds() { - util.ForInterval(time.Duration(lokiReq.Step*1e6), lokiReq.StartTs, lokiReq.EndTs, false, func(start, end time.Time) { - reqs = append(reqs, &LokiRequest{ - Query: lokiReq.Query, - Limit: lokiReq.Limit, - Step: lokiReq.Step, - Interval: lokiReq.Interval, - Direction: lokiReq.Direction, - Path: lokiReq.Path, - StartTs: start, - EndTs: end, - Plan: lokiReq.Plan, - }) - }) - - return reqs, nil - } - - for start := lokiReq.StartTs; start.Before(lokiReq.EndTs); start = nextIntervalBoundary(start, r.GetStep(), interval).Add(time.Duration(r.GetStep()) * time.Millisecond) { - end := nextIntervalBoundary(start, r.GetStep(), interval) - if end.Add(time.Duration(r.GetStep())*time.Millisecond).After(lokiReq.EndTs) || end.Add(time.Duration(r.GetStep())*time.Millisecond) == lokiReq.EndTs { - end = lokiReq.EndTs - } - reqs = append(reqs, &LokiRequest{ - Query: lokiReq.Query, - Limit: lokiReq.Limit, - Step: lokiReq.Step, - Interval: lokiReq.Interval, - Direction: lokiReq.Direction, - Path: lokiReq.Path, - StartTs: start, - EndTs: end, - Plan: lokiReq.Plan, - }) - } - - return reqs, nil -} - -// Round up to the step before the next interval boundary. -func nextIntervalBoundary(t time.Time, step int64, interval time.Duration) time.Time { - stepNs := step * 1e6 - nsPerInterval := interval.Nanoseconds() - startOfNextInterval := ((t.UnixNano() / nsPerInterval) + 1) * nsPerInterval - // ensure that target is a multiple of steps away from the start time - target := startOfNextInterval - ((startOfNextInterval - t.UnixNano()) % stepNs) - if target == startOfNextInterval { - target -= stepNs - } - return time.Unix(0, target) -} diff --git a/pkg/querier/queryrange/split_by_interval_test.go b/pkg/querier/queryrange/split_by_interval_test.go index b236b88fb4d5..acf8c495becc 100644 --- a/pkg/querier/queryrange/split_by_interval_test.go +++ b/pkg/querier/queryrange/split_by_interval_test.go @@ -9,9 +9,9 @@ import ( "testing" "time" - "github.com/prometheus/common/model" - "github.com/grafana/dskit/user" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -22,6 +22,8 @@ import ( "github.com/grafana/loki/pkg/querier/plan" "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/pkg/storage/config" + "github.com/grafana/loki/pkg/storage/stores/index/seriesvolume" + "github.com/grafana/loki/pkg/util" ) var nilMetrics = NewSplitByMetrics(nil) @@ -56,181 +58,393 @@ var testSchemasTSDB = func() []config.PeriodConfig { return confs }() -func Test_splitQuery(t *testing.T) { - buildLokiRequest := func(start, end time.Time) queryrangebase.Request { - return &LokiRequest{ - Query: `{app="foo"}`, - Limit: 1, - Step: 2, - StartTs: start, - EndTs: end, - Direction: logproto.BACKWARD, - Path: "/path", - Plan: &plan.QueryPlan{ - AST: syntax.MustParseExpr(`{app="foo"}`), - }, - } - } - - buildLokiRequestWithInterval := func(start, end time.Time) queryrangebase.Request { - return &LokiRequest{ - Query: `{app="foo"}`, - Limit: 1, - Interval: 2, - StartTs: start, - EndTs: end, - 
Direction: logproto.BACKWARD, - Path: "/path", - Plan: &plan.QueryPlan{ - AST: syntax.MustParseExpr(`{app="foo"}`), - }, - } - } - - buildLokiSeriesRequest := func(start, end time.Time) queryrangebase.Request { - return &LokiSeriesRequest{ - Match: []string{"match1"}, - StartTs: start, - EndTs: end, - Path: "/series", - Shards: []string{"shard1"}, - } - } - - buildLokiLabelNamesRequest := func(start, end time.Time) queryrangebase.Request { - return NewLabelRequest(start, end, "", "", "/lables") - } +var ( + // 62697274686461792063616b65 + refTime = time.Date(2023, 1, 15, 8, 5, 30, 123456789, time.UTC) + tenantID = "1" +) +func Test_splitQuery(t *testing.T) { type interval struct { start, end time.Time } + for requestType, tc := range map[string]struct { requestBuilderFunc func(start, end time.Time) queryrangebase.Request endTimeInclusive bool }{ - "LokiRequest": { - buildLokiRequest, - false, + "logs request": { + requestBuilderFunc: func(start, end time.Time) queryrangebase.Request { + return &LokiRequest{ + Query: `{app="foo"}`, + Limit: 1, + Step: 2, + StartTs: start, + EndTs: end, + Direction: logproto.BACKWARD, + Path: "/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{app="foo"}`), + }, + } + }, + }, + "logs request with interval": { + requestBuilderFunc: func(start, end time.Time) queryrangebase.Request { + return &LokiRequest{ + Query: `{app="foo"}`, + Limit: 1, + Interval: 2, + StartTs: start, + EndTs: end, + Direction: logproto.BACKWARD, + Path: "/query", + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{app="foo"}`), + }, + } + }, + }, + "series request": { + requestBuilderFunc: func(start, end time.Time) queryrangebase.Request { + return &LokiSeriesRequest{ + Match: []string{"match1"}, + StartTs: start, + EndTs: end, + Path: "/series", + Shards: []string{"shard1"}, + } + }, + endTimeInclusive: true, + }, + "label names request": { + requestBuilderFunc: func(start, end time.Time) queryrangebase.Request { + return NewLabelRequest(start, end, `{foo="bar"}`, "", "/labels") + }, + endTimeInclusive: true, }, - "LokiRequestWithInterval": { - buildLokiRequestWithInterval, - false, + "label values request": { + requestBuilderFunc: func(start, end time.Time) queryrangebase.Request { + return NewLabelRequest(start, end, `{foo="bar"}`, "test", "/label/test/values") + }, + endTimeInclusive: true, }, - "LokiSeriesRequest": { - buildLokiSeriesRequest, - true, + "index stats request": { + requestBuilderFunc: func(start, end time.Time) queryrangebase.Request { + return &logproto.IndexStatsRequest{ + From: model.TimeFromUnix(start.Unix()), + Through: model.TimeFromUnix(end.Unix()), + Matchers: `{host="agent"}`, + } + }, + endTimeInclusive: true, }, - "LokiLabelNamesRequest": { - buildLokiLabelNamesRequest, - true, + "volume request": { + requestBuilderFunc: func(start, end time.Time) queryrangebase.Request { + return &logproto.VolumeRequest{ + From: model.TimeFromUnix(start.Unix()), + Through: model.TimeFromUnix(end.Unix()), + Matchers: `{host="agent"}`, + Limit: 5, + AggregateBy: seriesvolume.Series, + } + }, + endTimeInclusive: true, }, } { expectedSplitGap := time.Duration(0) if tc.endTimeInclusive { - expectedSplitGap = time.Millisecond + expectedSplitGap = util.SplitGap } - for name, intervals := range map[string]struct { - inp interval - expected []interval - }{ - "no_change": { - inp: interval{ - start: time.Unix(0, 0), - end: time.Unix(0, (1 * time.Hour).Nanoseconds()), - }, - expected: []interval{ - { + + t.Run(requestType, func(t *testing.T) { + for name, intervals 
:= range map[string]struct { + input interval + expected []interval + splitInterval time.Duration + splitter splitter + }{ + "no change": { + input: interval{ start: time.Unix(0, 0), end: time.Unix(0, (1 * time.Hour).Nanoseconds()), }, + expected: []interval{ + { + start: time.Unix(0, 0), + end: time.Unix(0, (1 * time.Hour).Nanoseconds()), + }, + }, }, - }, - "align_start": { - inp: interval{ - start: time.Unix(0, (5 * time.Minute).Nanoseconds()), - end: time.Unix(0, (2 * time.Hour).Nanoseconds()), - }, - expected: []interval{ - { + "align start": { + input: interval{ start: time.Unix(0, (5 * time.Minute).Nanoseconds()), - end: time.Unix(0, (1 * time.Hour).Nanoseconds()).Add(-expectedSplitGap), - }, - { - start: time.Unix(0, (1 * time.Hour).Nanoseconds()), end: time.Unix(0, (2 * time.Hour).Nanoseconds()), }, + expected: []interval{ + { + start: time.Unix(0, (5 * time.Minute).Nanoseconds()), + end: time.Unix(0, (1 * time.Hour).Nanoseconds()).Add(-expectedSplitGap), + }, + { + start: time.Unix(0, (1 * time.Hour).Nanoseconds()), + end: time.Unix(0, (2 * time.Hour).Nanoseconds()), + }, + }, }, - }, - "align_end": { - inp: interval{ - start: time.Unix(0, 0), - end: time.Unix(0, (115 * time.Minute).Nanoseconds()), - }, - expected: []interval{ - { + "align end": { + input: interval{ start: time.Unix(0, 0), - end: time.Unix(0, (1 * time.Hour).Nanoseconds()).Add(-expectedSplitGap), - }, - { - start: time.Unix(0, (1 * time.Hour).Nanoseconds()), end: time.Unix(0, (115 * time.Minute).Nanoseconds()), }, + expected: []interval{ + { + start: time.Unix(0, 0), + end: time.Unix(0, (1 * time.Hour).Nanoseconds()).Add(-expectedSplitGap), + }, + { + start: time.Unix(0, (1 * time.Hour).Nanoseconds()), + end: time.Unix(0, (115 * time.Minute).Nanoseconds()), + }, + }, }, - }, - "align_both": { - inp: interval{ - start: time.Unix(0, (5 * time.Minute).Nanoseconds()), - end: time.Unix(0, (175 * time.Minute).Nanoseconds()), + "align both": { + input: interval{ + start: time.Unix(0, (5 * time.Minute).Nanoseconds()), + end: time.Unix(0, (175 * time.Minute).Nanoseconds()), + }, + expected: []interval{ + { + start: time.Unix(0, (5 * time.Minute).Nanoseconds()), + end: time.Unix(0, (1 * time.Hour).Nanoseconds()).Add(-expectedSplitGap), + }, + { + start: time.Unix(0, (1 * time.Hour).Nanoseconds()), + end: time.Unix(0, (2 * time.Hour).Nanoseconds()).Add(-expectedSplitGap), + }, + { + start: time.Unix(0, (2 * time.Hour).Nanoseconds()), + end: time.Unix(0, (175 * time.Minute).Nanoseconds()), + }, + }, }, - expected: []interval{ - { + "no align": { + input: interval{ start: time.Unix(0, (5 * time.Minute).Nanoseconds()), - end: time.Unix(0, (1 * time.Hour).Nanoseconds()).Add(-expectedSplitGap), + end: time.Unix(0, (55 * time.Minute).Nanoseconds()), }, - { - start: time.Unix(0, (1 * time.Hour).Nanoseconds()), - end: time.Unix(0, (2 * time.Hour).Nanoseconds()).Add(-expectedSplitGap), + expected: []interval{ + { + start: time.Unix(0, (5 * time.Minute).Nanoseconds()), + end: time.Unix(0, (55 * time.Minute).Nanoseconds()), + }, }, - { - start: time.Unix(0, (2 * time.Hour).Nanoseconds()), - end: time.Unix(0, (175 * time.Minute).Nanoseconds()), + }, + "wholly within ingester query window": { + input: interval{ + start: refTime.Add(-time.Hour).Truncate(time.Second), + end: refTime, + }, + expected: []interval{ + { + start: refTime.Add(-time.Hour).Truncate(time.Second), + end: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC), + end: refTime, + }, }, + 
splitInterval: time.Hour, + splitter: newDefaultSplitter( + fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 90 * time.Minute}}, + ingesterQueryOpts{queryIngestersWithin: 3 * time.Hour}, + ), }, - }, - "no_align": { - inp: interval{ - start: time.Unix(0, (5 * time.Minute).Nanoseconds()), - end: time.Unix(0, (55 * time.Minute).Nanoseconds()), + "partially within ingester query window": { + input: interval{ + // overlapping `query_ingesters_within` window of 3h + start: refTime.Add(-4 * time.Hour).Add(-30 * time.Minute).Truncate(time.Second), + end: refTime, + }, + expected: []interval{ + // regular intervals until `query_ingesters_within` window + { + start: refTime.Add(-4 * time.Hour).Add(-30 * time.Minute).Truncate(time.Second), + end: time.Date(2023, 1, 15, 4, 0, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 4, 0, 0, 0, time.UTC), + end: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC), + end: time.Date(2023, 1, 15, 5, 5, 30, 123456789, time.UTC).Add(-expectedSplitGap), + }, + // and then different intervals for queries to ingesters + { + start: time.Date(2023, 1, 15, 5, 5, 30, 123456789, time.UTC), + end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC), + end: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC), + end: refTime, + }, + }, + splitInterval: time.Hour, + splitter: newDefaultSplitter( + fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 90 * time.Minute}}, + ingesterQueryOpts{queryIngestersWithin: 3 * time.Hour}, + ), }, - expected: []interval{ - { - start: time.Unix(0, (5 * time.Minute).Nanoseconds()), - end: time.Unix(0, (55 * time.Minute).Nanoseconds()), + "not within ingester query window": { + input: interval{ + // outside `query_ingesters_within` range of 3h + start: refTime.Add(-5 * time.Hour).Truncate(time.Second), + end: refTime.Add(-4 * time.Hour).Truncate(time.Second), + }, + expected: []interval{ + // regular intervals outside `query_ingesters_within` window + { + start: refTime.Add(-5 * time.Hour).Truncate(time.Second), + end: time.Date(2023, 1, 15, 4, 0, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 4, 0, 0, 0, time.UTC), + end: refTime.Add(-4 * time.Hour).Truncate(time.Second), + }, }, + splitInterval: time.Hour, + splitter: newDefaultSplitter( + fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 90 * time.Minute}}, + ingesterQueryOpts{queryIngestersWithin: 3 * time.Hour}, + ), }, - }, - } { - t.Run(fmt.Sprintf("%s - %s", name, requestType), func(t *testing.T) { - inp := tc.requestBuilderFunc(intervals.inp.start, intervals.inp.end) - var want []queryrangebase.Request - for _, interval := range intervals.expected { - want = append(want, tc.requestBuilderFunc(interval.start, interval.end)) - } - splits, err := splitByTime(inp, time.Hour) - require.NoError(t, err) - require.Equal(t, want, splits) - }) - } + "ingester query split by disabled": { + input: interval{ + // overlapping `query_ingesters_within` range of 3h + start: refTime.Add(-4 * time.Hour).Truncate(time.Second), + end: refTime, + }, + expected: []interval{ + // regular intervals only, since ingester split duration is 0 + { + start: refTime.Add(-4 * time.Hour).Truncate(time.Second), + end: time.Date(2023, 1, 15, 5, 0, 0, 0, 
time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC), + end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC), + end: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC), + end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC), + end: refTime, + }, + }, + splitInterval: time.Hour, + splitter: newDefaultSplitter( + fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 0}}, + ingesterQueryOpts{queryIngestersWithin: 3 * time.Hour}, + ), + }, + "ingester query split enabled but query_store_only enabled too": { + input: interval{ + // overlapping `query_ingesters_within` range of 3h + start: refTime.Add(-4 * time.Hour).Truncate(time.Second), + end: refTime, + }, + expected: []interval{ + // regular intervals only, since ingester split duration is 0 + { + start: refTime.Add(-4 * time.Hour).Truncate(time.Second), + end: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC), + end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC), + end: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC), + end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap), + }, + { + start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC), + end: refTime, + }, + }, + splitInterval: time.Hour, + splitter: newDefaultSplitter( + fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 90 * time.Minute}}, + ingesterQueryOpts{queryIngestersWithin: 3 * time.Hour, queryStoreOnly: true}, + ), + }, + } { + t.Run(name, func(t *testing.T) { + req := tc.requestBuilderFunc(intervals.input.start, intervals.input.end) + var want []queryrangebase.Request + for _, exp := range intervals.expected { + want = append(want, tc.requestBuilderFunc(exp.start, exp.end)) + } + + if intervals.splitInterval == 0 { + intervals.splitInterval = time.Hour + } + + if intervals.splitter == nil { + intervals.splitter = newDefaultSplitter(fakeLimits{}, nil) + } + + splits, err := intervals.splitter.split(refTime, []string{tenantID}, req, intervals.splitInterval) + require.NoError(t, err) + if !assert.Equal(t, want, splits) { + t.Logf("expected and actual do not match\n") + defer t.Fail() + + if len(want) != len(splits) { + t.Logf("expected %d splits, got %d\n", len(want), len(splits)) + return + } + + for j := 0; j < len(want); j++ { + exp := want[j] + act := splits[j] + equal := assert.Equal(t, exp, act) + t.Logf("\t#%d [matches: %v]: expected %q/%q got %q/%q\n", j, equal, exp.GetStart(), exp.GetEnd(), act.GetStart(), act.GetEnd()) + } + } + }) + } + }) } } func Test_splitMetricQuery(t *testing.T) { const seconds = 1e3 // 1e3 milliseconds per second. + const shortRange = `rate({app="foo"}[1m])` + const longRange = `rate({app="foo"}[7d])` + for i, tc := range []struct { - input *LokiRequest - expected []queryrangebase.Request - interval time.Duration + input *LokiRequest + expected []queryrangebase.Request + splitInterval time.Duration + splitter splitter }{ // the step is lower than the interval therefore we should split only once. 
{ @@ -238,172 +452,172 @@ func Test_splitMetricQuery(t *testing.T) { StartTs: time.Unix(0, 0), EndTs: time.Unix(0, 60*time.Minute.Nanoseconds()), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix(0, 60*time.Minute.Nanoseconds()), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - interval: 24 * time.Hour, + splitInterval: 24 * time.Hour, }, { input: &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix(60*60, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix(60*60, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - interval: 3 * time.Hour, + splitInterval: 3 * time.Hour, }, { input: &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix(24*3600, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix(24*3600, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - interval: 24 * time.Hour, + splitInterval: 24 * time.Hour, }, { input: &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix(3*3600, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix(3*3600, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - interval: 3 * time.Hour, + splitInterval: 3 * time.Hour, }, { input: &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix(2*24*3600, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix((24*3600)-15, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix((24 * 3600), 0), EndTs: time.Unix((2 * 24 * 3600), 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - interval: 24 * time.Hour, + splitInterval: 24 * time.Hour, }, { input: &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix(2*3*3600, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix((3*3600)-15, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix((3 * 3600), 0), EndTs: time.Unix((2 * 3 * 3600), 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - interval: 3 * time.Hour, + splitInterval: 3 * time.Hour, }, { input: &LokiRequest{ StartTs: time.Unix(3*3600, 0), EndTs: time.Unix(3*24*3600, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(3*3600, 0), EndTs: time.Unix((24*3600)-15, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix(24*3600, 0), EndTs: time.Unix((2*24*3600)-15, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix(2*24*3600, 0), EndTs: time.Unix(3*24*3600, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - 
interval: 24 * time.Hour, + splitInterval: 24 * time.Hour, }, { input: &LokiRequest{ StartTs: time.Unix(2*3600, 0), EndTs: time.Unix(3*3*3600, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(2*3600, 0), EndTs: time.Unix((3*3600)-15, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix(3*3600, 0), EndTs: time.Unix((2*3*3600)-15, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix(2*3*3600, 0), EndTs: time.Unix(3*3*3600, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - interval: 3 * time.Hour, + splitInterval: 3 * time.Hour, }, // step not a multiple of interval @@ -413,29 +627,29 @@ func Test_splitMetricQuery(t *testing.T) { StartTs: time.Unix(2*3600-9, 0), // 2h mod 17s = 9s EndTs: time.Unix(3*3*3600, 0), Step: 17 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(2*3600-9, 0), EndTs: time.Unix((3*3600)-5, 0), // 3h mod 17s = 5s Step: 17 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix((3*3600)+12, 0), EndTs: time.Unix((2*3*3600)-10, 0), // 6h mod 17s = 10s Step: 17 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix(2*3*3600+7, 0), EndTs: time.Unix(3*3*3600+2, 0), // 9h mod 17s = 2s Step: 17 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - interval: 3 * time.Hour, + splitInterval: 3 * time.Hour, }, // end time already step aligned { @@ -443,29 +657,29 @@ func Test_splitMetricQuery(t *testing.T) { StartTs: time.Unix(2*3600, 0), EndTs: time.Unix(3*3*3600+2, 0), // 9h mod 17s = 2s Step: 17 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(2*3600-9, 0), // 2h mod 17s = 9s EndTs: time.Unix((3*3600)-5, 0), // 3h mod 17s = 5s Step: 17 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix((3*3600)+12, 0), EndTs: time.Unix((2*3*3600)-10, 0), // 6h mod 17s = 10s Step: 17 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix(2*3*3600+7, 0), EndTs: time.Unix(3*3*3600+2, 0), Step: 17 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - interval: 3 * time.Hour, + splitInterval: 3 * time.Hour, }, // start & end time not aligned with step { @@ -473,29 +687,29 @@ func Test_splitMetricQuery(t *testing.T) { StartTs: time.Unix(2*3600, 0), EndTs: time.Unix(3*3*3600, 0), Step: 17 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(2*3600-9, 0), // 2h mod 17s = 9s EndTs: time.Unix((3*3600)-5, 0), // 3h mod 17s = 5s Step: 17 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix((3*3600)+12, 0), EndTs: time.Unix((2*3*3600)-10, 0), // 6h mod 17s = 10s Step: 17 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix(2*3*3600+7, 0), EndTs: time.Unix(3*3*3600+2, 0), // 9h mod 17s = 2s Step: 17 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - interval: 3 * time.Hour, + splitInterval: 3 * time.Hour, }, // step larger than split interval @@ 
-504,58 +718,58 @@ func Test_splitMetricQuery(t *testing.T) { StartTs: time.Unix(0, 0), EndTs: time.Unix(25*3600, 0), Step: 6 * 3600 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix(6*3600, 0), Step: 6 * 3600 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix(6*3600, 0), EndTs: time.Unix(12*3600, 0), Step: 6 * 3600 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix(12*3600, 0), EndTs: time.Unix(18*3600, 0), Step: 6 * 3600 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix(18*3600, 0), EndTs: time.Unix(24*3600, 0), Step: 6 * 3600 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, &LokiRequest{ StartTs: time.Unix(24*3600, 0), EndTs: time.Unix(30*3600, 0), Step: 6 * 3600 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - interval: 15 * time.Minute, + splitInterval: 15 * time.Minute, }, { input: &LokiRequest{ StartTs: time.Unix(1*3600, 0), EndTs: time.Unix(3*3600, 0), Step: 6 * 3600 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(0, 0), EndTs: time.Unix(6*3600, 0), Step: 6 * 3600 * seconds, - Query: `rate({app="foo"}[1m])`, + Query: shortRange, }, }, - interval: 15 * time.Minute, + splitInterval: 15 * time.Minute, }, // reduce split by to 6h instead of 1h { @@ -579,7 +793,7 @@ func Test_splitMetricQuery(t *testing.T) { Query: `rate({app="foo"}[6h])`, }, }, - interval: 1 * time.Hour, + splitInterval: 1 * time.Hour, }, // range vector too large we don't want to split it { @@ -587,17 +801,222 @@ func Test_splitMetricQuery(t *testing.T) { StartTs: time.Unix(2*3600, 0), EndTs: time.Unix(3*3*3600, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[7d])`, + Query: longRange, }, expected: []queryrangebase.Request{ &LokiRequest{ StartTs: time.Unix(2*3600, 0), EndTs: time.Unix(3*3*3600, 0), Step: 15 * seconds, - Query: `rate({app="foo"}[7d])`, + Query: longRange, + }, + }, + splitInterval: 15 * time.Minute, + }, + // query is wholly within ingester query window + { + input: &LokiRequest{ + StartTs: refTime.Add(-time.Hour), + EndTs: refTime, + Step: 15 * seconds, + Query: shortRange, + }, + expected: []queryrangebase.Request{ + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 7, 05, 30, 0, time.UTC), // start time is aligned down to step of 15s + EndTs: time.Date(2023, 1, 15, 7, 29, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 8, 5, 45, 0, time.UTC), // end time is aligned up to step of 15s + Step: 15 * seconds, + Query: shortRange, }, }, - interval: 15 * time.Minute, + splitInterval: time.Hour, + splitter: newMetricQuerySplitter( + fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 90 * time.Minute}}, + ingesterQueryOpts{queryIngestersWithin: 3 * time.Hour}, + ), + }, + // query is partially within ingester query window + { + input: &LokiRequest{ + StartTs: refTime.Add(-4 * time.Hour).Add(-30 * time.Minute), + EndTs: refTime, + Step: 15 * seconds, + Query: shortRange, + }, + expected: []queryrangebase.Request{ + // regular intervals until `query_ingesters_within` window + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 3, 35, 30, 0, time.UTC), + EndTs: time.Date(2023, 1, 
15, 3, 59, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 4, 0, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 4, 59, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 5, 5, 15, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + // and then different intervals for queries to ingesters + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 5, 5, 30, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 5, 59, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 7, 29, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 8, 5, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + }, + splitInterval: time.Hour, + splitter: newMetricQuerySplitter( + fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 90 * time.Minute}}, + ingesterQueryOpts{queryIngestersWithin: 3 * time.Hour}, + ), + }, + // not within ingester query window + { + input: &LokiRequest{ + StartTs: refTime.Add(-5 * time.Hour), + EndTs: refTime.Add(-4 * time.Hour), + Step: 15 * seconds, + Query: shortRange, + }, + expected: []queryrangebase.Request{ + // regular intervals until `query_ingesters_within` window + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 3, 5, 30, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 3, 59, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 4, 0, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 4, 5, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + }, + splitInterval: time.Hour, + splitter: newMetricQuerySplitter( + fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 90 * time.Minute}}, + ingesterQueryOpts{queryIngestersWithin: 3 * time.Hour}, + ), + }, + // ingester query split by disabled + { + input: &LokiRequest{ + StartTs: refTime.Add(-4 * time.Hour), + EndTs: refTime, + Step: 15 * seconds, + Query: shortRange, + }, + expected: []queryrangebase.Request{ + // regular intervals only, since ingester split duration is 0 + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 4, 5, 30, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 4, 59, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 5, 59, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 6, 59, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 7, 59, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 8, 5, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + }, + splitInterval: time.Hour, + splitter: newMetricQuerySplitter( + fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 0}}, + ingesterQueryOpts{queryIngestersWithin: 3 * time.Hour}, + ), + }, + // ingester query split by enabled, but 
query_store_only is enabled too + { + input: &LokiRequest{ + StartTs: refTime.Add(-4 * time.Hour), + EndTs: refTime, + Step: 15 * seconds, + Query: shortRange, + }, + expected: []queryrangebase.Request{ + // regular intervals only, since query_store_only is enabled + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 4, 5, 30, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 4, 59, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 5, 59, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 6, 59, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 7, 59, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + &LokiRequest{ + StartTs: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC), + EndTs: time.Date(2023, 1, 15, 8, 5, 45, 0, time.UTC), + Step: 15 * seconds, + Query: shortRange, + }, + }, + splitInterval: time.Hour, + splitter: newMetricQuerySplitter( + fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 90 * time.Minute}}, + ingesterQueryOpts{queryIngestersWithin: 3 * time.Hour, queryStoreOnly: true}, + ), }, } { // Set query plans @@ -612,13 +1031,29 @@ func Test_splitMetricQuery(t *testing.T) { } t.Run(strconv.Itoa(i), func(t *testing.T) { - splits, err := splitMetricByTime(tc.input, tc.interval) + ms := newMetricQuerySplitter(fakeLimits{}, nil) + if tc.splitter != nil { + ms = tc.splitter.(*metricQuerySplitter) + } + + splits, err := ms.split(refTime, []string{tenantID}, tc.input, tc.splitInterval) require.NoError(t, err) - for i, s := range splits { - s := s.(*LokiRequest) - t.Logf(" want: %d start:%s end:%s \n", i, s.StartTs, s.EndTs) + if !assert.Equal(t, tc.expected, splits) { + t.Logf("expected and actual do not match\n") + defer t.Fail() + + if len(tc.expected) != len(splits) { + t.Logf("expected %d splits, got %d\n", len(tc.expected), len(splits)) + return + } + + for j := 0; j < len(tc.expected); j++ { + exp := tc.expected[j] + act := splits[j] + equal := assert.Equal(t, exp, act) + t.Logf("\t#%d [matches: %v]: expected %q/%q got %q/%q\n", j, equal, exp.GetStart(), exp.GetEnd(), act.GetStart(), act.GetEnd()) + } } - require.Equal(t, tc.expected, splits) }) } @@ -646,12 +1081,13 @@ func Test_splitByInterval_Do(t *testing.T) { }, nil }) + defSplitter := newDefaultSplitter(fakeLimits{}, nil) l := WithSplitByLimits(fakeLimits{maxQueryParallelism: 1}, time.Hour) split := SplitByIntervalMiddleware( testSchemas, l, DefaultCodec, - splitByTime, + defSplitter, nilMetrics, ).Wrap(next) @@ -834,11 +1270,12 @@ func Test_series_splitByInterval_Do(t *testing.T) { "1": time.Hour, }, } + defSplitter := newDefaultSplitter(fakeLimits{}, nil) split := SplitByIntervalMiddleware( testSchemas, l, DefaultCodec, - splitByTime, + defSplitter, nilMetrics, ).Wrap(next) @@ -888,12 +1325,13 @@ func Test_series_splitByInterval_Do(t *testing.T) { func Test_seriesvolume_splitByInterval_Do(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "1") + defSplitter := newDefaultSplitter(fakeLimits{}, nil) setup := func(next queryrangebase.Handler, l Limits) queryrangebase.Handler { return SplitByIntervalMiddleware( testSchemas, l, DefaultCodec, - splitByTime, + defSplitter, nilMetrics, ).Wrap(next) } @@ -1050,11 +1488,12 @@ func
Test_ExitEarly(t *testing.T) { }) l := WithSplitByLimits(fakeLimits{maxQueryParallelism: 1}, time.Hour) + defSplitter := newDefaultSplitter(fakeLimits{}, nil) split := SplitByIntervalMiddleware( testSchemas, l, DefaultCodec, - splitByTime, + defSplitter, nilMetrics, ).Wrap(next) @@ -1132,11 +1571,12 @@ func Test_DoesntDeadlock(t *testing.T) { }) l := WithSplitByLimits(fakeLimits{maxQueryParallelism: n}, time.Hour) + defSplitter := newDefaultSplitter(fakeLimits{}, nil) split := SplitByIntervalMiddleware( testSchemas, l, DefaultCodec, - splitByTime, + defSplitter, nilMetrics, ).Wrap(next) diff --git a/pkg/querier/queryrange/splitters.go b/pkg/querier/queryrange/splitters.go new file mode 100644 index 000000000000..0aaecf35cb96 --- /dev/null +++ b/pkg/querier/queryrange/splitters.go @@ -0,0 +1,297 @@ +package queryrange + +import ( + "time" + + "github.com/prometheus/common/model" + + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase" + "github.com/grafana/loki/pkg/util" + "github.com/grafana/loki/pkg/util/validation" +) + +type splitter interface { + split(execTime time.Time, tenantIDs []string, request queryrangebase.Request, interval time.Duration) ([]queryrangebase.Request, error) +} + +type defaultSplitter struct { + limits Limits + iqo util.IngesterQueryOptions +} + +func newDefaultSplitter(limits Limits, iqo util.IngesterQueryOptions) *defaultSplitter { + return &defaultSplitter{limits, iqo} +} + +func (s *defaultSplitter) split(execTime time.Time, tenantIDs []string, req queryrangebase.Request, interval time.Duration) ([]queryrangebase.Request, error) { + var ( + reqs []queryrangebase.Request + factory func(start, end time.Time) + endTimeInclusive = true + ) + + switch r := req.(type) { + case *LokiRequest: + endTimeInclusive = false + factory = func(start, end time.Time) { + reqs = append(reqs, &LokiRequest{ + Query: r.Query, + Limit: r.Limit, + Step: r.Step, + Interval: r.Interval, + Direction: r.Direction, + Path: r.Path, + StartTs: start, + EndTs: end, + Plan: r.Plan, + }) + } + case *LokiSeriesRequest: + // metadata queries have end time inclusive. + // Set endTimeInclusive to true so that ForInterval keeps a gap of 1ms between splits to + // avoid querying duplicate data in adjacent queries. + factory = func(start, end time.Time) { + reqs = append(reqs, &LokiSeriesRequest{ + Match: r.Match, + Path: r.Path, + StartTs: start, + EndTs: end, + Shards: r.Shards, + }) + } + case *LabelRequest: + // metadata queries have end time inclusive. + // Set endTimeInclusive to true so that ForInterval keeps a gap of 1ms between splits to + // avoid querying duplicate data in adjacent queries. 
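+ // For illustration: an inclusive-end label query over [04:00, 06:30] split hourly
+ // becomes [04:00, 04:59:59.999], [05:00, 05:59:59.999] and [06:00, 06:30]; only
+ // the final split keeps the original end time.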
+ factory = func(start, end time.Time) { + reqs = append(reqs, NewLabelRequest(start, end, r.Query, r.Name, r.Path())) + } + case *logproto.IndexStatsRequest: + factory = func(start, end time.Time) { + reqs = append(reqs, &logproto.IndexStatsRequest{ + From: model.TimeFromUnix(start.Unix()), + Through: model.TimeFromUnix(end.Unix()), + Matchers: r.GetMatchers(), + }) + } + case *logproto.VolumeRequest: + factory = func(start, end time.Time) { + reqs = append(reqs, &logproto.VolumeRequest{ + From: model.TimeFromUnix(start.Unix()), + Through: model.TimeFromUnix(end.Unix()), + Matchers: r.GetMatchers(), + Limit: r.Limit, + TargetLabels: r.TargetLabels, + AggregateBy: r.AggregateBy, + }) + } + default: + return nil, nil + } + + var ( + ingesterSplits []queryrangebase.Request + origStart = req.GetStart().UTC() + origEnd = req.GetEnd().UTC() + ) + + start, end, needsIngesterSplits := ingesterQueryBounds(execTime, s.iqo, req) + + if ingesterQueryInterval := validation.MaxDurationOrZeroPerTenant(tenantIDs, s.limits.IngesterQuerySplitDuration); ingesterQueryInterval != 0 && needsIngesterSplits { + // perform splitting using special interval (`split_ingester_queries_by_interval`) + util.ForInterval(ingesterQueryInterval, start, end, endTimeInclusive, factory) + + // rebound after ingester queries have been split out + end = start + start = req.GetStart().UTC() + if endTimeInclusive { + end = end.Add(-util.SplitGap) + } + + // query only overlaps ingester query window, nothing more to do + if start.After(end) || start.Equal(end) { + return reqs, nil + } + + // copy the splits, reset the results + ingesterSplits = reqs + reqs = nil + } else { + start = origStart + end = origEnd + } + + // perform splitting over the rest of the time range + util.ForInterval(interval, origStart, end, endTimeInclusive, factory) + + // move the ingester splits to the end to maintain correct order + reqs = append(reqs, ingesterSplits...) + return reqs, nil +} + +type metricQuerySplitter struct { + limits Limits + iqo util.IngesterQueryOptions +} + +func newMetricQuerySplitter(limits Limits, iqo util.IngesterQueryOptions) *metricQuerySplitter { + return &metricQuerySplitter{limits, iqo} +} + +// reduceSplitIntervalForRangeVector reduces the split interval for a range query based on the duration of the range vector. +// Large range vector durations will not be split into smaller intervals because it can cause the queries to be slow by over-processing data. +func (s *metricQuerySplitter) reduceSplitIntervalForRangeVector(r *LokiRequest, interval time.Duration) (time.Duration, error) { + maxRange, _, err := maxRangeVectorAndOffsetDuration(r.Plan.AST) + if err != nil { + return 0, err + } + if maxRange > interval { + return maxRange, nil + } + return interval, nil +} + +// Round up to the step before the next interval boundary. 
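+// For example (mirroring the metric splitter tests above, which assume a 15s step
+// and a 90m ingester split interval): a split starting at 07:05:30 UTC has its
+// next 90m boundary at 07:30:00, and the last step-aligned instant before that
+// boundary is 07:29:45, so nextIntervalBoundary(07:05:30, 15000 /* ms */, 90m)
+// yields 07:29:45 and the following split starts at 07:30:00.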
+func (s *metricQuerySplitter) nextIntervalBoundary(t time.Time, step int64, interval time.Duration) time.Time { + stepNs := step * 1e6 + nsPerInterval := interval.Nanoseconds() + startOfNextInterval := ((t.UnixNano() / nsPerInterval) + 1) * nsPerInterval + // ensure that target is a multiple of steps away from the start time + target := startOfNextInterval - ((startOfNextInterval - t.UnixNano()) % stepNs) + if target == startOfNextInterval { + target -= stepNs + } + return time.Unix(0, target) +} + +func (s *metricQuerySplitter) split(execTime time.Time, tenantIDs []string, r queryrangebase.Request, interval time.Duration) ([]queryrangebase.Request, error) { + var reqs []queryrangebase.Request + + lokiReq := r.(*LokiRequest) + + interval, err := s.reduceSplitIntervalForRangeVector(lokiReq, interval) + if err != nil { + return nil, err + } + + start, end := s.alignStartEnd(r.GetStep(), lokiReq.StartTs, lokiReq.EndTs) + + lokiReq = lokiReq.WithStartEnd(start, end).(*LokiRequest) + + factory := func(start, end time.Time) { + reqs = append(reqs, &LokiRequest{ + Query: lokiReq.Query, + Limit: lokiReq.Limit, + Step: lokiReq.Step, + Interval: lokiReq.Interval, + Direction: lokiReq.Direction, + Path: lokiReq.Path, + StartTs: start, + EndTs: end, + Plan: lokiReq.Plan, + }) + } + + // step is >= configured split interval, let us just split the query interval by step + // TODO this is likely buggy when step >= query range, how should we handle this? + if lokiReq.Step >= interval.Milliseconds() { + util.ForInterval(time.Duration(lokiReq.Step*1e6), lokiReq.StartTs, lokiReq.EndTs, false, factory) + + return reqs, nil + } + + var ( + ingesterSplits []queryrangebase.Request + needsIngesterSplits bool + ) + + origStart := start + origEnd := end + + start, end, needsIngesterSplits = ingesterQueryBounds(execTime, s.iqo, lokiReq) + start, end = s.alignStartEnd(r.GetStep(), start, end) + + if ingesterQueryInterval := validation.MaxDurationOrZeroPerTenant(tenantIDs, s.limits.IngesterQuerySplitDuration); ingesterQueryInterval != 0 && needsIngesterSplits { + // perform splitting using special interval (`split_ingester_queries_by_interval`) + s.buildMetricSplits(lokiReq.GetStep(), ingesterQueryInterval, start, end, factory) + + // rebound after ingester queries have been split out + // + // the end time should now be the boundary of the `query_ingesters_within` window, which is "start" currently; + // but since start is already step-aligned we need to subtract 1ns to align it down by 1 more step so that we + // get a consistent step between splits + end, _ = s.alignStartEnd(r.GetStep(), start.Add(-time.Nanosecond), end) + // we restore the previous start time (the start time of the query) + start = origStart + + // query only overlaps ingester query window, nothing more to do + if start.After(end) || start.Equal(end) { + return reqs, nil + } + + // copy the splits, reset the results + ingesterSplits = reqs + reqs = nil + } else { + start = origStart + end = origEnd + } + + // perform splitting over the rest of the time range + s.buildMetricSplits(lokiReq.GetStep(), interval, start, end, factory) + + // move the ingester splits to the end to maintain correct order + reqs = append(reqs, ingesterSplits...) + + return reqs, nil +} + +func (s *metricQuerySplitter) alignStartEnd(step int64, start, end time.Time) (time.Time, time.Time) { + // step align start and end time of the query. Start time is rounded down and end time is rounded up.
+ stepNs := step * 1e6 + startNs := start.UnixNano() + + endNs := end.UnixNano() + if mod := endNs % stepNs; mod != 0 { + endNs += stepNs - mod + } + + return time.Unix(0, startNs-startNs%stepNs), time.Unix(0, endNs) +} + +func (s *metricQuerySplitter) buildMetricSplits(step int64, interval time.Duration, start, end time.Time, factory func(start, end time.Time)) { + for splStart := start; splStart.Before(end); splStart = s.nextIntervalBoundary(splStart, step, interval).Add(time.Duration(step) * time.Millisecond) { + splEnd := s.nextIntervalBoundary(splStart, step, interval) + if splEnd.Add(time.Duration(step)*time.Millisecond).After(end) || splEnd.Add(time.Duration(step)*time.Millisecond) == end { + splEnd = end + } + factory(splStart, splEnd) + } +} + +// ingesterQueryBounds determines if we need to split time ranges overlapping the ingester query window (`query_ingesters_within`) +// and retrieve the bounds for those specific splits +func ingesterQueryBounds(execTime time.Time, iqo util.IngesterQueryOptions, req queryrangebase.Request) (time.Time, time.Time, bool) { + start, end := req.GetStart().UTC(), req.GetEnd().UTC() + + // ingesters are not queried, nothing to do + if iqo == nil || iqo.QueryStoreOnly() { + return start, end, false + } + + windowSize := iqo.QueryIngestersWithin() + ingesterWindow := execTime.UTC().Add(-windowSize) + + // clamp to the start time + if ingesterWindow.Before(start) { + ingesterWindow = start + } + + // query range does not overlap with ingester query window, nothing to do + if end.Before(ingesterWindow) { + return start, end, false + } + + return ingesterWindow, end, true +} diff --git a/pkg/querier/queryrange/volume_cache.go b/pkg/querier/queryrange/volume_cache.go index 954c642ffef8..147d61912db9 100644 --- a/pkg/querier/queryrange/volume_cache.go +++ b/pkg/querier/queryrange/volume_cache.go @@ -101,6 +101,7 @@ func NewVolumeCacheMiddleware( merger queryrangebase.Merger, c cache.Cache, cacheGenNumberLoader queryrangebase.CacheGenNumberLoader, + iqo util.IngesterQueryOptions, shouldCache queryrangebase.ShouldCacheFn, parallelismForReq queryrangebase.ParallelismForReqFn, retentionEnabled bool, @@ -110,7 +111,7 @@ func NewVolumeCacheMiddleware( return queryrangebase.NewResultsCacheMiddleware( log, c, - VolumeSplitter{cacheKeyLimits{limits, transformer}}, + VolumeSplitter{cacheKeyLimits{limits, transformer, iqo}}, limits, merger, VolumeExtractor{}, diff --git a/pkg/querier/queryrange/volume_cache_test.go b/pkg/querier/queryrange/volume_cache_test.go index 904e0fc7c3a9..038d8fa925f5 100644 --- a/pkg/querier/queryrange/volume_cache_test.go +++ b/pkg/querier/queryrange/volume_cache_test.go @@ -39,6 +39,7 @@ func TestVolumeCache(t *testing.T) { c, nil, nil, + nil, func(_ context.Context, _ []string, _ queryrangebase.Request) int { return 1 }, @@ -304,6 +305,7 @@ func TestVolumeCache_RecentData(t *testing.T) { c, nil, nil, + nil, func(_ context.Context, _ []string, _ queryrangebase.Request) int { return 1 }, diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go index 2eaeb576b318..946aeaf54495 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer.go @@ -2,6 +2,7 @@ package v1 import ( "context" + "fmt" "math" "time" @@ -82,75 +83,82 @@ func prefixedToken(ngram int, chk logproto.ChunkRef) ([]byte, int) { } // PopulateSeriesWithBloom is intended to be called on the write path, and is used to populate the bloom filter for a given series. 
-func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBloom, chunks []chunk.Chunk) error { +func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *SeriesWithBloom, chunks Iterator[[]chunk.Chunk]) error { startTime := time.Now().UnixMilli() level.Debug(util_log.Logger).Log("msg", "PopulateSeriesWithBloom") clearCache(bt.cache) chunkTotalUncompressedSize := 0 - for idx := range chunks { - lc := chunks[idx].Data.(*chunkenc.Facade).LokiChunk() - tokenBuf, prefixLn := prefixedToken(bt.lineTokenizer.N, chunks[idx].ChunkRef) - chunkTotalUncompressedSize += lc.UncompressedSize() - - itr, err := lc.Iterator( - context.Background(), - time.Unix(0, 0), // TODO: Parameterize/better handle the timestamps? - time.Unix(0, math.MaxInt64), - logproto.FORWARD, - log.NewNoopPipeline().ForStream(chunks[idx].Metric), - ) - if err != nil { - level.Error(util_log.Logger).Log("msg", "chunk iterator cannot be created", "err", err) - return err - } - - defer itr.Close() - - for itr.Next() && itr.Error() == nil { - chunkTokenizer := NewPrefixedTokenIter(tokenBuf, prefixLn, bt.lineTokenizer.Tokens(itr.Entry().Line)) - for chunkTokenizer.Next() { - tok := chunkTokenizer.At() - if tok != nil { - str := string(tok) - _, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters - if !found { - bt.cache[str] = nil - - seriesWithBloom.Bloom.ScalableBloomFilter.TestAndAdd(tok) - - if len(bt.cache) >= cacheSize { // While crude, this has proven efficient in performance testing. This speaks to the similarity in log lines near each other - clearCache(bt.cache) + for chunks.Next() { + chunksBatch := chunks.At() + for idx := range chunksBatch { + lc := chunksBatch[idx].Data.(*chunkenc.Facade).LokiChunk() + tokenBuf, prefixLn := prefixedToken(bt.lineTokenizer.N, chunksBatch[idx].ChunkRef) + chunkTotalUncompressedSize += lc.UncompressedSize() + + itr, err := lc.Iterator( + context.Background(), + time.Unix(0, 0), // TODO: Parameterize/better handle the timestamps? + time.Unix(0, math.MaxInt64), + logproto.FORWARD, + log.NewNoopPipeline().ForStream(chunksBatch[idx].Metric), + ) + if err != nil { + level.Error(util_log.Logger).Log("msg", "chunk iterator cannot be created", "err", err) + return err + } + + defer itr.Close() + + for itr.Next() && itr.Error() == nil { + chunkTokenizer := NewPrefixedTokenIter(tokenBuf, prefixLn, bt.lineTokenizer.Tokens(itr.Entry().Line)) + for chunkTokenizer.Next() { + tok := chunkTokenizer.At() + if tok != nil { + str := string(tok) + _, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters + if !found { + bt.cache[str] = nil + + seriesWithBloom.Bloom.ScalableBloomFilter.TestAndAdd(tok) + + if len(bt.cache) >= cacheSize { // While crude, this has proven efficient in performance testing. This speaks to the similarity in log lines near each other + clearCache(bt.cache) + } } } } - } - lineTokenizer := bt.lineTokenizer.Tokens(itr.Entry().Line) - for lineTokenizer.Next() { - tok := lineTokenizer.At() - if tok != nil { - str := string(tok) - _, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters - if !found { - bt.cache[str] = nil - - seriesWithBloom.Bloom.ScalableBloomFilter.TestAndAdd(tok) - - if len(bt.cache) >= cacheSize { // While crude, this has proven efficient in performance testing. 
This speaks to the similarity in log lines near each other - clearCache(bt.cache) + lineTokenizer := bt.lineTokenizer.Tokens(itr.Entry().Line) + for lineTokenizer.Next() { + tok := lineTokenizer.At() + if tok != nil { + str := string(tok) + _, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters + if !found { + bt.cache[str] = nil + + seriesWithBloom.Bloom.ScalableBloomFilter.TestAndAdd(tok) + + if len(bt.cache) >= cacheSize { // While crude, this has proven efficient in performance testing. This speaks to the similarity in log lines near each other + clearCache(bt.cache) + } } } } - } - } - seriesWithBloom.Series.Chunks = append(seriesWithBloom.Series.Chunks, ChunkRef{ - Start: chunks[idx].From, - End: chunks[idx].Through, - Checksum: chunks[idx].Checksum, - }) - } // for each chunk + } + seriesWithBloom.Series.Chunks = append(seriesWithBloom.Series.Chunks, ChunkRef{ + Start: chunksBatch[idx].From, + End: chunksBatch[idx].Through, + Checksum: chunksBatch[idx].Checksum, + }) + } // for each chunk + } + if err := chunks.Err(); err != nil { + level.Error(util_log.Logger).Log("msg", "error downloading chunks batch", "err", err) + return fmt.Errorf("error downloading chunks batch: %w", err) + } endTime := time.Now().UnixMilli() diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go index f22c74165124..0fad08e78f08 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer_test.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go @@ -123,7 +123,7 @@ func TestPopulateSeriesWithBloom(t *testing.T) { Series: &series, } - err := bt.PopulateSeriesWithBloom(&swb, chunks) + err := bt.PopulateSeriesWithBloom(&swb, NewSliceIter([][]chunk.Chunk{chunks})) require.NoError(t, err) tokenizer := NewNGramTokenizer(DefaultNGramLength, DefaultNGramSkip) itr := tokenizer.Tokens(testLine) @@ -171,7 +171,7 @@ func BenchmarkPopulateSeriesWithBloom(b *testing.B) { Series: &series, } - err := bt.PopulateSeriesWithBloom(&swb, chunks) + err := bt.PopulateSeriesWithBloom(&swb, NewSliceIter([][]chunk.Chunk{chunks})) require.NoError(b, err) } } diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go index 622e076f97b0..e67dce4c9754 100644 --- a/pkg/storage/bloom/v1/builder_test.go +++ b/pkg/storage/bloom/v1/builder_test.go @@ -5,6 +5,7 @@ import ( "errors" "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/grafana/loki/pkg/chunkenc" @@ -193,3 +194,170 @@ func TestMergeBuilder(t *testing.T) { querier, ) } + +func TestBlockReset(t *testing.T) { + numSeries := 100 + numKeysPerSeries := 10000 + data, _ := mkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 1, 0xffff, 0, 10000) + + indexBuf := bytes.NewBuffer(nil) + bloomsBuf := bytes.NewBuffer(nil) + writer := NewMemoryBlockWriter(indexBuf, bloomsBuf) + reader := NewByteReader(indexBuf, bloomsBuf) + + schema := Schema{ + version: DefaultSchemaVersion, + encoding: chunkenc.EncSnappy, + nGramLength: 10, + nGramSkip: 2, + } + + builder, err := NewBlockBuilder( + BlockOptions{ + schema: schema, + SeriesPageSize: 100, + BloomPageSize: 10 << 10, + }, + writer, + ) + + require.Nil(t, err) + itr := NewSliceIter[SeriesWithBloom](data) + _, err = builder.BuildFrom(itr) + require.Nil(t, err) + block := NewBlock(reader) + querier := NewBlockQuerier(block) + + rounds := make([][]model.Fingerprint, 2) + + for i := 0; i < len(rounds); i++ { + for querier.Next() { + rounds[i] = append(rounds[i], 
querier.At().Series.Fingerprint) + } + + err = querier.Seek(0) // reset at end + require.Nil(t, err) + } + + require.Equal(t, rounds[0], rounds[1]) +} + +// This test is a basic roundtrip test for the merge builder. +// It creates one set of blocks with the same (duplicate) data, and another set of blocks with +// disjoint data. It then merges the two sets of blocks and ensures that the merged blocks contain +// one copy of the first set (duplicate data) and one copy of the second set (disjoint data). +func TestMergeBuilder_Roundtrip(t *testing.T) { + numSeries := 100 + numKeysPerSeries := 100 + minTs, maxTs := model.Time(0), model.Time(10000) + xs, _ := mkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 0, 0xffff, minTs, maxTs) + + var data [][]*SeriesWithBloom + + // First, we build the blocks + + sets := []int{ + 2, // 2 blocks containing the same data + 1, // 1 block containing disjoint data + } + + for i, copies := range sets { + for j := 0; j < copies; j++ { + // references for linking in memory reader+writer + indexBuf := bytes.NewBuffer(nil) + bloomsBuf := bytes.NewBuffer(nil) + + writer := NewMemoryBlockWriter(indexBuf, bloomsBuf) + reader := NewByteReader(indexBuf, bloomsBuf) + + builder, err := NewBlockBuilder( + BlockOptions{ + schema: Schema{ + version: DefaultSchemaVersion, + encoding: chunkenc.EncSnappy, + }, + SeriesPageSize: 100, + BloomPageSize: 10 << 10, + }, + writer, + ) + + require.Nil(t, err) + // each set of copies gets a different slice of the data + minIdx, maxIdx := i*len(xs)/len(sets), (i+1)*len(xs)/len(sets) + itr := NewSliceIter[SeriesWithBloom](xs[minIdx:maxIdx]) + _, err = builder.BuildFrom(itr) + require.Nil(t, err) + block := NewBlock(reader) + querier := NewBlockQuerier(block) + + // rather than use the block querier directly, collect its data + // so we can use it in a few places later + var tmp []*SeriesWithBloom + for querier.Next() { + tmp = append(tmp, querier.At()) + } + data = append(data, tmp) + } + } + + // we keep 2 copies of the data as iterators. One for the blocks, and one for the "store" + // which will force it to reference the same series + var blocks []PeekingIterator[*SeriesWithBloom] + var store []PeekingIterator[*SeriesWithBloom] + + for _, x := range data { + blocks = append(blocks, NewPeekingIter[*SeriesWithBloom](NewSliceIter[*SeriesWithBloom](x))) + store = append(store, NewPeekingIter[*SeriesWithBloom](NewSliceIter[*SeriesWithBloom](x))) + } + + orderedStore := NewHeapIterForSeriesWithBloom(store...)
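+ // the heap iterator yields the duplicated series ordered by fingerprint, so the
+ // deduping iterator below can collapse adjacent identical series into one entry,
+ // keeping whichever copy carries more chunks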
+ dedupedStore := NewDedupingIter[*SeriesWithBloom, *Series]( + func(a *SeriesWithBloom, b *Series) bool { + return a.Series.Fingerprint == b.Fingerprint + }, + func(swb *SeriesWithBloom) *Series { + return swb.Series + }, + func(a *SeriesWithBloom, b *Series) *Series { + if len(a.Series.Chunks) > len(b.Chunks) { + return a.Series + } + return b + }, + NewPeekingIter[*SeriesWithBloom](orderedStore), + ) + + // build the new block from the old ones + indexBuf, bloomBuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil) + writer := NewMemoryBlockWriter(indexBuf, bloomBuf) + reader := NewByteReader(indexBuf, bloomBuf) + mb := NewMergeBuilder( + blocks, + dedupedStore, + func(s *Series, b *Bloom) error { + // We're not actually indexing new data in this test + return nil + }, + ) + builder, err := NewBlockBuilder(NewBlockOptions(4, 0), writer) + require.Nil(t, err) + + checksum, err := mb.Build(builder) + require.Nil(t, err) + require.Equal(t, uint32(0x779633b5), checksum) + + // ensure the new block contains one copy of all the data + // by comparing it against an iterator over the source data + mergedBlockQuerier := NewBlockQuerier(NewBlock(reader)) + sourceItr := NewSliceIter[*SeriesWithBloom](PointerSlice[SeriesWithBloom](xs)) + + EqualIterators[*SeriesWithBloom]( + t, + func(a, b *SeriesWithBloom) { + require.Equal(t, a.Series.Fingerprint, b.Series.Fingerprint) + }, + sourceItr, + mergedBlockQuerier, + ) +} diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go index fb74e6a1638d..1ccc372248a8 100644 --- a/pkg/storage/bloom/v1/index.go +++ b/pkg/storage/bloom/v1/index.go @@ -297,8 +297,9 @@ func (d *SeriesPageDecoder) Next() bool { } func (d *SeriesPageDecoder) Seek(fp model.Fingerprint) { - if fp > d.header.ThroughFp || fp < d.header.FromFp { - // shortcut: we know the fingerprint is not in this page + if fp > d.header.ThroughFp { + // shortcut: we know the fingerprint is too large so nothing in this page + // will match the seek call, which returns the first found fingerprint >= fp. 
// so masquerade the index as if we've already iterated through d.i = d.header.NumSeries } diff --git a/pkg/storage/bloom/v1/index_querier.go b/pkg/storage/bloom/v1/index_querier.go index cbd8da7579c3..2d653e35bad9 100644 --- a/pkg/storage/bloom/v1/index_querier.go +++ b/pkg/storage/bloom/v1/index_querier.go @@ -55,7 +55,7 @@ func (it *LazySeriesIter) Seek(fp model.Fingerprint) error { page := it.b.index.pageHeaders[desiredPage] switch { - case desiredPage == len(it.b.index.pageHeaders), page.FromFp > fp: + case desiredPage == len(it.b.index.pageHeaders): // no overlap exists, either because no page was found with a throughFP >= fp // or because the first page that was found has a fromFP > fp, // meaning successive pages would also have a fromFP > fp diff --git a/pkg/storage/bloom/v1/merge_test.go b/pkg/storage/bloom/v1/merge_test.go index c1600f00dcbe..8f2bf02f4c2d 100644 --- a/pkg/storage/bloom/v1/merge_test.go +++ b/pkg/storage/bloom/v1/merge_test.go @@ -33,6 +33,36 @@ func TestMergeBlockQuerier_NonOverlapping(t *testing.T) { require.False(t, mbq.Next()) } +func TestMergeBlockQuerier_Duplicate(t *testing.T) { + var ( + numSeries = 100 + numKeysPerSeries = 10000 + numQueriers = 2 + queriers []PeekingIterator[*SeriesWithBloom] + data, _ = mkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 0, 0xffff, 0, 10000) + ) + for i := 0; i < numQueriers; i++ { + queriers = append( + queriers, + NewPeekingIter[*SeriesWithBloom]( + NewSliceIter[*SeriesWithBloom]( + PointerSlice[SeriesWithBloom](data), + ), + ), + ) + } + + mbq := NewHeapIterForSeriesWithBloom(queriers...) + + for i := 0; i < numSeries*2; i++ { + require.True(t, mbq.Next()) + exp := data[i/2].Series.Fingerprint + got := mbq.At().Series.Fingerprint + require.Equal(t, exp, got, "on iteration %d", i) + } + require.False(t, mbq.Next()) +} + func TestMergeBlockQuerier_Overlapping(t *testing.T) { var ( numSeries = 100 diff --git a/pkg/storage/chunk/cache/background.go b/pkg/storage/chunk/cache/background.go index 16feb62551f5..299444c6a54e 100644 --- a/pkg/storage/chunk/cache/background.go +++ b/pkg/storage/chunk/cache/background.go @@ -148,8 +148,11 @@ func (c *backgroundCache) Store(ctx context.Context, keys []string, bufs [][]byt } size := bgWrite.size() - newSize := c.size.Load() + int64(size) + // prospectively add new size + newSize := c.size.Add(int64(size)) if newSize > int64(c.sizeLimit) { + // subtract it since we've exceeded the limit + c.size.Sub(int64(size)) c.failStore(ctx, size, num, "queue at byte size limit") return nil } diff --git a/pkg/storage/chunk/cache/resultscache/cache.go b/pkg/storage/chunk/cache/resultscache/cache.go index 527aea84bcd1..d05d71837404 100644 --- a/pkg/storage/chunk/cache/resultscache/cache.go +++ b/pkg/storage/chunk/cache/resultscache/cache.go @@ -50,7 +50,7 @@ type ResultsCache struct { next Handler cache cache.Cache limits Limits - splitter KeyGenerator + keyGen KeyGenerator cacheGenNumberLoader CacheGenNumberLoader retentionEnabled bool extractor Extractor @@ -86,7 +86,7 @@ func NewResultsCache( next: next, cache: c, limits: limits, - splitter: keyGen, + keyGen: keyGen, cacheGenNumberLoader: cacheGenNumberLoader, retentionEnabled: retentionEnabled, extractor: extractor, @@ -115,7 +115,7 @@ func (s ResultsCache) Do(ctx context.Context, r Request) (Response, error) { } var ( - key = s.splitter.GenerateCacheKey(ctx, tenant.JoinTenantIDs(tenantIDs), r) + key = s.keyGen.GenerateCacheKey(ctx, tenant.JoinTenantIDs(tenantIDs), r) extents []Extent response Response ) diff --git 
a/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go b/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go index a28c76c12f78..ffe715c857ec 100644 --- a/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/block_downloader_test.go @@ -74,7 +74,7 @@ func Test_blockDownloader_downloadBlocks(t *testing.T) { func Test_blockDownloader_downloadBlock(t *testing.T) { tests := map[string]struct { cacheEnabled bool - expectedTotalGetBlocksCalls int + expectedTotalGetBlocksCalls int32 }{ "cache disabled": { cacheEnabled: false, @@ -129,7 +129,7 @@ func Test_blockDownloader_downloadBlock(t *testing.T) { case <-done: } require.Len(t, downloadedBlocks, 20, "all 20 block must be downloaded") - require.Equal(t, 20, blockClient.getBlockCalls) + require.Equal(t, int32(20), blockClient.getBlockCalls.Load()) blocksCh, errorsCh = downloader.downloadBlocks(context.Background(), "fake", blockReferences) downloadedBlocks = make(map[string]any, len(blockReferences)) @@ -150,7 +150,7 @@ func Test_blockDownloader_downloadBlock(t *testing.T) { case <-done: } require.Len(t, downloadedBlocks, 20, "all 20 block must be downloaded") - require.Equal(t, testData.expectedTotalGetBlocksCalls, blockClient.getBlockCalls) + require.Equal(t, testData.expectedTotalGetBlocksCalls, blockClient.getBlockCalls.Load()) }) } } @@ -158,7 +158,7 @@ func Test_blockDownloader_downloadBlock(t *testing.T) { func Test_blockDownloader_downloadBlock_deduplication(t *testing.T) { tests := map[string]struct { cacheEnabled bool - expectedTotalGetBlocksCalls int + expectedTotalGetBlocksCalls int32 }{ "requests to blockClient must be deduplicated by blockPath if cache is enabled": { cacheEnabled: true, @@ -195,7 +195,7 @@ func Test_blockDownloader_downloadBlock_deduplication(t *testing.T) { t.Cleanup(downloader.stop) require.NoError(t, err) - blocksDownloadedCount := atomic.Uint32{} + var blocksDownloadedCount atomic.Uint32 mutex := sync.Mutex{} multiError := util.MultiError{} waitGroup := sync.WaitGroup{} @@ -225,7 +225,7 @@ func Test_blockDownloader_downloadBlock_deduplication(t *testing.T) { require.NoError(t, multiError.Err()) require.Equal(t, uint32(10), blocksDownloadedCount.Load()) - require.Equal(t, testData.expectedTotalGetBlocksCalls, blockClient.getBlockCalls) + require.Equal(t, testData.expectedTotalGetBlocksCalls, blockClient.getBlockCalls.Load()) }) } } @@ -340,11 +340,11 @@ type blockSupplier func() LazyBlock type mockBlockClient struct { responseDelay time.Duration mockData map[string]blockSupplier - getBlockCalls int + getBlockCalls atomic.Int32 } func (m *mockBlockClient) GetBlock(_ context.Context, reference BlockRef) (LazyBlock, error) { - m.getBlockCalls++ + m.getBlockCalls.Inc() time.Sleep(m.responseDelay) supplier, exists := m.mockData[reference.BlockPath] if exists { diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go index d5d981cddb9e..b189cba390b8 100644 --- a/pkg/storage/stores/shipper/bloomshipper/client.go +++ b/pkg/storage/stores/shipper/bloomshipper/client.go @@ -133,17 +133,18 @@ func (b *BloomClient) GetMetas(ctx context.Context, params MetaSearchParams) ([] periodClient := b.periodicObjectClients[periodFrom] for _, table := range tables { prefix := filepath.Join(rootFolder, table, params.TenantID, metasFolder) - list, _, err := periodClient.List(ctx, prefix, delimiter) + list, _, err := periodClient.List(ctx, prefix, "") if err != nil { return nil, fmt.Errorf("error 
listing metas under prefix [%s]: %w", prefix, err) } for _, object := range list { metaRef, err := createMetaRef(object.Key, params.TenantID, table) + if err != nil { return nil, err } if metaRef.MaxFingerprint < uint64(params.MinFingerprint) || uint64(params.MaxFingerprint) < metaRef.MinFingerprint || - metaRef.StartTimestamp.Before(params.StartTimestamp) || metaRef.EndTimestamp.After(params.EndTimestamp) { + metaRef.EndTimestamp.Before(params.StartTimestamp) || metaRef.StartTimestamp.After(params.EndTimestamp) { continue } meta, err := b.downloadMeta(ctx, metaRef, periodClient) diff --git a/pkg/storage/stores/shipper/bloomshipper/compress_utils.go b/pkg/storage/stores/shipper/bloomshipper/compress_utils.go index aa30ec4901f0..96af5e987c3d 100644 --- a/pkg/storage/stores/shipper/bloomshipper/compress_utils.go +++ b/pkg/storage/stores/shipper/bloomshipper/compress_utils.go @@ -5,10 +5,10 @@ import ( "io" "os" "path/filepath" - "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/google/uuid" v1 "github.com/grafana/loki/pkg/storage/bloom/v1" ) @@ -42,8 +42,9 @@ func UncompressBloomBlock(block *LazyBlock, workingDirectory string, logger log. if err != nil { return "", fmt.Errorf("error writing data to temp file: %w", err) } + level.Info(logger).Log("msg", "extracting archive", "archive", archivePath, "workingDirectory", workingDirectoryPath, "blockPath", block.BlockPath) defer func() { - os.Remove(archivePath) + err = os.Remove(archivePath) if err != nil { level.Error(logger).Log("msg", "removing archive file", "err", err, "file", archivePath) } @@ -57,7 +58,7 @@ func UncompressBloomBlock(block *LazyBlock, workingDirectory string, logger log. func writeDataToTempFile(workingDirectoryPath string, block *LazyBlock) (string, error) { defer block.Data.Close() - archivePath := filepath.Join(workingDirectoryPath, block.BlockPath[strings.LastIndex(block.BlockPath, "/")+1:]) + archivePath := filepath.Join(workingDirectoryPath, uuid.New().String()) archiveFile, err := os.Create(archivePath) if err != nil { @@ -74,7 +75,7 @@ func writeDataToTempFile(workingDirectoryPath string, block *LazyBlock) (string, func extractArchive(archivePath string, workingDirectoryPath string) error { file, err := os.Open(archivePath) if err != nil { - return fmt.Errorf("error opening archive file %s: %w", file.Name(), err) + return fmt.Errorf("error opening archive file %s: %w", archivePath, err) } return v1.UnTarGz(workingDirectoryPath, file) } diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper.go b/pkg/storage/stores/shipper/bloomshipper/shipper.go index d7038fc13761..36bfba913c98 100644 --- a/pkg/storage/stores/shipper/bloomshipper/shipper.go +++ b/pkg/storage/stores/shipper/bloomshipper/shipper.go @@ -1,7 +1,6 @@ package bloomshipper import ( - "cmp" "context" "fmt" "math" @@ -12,9 +11,33 @@ import ( "github.com/prometheus/common/model" "golang.org/x/exp/slices" + v1 "github.com/grafana/loki/pkg/storage/bloom/v1" "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config" ) +type fpRange [2]uint64 + +func (r fpRange) minFp() uint64 { + return r[0] +} + +func (r fpRange) maxFp() uint64 { + return r[1] +} + +type BlockQuerierWithFingerprintRange struct { + *v1.BlockQuerier + MinFp, MaxFp model.Fingerprint +} + +type ForEachBlockCallback func(bq *v1.BlockQuerier, minFp, maxFp uint64) error + +type Interface interface { + GetBlockRefs(ctx context.Context, tenant string, from, through model.Time) ([]BlockRef, error) + Fetch(ctx context.Context, tenant string, blocks 
[]BlockRef, callback ForEachBlockCallback) error + Stop() +} + type Shipper struct { client Client config config.Config @@ -43,7 +66,7 @@ func NewShipper(client Client, config config.Config, limits Limits, logger log.L func (s *Shipper) GetBlockRefs(ctx context.Context, tenantID string, from, through model.Time) ([]BlockRef, error) { level.Debug(s.logger).Log("msg", "GetBlockRefs", "tenant", tenantID, "from", from, "through", through) - blockRefs, err := s.getActiveBlockRefs(ctx, tenantID, from, through, []uint64{0, math.MaxUint64}) + blockRefs, err := s.getActiveBlockRefs(ctx, tenantID, from, through, []fpRange{{0, math.MaxUint64}}) if err != nil { return nil, fmt.Errorf("error fetching active block references : %w", err) } @@ -55,30 +78,36 @@ func (s *Shipper) Fetch(ctx context.Context, tenantID string, blocks []BlockRef, defer cancelFunc() blocksChannel, errorsChannel := s.blockDownloader.downloadBlocks(cancelContext, tenantID, blocks) + // track how many blocks are still remaining to be downloaded + remaining := len(blocks) + for { select { case <-ctx.Done(): return fmt.Errorf("failed to fetch blocks: %w", ctx.Err()) - case result, ok := <-blocksChannel: - if !ok { + case result, sentBeforeClosed := <-blocksChannel: + if !sentBeforeClosed { return nil } err := runCallback(callback, result) if err != nil { return err } - case err := <-errorsChannel: - if err != nil { - return fmt.Errorf("error downloading blocks : %w", err) + remaining-- + if remaining == 0 { + return nil } + case err := <-errorsChannel: + return fmt.Errorf("error downloading blocks : %w", err) } } } func runCallback(callback ForEachBlockCallback, block blockWithQuerier) error { - defer func(result blockWithQuerier) { - _ = result.Close() + defer func(b blockWithQuerier) { + _ = b.Close() }(block) + err := callback(block.closableBlockQuerier.BlockQuerier, block.MinFingerprint, block.MaxFingerprint) if err != nil { return fmt.Errorf("error running callback function for block %s err: %w", block.BlockPath, err) @@ -86,17 +115,6 @@ func runCallback(callback ForEachBlockCallback, block blockWithQuerier) error { return nil } -func (s *Shipper) ForEachBlock(ctx context.Context, tenantID string, from, through model.Time, fingerprints []uint64, callback ForEachBlockCallback) error { - level.Debug(s.logger).Log("msg", "ForEachBlock", "tenant", tenantID, "from", from, "through", through, "fingerprints", len(fingerprints)) - - blockRefs, err := s.getActiveBlockRefs(ctx, tenantID, from, through, fingerprints) - if err != nil { - return fmt.Errorf("error fetching active block references : %w", err) - } - - return s.Fetch(ctx, tenantID, blockRefs, callback) -} - func (s *Shipper) Stop() { s.client.Stop() s.blockDownloader.stop() @@ -112,18 +130,19 @@ func getFirstLast[T any](s []T) (T, T) { return s[0], s[len(s)-1] } -func (s *Shipper) getActiveBlockRefs(ctx context.Context, tenantID string, from, through model.Time, fingerprints []uint64) ([]BlockRef, error) { - minFingerprint, maxFingerprint := getFirstLast(fingerprints) +func (s *Shipper) getActiveBlockRefs(ctx context.Context, tenantID string, from, through model.Time, fingerprints []fpRange) ([]BlockRef, error) { + minFpRange, maxFpRange := getFirstLast(fingerprints) metas, err := s.client.GetMetas(ctx, MetaSearchParams{ TenantID: tenantID, - MinFingerprint: model.Fingerprint(minFingerprint), - MaxFingerprint: model.Fingerprint(maxFingerprint), + MinFingerprint: model.Fingerprint(minFpRange.minFp()), + MaxFingerprint: model.Fingerprint(maxFpRange.maxFp()), StartTimestamp: from,
EndTimestamp: through, }) if err != nil { return []BlockRef{}, fmt.Errorf("error fetching meta.json files: %w", err) } + level.Debug(s.logger).Log("msg", "downloaded metas", "count", len(metas)) activeBlocks := s.findBlocks(metas, from, through, fingerprints) slices.SortStableFunc(activeBlocks, func(a, b BlockRef) int { if a.MinFingerprint < b.MinFingerprint { @@ -138,7 +157,7 @@ func (s *Shipper) getActiveBlockRefs(ctx context.Context, tenantID string, from, return activeBlocks, nil } -func (s *Shipper) findBlocks(metas []Meta, startTimestamp, endTimestamp model.Time, fingerprints []uint64) []BlockRef { +func (s *Shipper) findBlocks(metas []Meta, startTimestamp, endTimestamp model.Time, fingerprints []fpRange) []BlockRef { outdatedBlocks := make(map[string]interface{}) for _, meta := range metas { for _, tombstone := range meta.Tombstones { @@ -164,39 +183,29 @@ func (s *Shipper) findBlocks(metas []Meta, startTimestamp, endTimestamp model.Ti return blockRefs } -// getPosition returns the smallest index of element v in slice s where v > s[i] -// TODO(chaudum): Use binary search to find index instead of iteration. -func getPosition[S ~[]E, E cmp.Ordered](s S, v E) int { - for i := range s { - if v > s[i] { - continue - } - return i - } - return len(s) -} - -func isOutsideRange(b *BlockRef, startTimestamp, endTimestamp model.Time, fingerprints []uint64) bool { +// isOutsideRange tests if a given BlockRef b is outside of search boundaries +// defined by min/max timestamp and min/max fingerprint. +// Fingerprint ranges must be sorted in ascending order. +func isOutsideRange(b *BlockRef, startTimestamp, endTimestamp model.Time, fingerprints []fpRange) bool { // First, check time range if b.EndTimestamp < startTimestamp || b.StartTimestamp > endTimestamp { return true } // Then, check if outside of min/max of fingerprint slice - minFp, maxFp := getFirstLast(fingerprints) - if b.MaxFingerprint < minFp || b.MinFingerprint > maxFp { + minFpRange, maxFpRange := getFirstLast(fingerprints) + if b.MaxFingerprint < minFpRange.minFp() || b.MinFingerprint > maxFpRange.maxFp() { + return true } - // Check if the block range is inside a "gap" in the fingerprint slice - // e.g.
- // fingerprints = [1, 2, 6, 7, 8] - // block = [3, 4, 5] - idx := getPosition[[]uint64](fingerprints, b.MinFingerprint) - // in case b.MinFingerprint is outside of the fingerprints range, return true - // this is already covered in the range check above, but I keep it as a second gate - if idx > len(fingerprints)-1 { - return true + prev := fpRange{0, 0} + for i := 0; i < len(fingerprints); i++ { + fpr := fingerprints[i] + if b.MinFingerprint > prev.maxFp() && b.MaxFingerprint < fpr.minFp() { + return true + } + prev = fpr } - return b.MaxFingerprint < fingerprints[idx] + + return false } diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go index 83c9379cd44c..859aa38c82a6 100644 --- a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go @@ -4,6 +4,7 @@ import ( "fmt" "math" "testing" + "time" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -40,7 +41,7 @@ func Test_Shipper_findBlocks(t *testing.T) { } shipper := &Shipper{} - blocks := shipper.findBlocks(metas, 300, 400, []uint64{100, 200}) + blocks := shipper.findBlocks(metas, model.Now().Add(-2*time.Hour), model.Now().Add(-1*time.Hour), []fpRange{{100, 200}}) expectedBlockRefs := []BlockRef{ createMatchingBlockRef("block2"), @@ -53,8 +54,8 @@ func Test_Shipper_findBlocks(t *testing.T) { tests := map[string]struct { minFingerprint uint64 maxFingerprint uint64 - startTimestamp int64 - endTimestamp int64 + startTimestamp model.Time + endTimestamp model.Time filtered bool }{ "expected block not to be filtered out if minFingerprint and startTimestamp are within range": { @@ -94,7 +95,7 @@ func Test_Shipper_findBlocks(t *testing.T) { t.Run(name, func(t *testing.T) { shipper := &Shipper{} ref := createBlockRef("fake-block", data.minFingerprint, data.maxFingerprint, data.startTimestamp, data.endTimestamp) - blocks := shipper.findBlocks([]Meta{{Blocks: []BlockRef{ref}}}, 300, 400, []uint64{100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200}) + blocks := shipper.findBlocks([]Meta{{Blocks: []BlockRef{ref}}}, 300, 400, []fpRange{{100, 200}}) if data.filtered { require.Empty(t, blocks) return @@ -105,94 +106,83 @@ func Test_Shipper_findBlocks(t *testing.T) { } } -func TestGetPosition(t *testing.T) { - for i, tc := range []struct { - s []int - v int - exp int - }{ - {s: []int{}, v: 1, exp: 0}, - {s: []int{1, 2, 3}, v: 0, exp: 0}, - {s: []int{1, 2, 3}, v: 2, exp: 1}, - {s: []int{1, 2, 3}, v: 4, exp: 3}, - {s: []int{1, 2, 4, 5}, v: 3, exp: 2}, - } { - tc := tc - name := fmt.Sprintf("case-%d", i) - t.Run(name, func(t *testing.T) { - got := getPosition[[]int](tc.s, tc.v) - require.Equal(t, tc.exp, got) - }) - } -} - func TestIsOutsideRange(t *testing.T) { + startTs := model.Time(1000) + endTs := model.Time(2000) + t.Run("is outside if startTs > through", func(t *testing.T) { - b := createBlockRef("block", 0, math.MaxUint64, 100, 200) - isOutside := isOutsideRange(&b, 0, 90, []uint64{}) + b := createBlockRef("block", 0, math.MaxUint64, startTs, endTs) + isOutside := isOutsideRange(&b, 0, 900, []fpRange{}) require.True(t, isOutside) }) t.Run("is outside if endTs < from", func(t *testing.T) { - b := createBlockRef("block", 0, math.MaxUint64, 100, 200) - isOutside := isOutsideRange(&b, 210, 300, []uint64{}) + b := createBlockRef("block", 0, math.MaxUint64, startTs, endTs) + isOutside := isOutsideRange(&b, 2100, 3000, []fpRange{}) require.True(t, isOutside) }) t.Run("is outside if endFp < 
first fingerprint", func(t *testing.T) { - b := createBlockRef("block", 0, 90, 100, 200) - isOutside := isOutsideRange(&b, 100, 200, []uint64{100, 200}) + b := createBlockRef("block", 0, 90, startTs, endTs) + isOutside := isOutsideRange(&b, startTs, endTs, []fpRange{{100, 199}}) require.True(t, isOutside) }) t.Run("is outside if startFp > last fingerprint", func(t *testing.T) { - b := createBlockRef("block", 210, math.MaxUint64, 100, 200) - isOutside := isOutsideRange(&b, 100, 200, []uint64{100, 200}) + b := createBlockRef("block", 200, math.MaxUint64, startTs, endTs) + isOutside := isOutsideRange(&b, startTs, endTs, []fpRange{{0, 49}, {100, 149}}) require.True(t, isOutside) }) t.Run("is outside if within gaps in fingerprints", func(t *testing.T) { - b := createBlockRef("block", 100, 200, 100, 200) - isOutside := isOutsideRange(&b, 100, 200, []uint64{0, 99, 201, 300}) + b := createBlockRef("block", 100, 199, startTs, endTs) + isOutside := isOutsideRange(&b, startTs, endTs, []fpRange{{0, 99}, {200, 299}}) require.True(t, isOutside) }) t.Run("is not outside if within fingerprints 1", func(t *testing.T) { - b := createBlockRef("block", 100, 200, 100, 200) - isOutside := isOutsideRange(&b, 100, 200, []uint64{0, 100, 200, 300}) + b := createBlockRef("block", 10, 90, startTs, endTs) + isOutside := isOutsideRange(&b, startTs, endTs, []fpRange{{0, 99}, {200, 299}}) require.False(t, isOutside) }) t.Run("is not outside if within fingerprints 2", func(t *testing.T) { - b := createBlockRef("block", 100, 150, 100, 200) - isOutside := isOutsideRange(&b, 100, 200, []uint64{0, 100, 200, 300}) + b := createBlockRef("block", 210, 290, startTs, endTs) + isOutside := isOutsideRange(&b, startTs, endTs, []fpRange{{0, 99}, {200, 299}}) + require.False(t, isOutside) + }) + + t.Run("is not outside if spans across multiple fingerprint ranges", func(t *testing.T) { + b := createBlockRef("block", 50, 250, startTs, endTs) + isOutside := isOutsideRange(&b, startTs, endTs, []fpRange{{0, 99}, {200, 299}}) require.False(t, isOutside) }) - t.Run("is not outside if within fingerprints 3", func(t *testing.T) { - b := createBlockRef("block", 150, 200, 100, 200) - isOutside := isOutsideRange(&b, 100, 200, []uint64{0, 100, 200, 300}) + t.Run("is not outside if fingerprint range and time range are larger than block", func(t *testing.T) { + b := createBlockRef("block", math.MaxUint64/3, math.MaxUint64/3*2, startTs, endTs) + isOutside := isOutsideRange(&b, 0, 3000, []fpRange{{0, math.MaxUint64}}) require.False(t, isOutside) }) } func createMatchingBlockRef(blockPath string) BlockRef { - return createBlockRef(blockPath, 0, uint64(math.MaxUint64), 0, math.MaxInt) + return createBlockRef(blockPath, 0, math.MaxUint64, model.Time(0), model.Now()) } func createBlockRef( blockPath string, minFingerprint, maxFingerprint uint64, - startTimestamp, endTimestamp int64, + startTimestamp, endTimestamp model.Time, ) BlockRef { + day := startTimestamp.Unix() / int64(24*time.Hour/time.Second) return BlockRef{ Ref: Ref{ TenantID: "fake", - TableName: "16600", + TableName: fmt.Sprintf("%d", day), MinFingerprint: minFingerprint, MaxFingerprint: maxFingerprint, - StartTimestamp: model.Time(startTimestamp), - EndTimestamp: model.Time(endTimestamp), + StartTimestamp: startTimestamp, + EndTimestamp: endTimestamp, Checksum: 0, }, // block path is unique, and it's used to distinguish the blocks so the rest of the fields might be skipped in this test diff --git a/pkg/storage/stores/shipper/bloomshipper/store.go 
b/pkg/storage/stores/shipper/bloomshipper/store.go deleted file mode 100644 index 06e1d7a4675b..000000000000 --- a/pkg/storage/stores/shipper/bloomshipper/store.go +++ /dev/null @@ -1,99 +0,0 @@ -package bloomshipper - -import ( - "context" - "sort" - "time" - - "github.com/prometheus/common/model" - - v1 "github.com/grafana/loki/pkg/storage/bloom/v1" -) - -type ForEachBlockCallback func(bq *v1.BlockQuerier, minFp, maxFp uint64) error - -type ReadShipper interface { - GetBlockRefs(ctx context.Context, tenant string, from, through model.Time) ([]BlockRef, error) - ForEachBlock(ctx context.Context, tenant string, from, through model.Time, fingerprints []uint64, callback ForEachBlockCallback) error - Fetch(ctx context.Context, tenant string, blocks []BlockRef, callback ForEachBlockCallback) error -} - -type Interface interface { - ReadShipper - Stop() -} - -type BlockQuerierWithFingerprintRange struct { - *v1.BlockQuerier - MinFp, MaxFp model.Fingerprint -} - -type Store interface { - GetBlockRefs(ctx context.Context, tenant string, from, through time.Time) ([]BlockRef, error) - GetBlockQueriers(ctx context.Context, tenant string, from, through time.Time, fingerprints []uint64) ([]BlockQuerierWithFingerprintRange, error) - GetBlockQueriersForBlockRefs(ctx context.Context, tenant string, blocks []BlockRef) ([]BlockQuerierWithFingerprintRange, error) - ForEach(ctx context.Context, tenant string, blocks []BlockRef, callback ForEachBlockCallback) error - Stop() -} - -type BloomStore struct { - shipper Interface -} - -func NewBloomStore(shipper Interface) (*BloomStore, error) { - return &BloomStore{ - shipper: shipper, - }, nil -} - -func (bs *BloomStore) Stop() { - bs.shipper.Stop() -} - -// GetBlockRefs implements Store -func (bs *BloomStore) GetBlockRefs(ctx context.Context, tenant string, from, through time.Time) ([]BlockRef, error) { - return bs.shipper.GetBlockRefs(ctx, tenant, toModelTime(from), toModelTime(through)) -} - -// ForEach implements Store -func (bs *BloomStore) ForEach(ctx context.Context, tenant string, blocks []BlockRef, callback ForEachBlockCallback) error { - return bs.shipper.Fetch(ctx, tenant, blocks, callback) -} - -// GetQueriersForBlocks implements Store -func (bs *BloomStore) GetBlockQueriersForBlockRefs(ctx context.Context, tenant string, blocks []BlockRef) ([]BlockQuerierWithFingerprintRange, error) { - bqs := make([]BlockQuerierWithFingerprintRange, 0, 32) - err := bs.shipper.Fetch(ctx, tenant, blocks, func(bq *v1.BlockQuerier, minFp uint64, maxFp uint64) error { - bqs = append(bqs, BlockQuerierWithFingerprintRange{ - BlockQuerier: bq, - MinFp: model.Fingerprint(minFp), - MaxFp: model.Fingerprint(maxFp), - }) - return nil - }) - sort.Slice(bqs, func(i, j int) bool { - return bqs[i].MinFp < bqs[j].MinFp - }) - return bqs, err -} - -// BlockQueriers implements Store -func (bs *BloomStore) GetBlockQueriers(ctx context.Context, tenant string, from, through time.Time, fingerprints []uint64) ([]BlockQuerierWithFingerprintRange, error) { - bqs := make([]BlockQuerierWithFingerprintRange, 0, 32) - err := bs.shipper.ForEachBlock(ctx, tenant, toModelTime(from), toModelTime(through), fingerprints, func(bq *v1.BlockQuerier, minFp uint64, maxFp uint64) error { - bqs = append(bqs, BlockQuerierWithFingerprintRange{ - BlockQuerier: bq, - MinFp: model.Fingerprint(minFp), - MaxFp: model.Fingerprint(maxFp), - }) - return nil - }) - sort.Slice(bqs, func(i, j int) bool { - return bqs[i].MinFp < bqs[j].MinFp - }) - return bqs, err -} - -func toModelTime(t time.Time) model.Time { - return 
model.TimeFromUnixNano(t.UnixNano()) -} diff --git a/pkg/storage/stores/shipper/bloomshipper/store_test.go b/pkg/storage/stores/shipper/bloomshipper/store_test.go deleted file mode 100644 index ec48f7caa040..000000000000 --- a/pkg/storage/stores/shipper/bloomshipper/store_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package bloomshipper - -import ( - "testing" -) - -func TestBloomShipper(_ *testing.T) { -} - -func TestBloomStore(_ *testing.T) { -} diff --git a/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go b/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go index 1040bd6c1b56..8b0f186386bd 100644 --- a/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go +++ b/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go @@ -204,7 +204,7 @@ func (g *Gateway) GetChunkRef(ctx context.Context, req *logproto.GetChunkRefRequ return nil, err } - predicate := chunk.NewPredicate(matchers, *(&req.Filters)) + predicate := chunk.NewPredicate(matchers, req.Filters) chunks, _, err := g.indexQuerier.GetChunks(ctx, instanceID, req.From, req.Through, predicate) if err != nil { return nil, err @@ -219,8 +219,11 @@ func (g *Gateway) GetChunkRef(ctx context.Context, req *logproto.GetChunkRefRequ } } + initialChunkCount := len(result.Refs) + // Return unfiltered results if there is no bloom querier (Bloom Gateway disabled) or if there are no filters. if g.bloomQuerier == nil || len(req.Filters) == 0 { + level.Info(g.log).Log("msg", "chunk filtering is not enabled or there is no line filter", "filters", len(req.Filters)) return result, nil } @@ -234,6 +237,7 @@ func (g *Gateway) GetChunkRef(ctx context.Context, req *logproto.GetChunkRefRequ } result.Refs = chunkRefs + level.Info(g.log).Log("msg", "return filtered chunk refs", "unfiltered", initialChunkCount, "filtered", len(result.Refs)) return result, nil } diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/head_manager.go b/pkg/storage/stores/shipper/indexshipper/tsdb/head_manager.go index d9f6382c2d2b..bae41255554d 100644 --- a/pkg/storage/stores/shipper/indexshipper/tsdb/head_manager.go +++ b/pkg/storage/stores/shipper/indexshipper/tsdb/head_manager.go @@ -144,66 +144,75 @@ func NewHeadManager(name string, logger log.Logger, dir string, metrics *Metrics return m } -func (m *HeadManager) loop() { - defer m.wg.Done() +func (m *HeadManager) buildPrev() error { + if m.prev == nil { + return nil + } - buildPrev := func() error { - if m.prev == nil { - return nil - } + if err := m.buildTSDBFromHead(m.prevHeads); err != nil { + return err + } - if err := m.buildTSDBFromHead(m.prevHeads); err != nil { - return err - } + // Now that the tsdbManager has the updated TSDBs, we can remove our references + m.mtx.Lock() + defer m.mtx.Unlock() + // We nil-out the previous wal to signal that we've built the TSDBs from it successfully. + // We don't nil-out the heads because we need to keep them around + // in order to serve queries for the recently rotated out period until + // the index-gws|queriers have time to download the new TSDBs + m.prev = nil - // Now that the tsdbManager has the updated TSDBs, we can remove our references - m.mtx.Lock() - defer m.mtx.Unlock() - m.prevHeads = nil - m.prev = nil + return nil +} - return nil +// tick handles one iteration for `loop()`. It builds new heads, +// cleans up previous heads, and performs rotations.
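+// Splitting tick out of loop also makes the rotation logic testable with a
+// synthetic clock: a test can call tick(now) directly instead of waiting
+// on the real ticker (see Test_HeadManager_QueryAfterRotate below).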
+func (m *HeadManager) tick(now time.Time) { + // retry tsdb build failures from previous run + if err := m.buildPrev(); err != nil { + level.Error(m.log).Log( + "msg", "failed building tsdb head", + "period", m.period.PeriodFor(m.prev.initialized), + "err", err, + ) + // rotating head without building prev would result in loss of index for that period (until restart) + return + } + + if activePeriod := m.period.PeriodFor(m.activeHeads.start); m.period.PeriodFor(now) > activePeriod { + if err := m.Rotate(now); err != nil { + m.metrics.headRotations.WithLabelValues(statusFailure).Inc() + level.Error(m.log).Log( + "msg", "failed rotating tsdb head", + "period", activePeriod, + "err", err, + ) + return + } + m.metrics.headRotations.WithLabelValues(statusSuccess).Inc() } + // build tsdb from rotated-out period + if err := m.buildPrev(); err != nil { + level.Error(m.log).Log( + "msg", "failed building tsdb head", + "period", m.period.PeriodFor(m.prev.initialized), + "err", err, + ) + } +} + +func (m *HeadManager) loop() { + defer m.wg.Done() + ticker := time.NewTicker(defaultRotationCheckPeriod) defer ticker.Stop() for { select { case <-ticker.C: - // retry tsdb build failures from previous run - if err := buildPrev(); err != nil { - level.Error(m.log).Log( - "msg", "failed building tsdb head", - "period", m.period.PeriodFor(m.prev.initialized), - "err", err, - ) - // rotating head without building prev would result in loss of index for that period (until restart) - continue - } - now := time.Now() - if activePeriod := m.period.PeriodFor(m.activeHeads.start); m.period.PeriodFor(now) > activePeriod { - if err := m.Rotate(now); err != nil { - m.metrics.headRotations.WithLabelValues(statusFailure).Inc() - level.Error(m.log).Log( - "msg", "failed rotating tsdb head", - "period", activePeriod, - "err", err, - ) - continue - } - m.metrics.headRotations.WithLabelValues(statusSuccess).Inc() - } - - // build tsdb from rotated-out period - if err := buildPrev(); err != nil { - level.Error(m.log).Log( - "msg", "failed building tsdb head", - "period", m.period.PeriodFor(m.prev.initialized), - "err", err, - ) - } + m.tick(now) case <-m.cancel: return } diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/head_manager_test.go b/pkg/storage/stores/shipper/indexshipper/tsdb/head_manager_test.go index f8a1b38a1fd7..c58e55645717 100644 --- a/pkg/storage/stores/shipper/indexshipper/tsdb/head_manager_test.go +++ b/pkg/storage/stores/shipper/indexshipper/tsdb/head_manager_test.go @@ -43,7 +43,7 @@ func newNoopTSDBManager(name, dir string) noopTSDBManager { } func (m noopTSDBManager) BuildFromHead(_ *tenantHeads) error { - panic("BuildFromHead not implemented") + return nil } func (m noopTSDBManager) BuildFromWALs(_ time.Time, wals []WALIdentifier, _ bool) error { @@ -266,6 +266,61 @@ func Test_HeadManager_RecoverHead(t *testing.T) { } +// test head still serves data for the most recently rotated period. 
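+// After the synthetic tick rotates the head, buildPrev() nils out the previous
+// WAL but deliberately keeps prevHeads around, so GetChunkRefs is still
+// expected to return the chunks written before the rotation.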
+func Test_HeadManager_QueryAfterRotate(t *testing.T) { + now := time.Now() + dir := t.TempDir() + cases := []struct { + Labels labels.Labels + Fingerprint uint64 + Chunks []index.ChunkMeta + User string + }{ + { + User: "tenant1", + Labels: mustParseLabels(`{foo="bar", bazz="buzz"}`), + Fingerprint: mustParseLabels(`{foo="bar", bazz="buzz"}`).Hash(), + Chunks: []index.ChunkMeta{ + { + MinTime: 1, + MaxTime: 10, + Checksum: 3, + }, + }, + }, + } + + storeName := "store_2010-10-10" + mgr := NewHeadManager(storeName, log.NewNopLogger(), dir, NewMetrics(nil), newNoopTSDBManager(storeName, dir)) + // This bit is normally handled by the Start() fn, but we're testing a smaller surface area + // so ensure our dirs exist + for _, d := range managerRequiredDirs(storeName, dir) { + require.Nil(t, util.EnsureDirectory(d)) + } + require.Nil(t, mgr.Rotate(now)) // initialize head (usually done by Start()) + + // add data for the test tenants + for _, tc := range cases { + require.Nil(t, mgr.Append(tc.User, tc.Labels, tc.Labels.Hash(), tc.Chunks)) + } + + nextPeriod := time.Now().Add(time.Duration(mgr.period)) + mgr.tick(nextPeriod) // synthetic tick to rotate head + + for _, c := range cases { + refs, err := mgr.GetChunkRefs( + context.Background(), + c.User, + 0, math.MaxInt64, + nil, nil, + labels.MustNewMatcher(labels.MatchRegexp, "foo", ".+"), + ) + require.Nil(t, err) + require.Equal(t, chunkMetasToChunkRefs(c.User, c.Fingerprint, c.Chunks), refs) + } + +} + // test mgr recover from multiple wals across multiple periods func Test_HeadManager_Lifecycle(t *testing.T) { dir := t.TempDir() diff --git a/pkg/tracing/config.go b/pkg/tracing/config.go index 1c97d88a845d..f9faefa6a730 100644 --- a/pkg/tracing/config.go +++ b/pkg/tracing/config.go @@ -5,7 +5,8 @@ import ( ) type Config struct { - Enabled bool `yaml:"enabled"` + Enabled bool `yaml:"enabled"` + ProfilingEnabled bool `yaml:"profiling_enabled" category:"experimental" doc:"hidden"` } func (cfg *Config) RegisterFlags(f *flag.FlagSet) { @@ -14,4 +15,5 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.BoolVar(&cfg.Enabled, prefix+"tracing.enabled", true, "Set to false to disable tracing.") + f.BoolVar(&cfg.ProfilingEnabled, prefix+"tracing.profiling-enabled", true, "Set to true to enable profiling integration.") } diff --git a/pkg/util/config.go b/pkg/util/config.go index 6989931fb618..f54d469690c9 100644 --- a/pkg/util/config.go +++ b/pkg/util/config.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "strings" + "time" "github.com/go-kit/log/level" "github.com/prometheus/common/version" @@ -38,3 +39,10 @@ func PrintConfig(w io.Writer, config interface{}) error { fmt.Fprintf(w, "---\n# Loki Config\n# %s\n%s\n\n", version.Info(), string(lc)) return nil } + +// IngesterQueryOptions exists because querier.Config cannot be passed directly to the queryrange package +// due to an import cycle. +type IngesterQueryOptions interface { + QueryStoreOnly() bool + QueryIngestersWithin() time.Duration +} diff --git a/pkg/util/time.go b/pkg/util/time.go index 8f9e0c01b0a9..b943fea92aad 100644 --- a/pkg/util/time.go +++ b/pkg/util/time.go @@ -87,6 +87,8 @@ func NewDisableableTicker(interval time.Duration) (func(), <-chan time.Time) { return func() { tick.Stop() }, tick.C } +const SplitGap = time.Millisecond + // ForInterval splits the given start and end time into given interval.
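// For example (illustrative): with interval=1h and endTimeInclusive=true, a request
// spanning 00:00-02:30 is split into [00:00, 00:59:59.999], [01:00, 01:59:59.999]
// and [02:00, 02:30]; adjacent splits are kept SplitGap (1ms) apart so the same
// boundary instant is not queried twice.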
// The start and end time in splits would be aligned to the interval except for the start time of first split and end time of last split which would be kept same as original start/end @@ -107,7 +109,7 @@ func ForInterval(interval time.Duration, start, end time.Time, endTimeInclusive if !newEnd.Before(end) { newEnd = end } else if endTimeInclusive { - newEnd = newEnd.Add(-time.Millisecond) + newEnd = newEnd.Add(-SplitGap) } if firstInterval { callback(ogStart, newEnd) diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index 7f1f6ea0d734..ac25798c33e3 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -97,6 +97,7 @@ type Limits struct { MaxConcurrentTailRequests int `yaml:"max_concurrent_tail_requests" json:"max_concurrent_tail_requests"` MaxEntriesLimitPerQuery int `yaml:"max_entries_limit_per_query" json:"max_entries_limit_per_query"` MaxCacheFreshness model.Duration `yaml:"max_cache_freshness_per_query" json:"max_cache_freshness_per_query"` + MaxMetadataCacheFreshness model.Duration `yaml:"max_metadata_cache_freshness" json:"max_metadata_cache_freshness"` MaxStatsCacheFreshness model.Duration `yaml:"max_stats_cache_freshness" json:"max_stats_cache_freshness"` MaxQueriersPerTenant uint `yaml:"max_queriers_per_tenant" json:"max_queriers_per_tenant"` MaxQueryCapacity float64 `yaml:"max_query_capacity" json:"max_query_capacity"` @@ -106,6 +107,7 @@ type Limits struct { // Query frontend enforced limits. The default is actually parameterized by the queryrange config. QuerySplitDuration model.Duration `yaml:"split_queries_by_interval" json:"split_queries_by_interval"` MetadataQuerySplitDuration model.Duration `yaml:"split_metadata_queries_by_interval" json:"split_metadata_queries_by_interval"` + IngesterQuerySplitDuration model.Duration `yaml:"split_ingester_queries_by_interval" json:"split_ingester_queries_by_interval"` MinShardingLookback model.Duration `yaml:"min_sharding_lookback" json:"min_sharding_lookback"` MaxQueryBytesRead flagext.ByteSize `yaml:"max_query_bytes_read" json:"max_query_bytes_read"` MaxQuerierBytesRead flagext.ByteSize `yaml:"max_querier_bytes_read" json:"max_querier_bytes_read"` @@ -187,6 +189,7 @@ type Limits struct { BloomCompactorShardSize int `yaml:"bloom_compactor_shard_size" json:"bloom_compactor_shard_size"` BloomCompactorMaxTableAge time.Duration `yaml:"bloom_compactor_max_table_age" json:"bloom_compactor_max_table_age"` BloomCompactorEnabled bool `yaml:"bloom_compactor_enable_compaction" json:"bloom_compactor_enable_compaction"` + BloomCompactorChunksBatchSize int `yaml:"bloom_compactor_chunks_batch_size" json:"bloom_compactor_chunks_batch_size"` BloomNGramLength int `yaml:"bloom_ngram_length" json:"bloom_ngram_length"` BloomNGramSkip int `yaml:"bloom_ngram_skip" json:"bloom_ngram_skip"` BloomFalsePositiveRate float64 `yaml:"bloom_false_positive_rate" json:"bloom_false_positive_rate"` @@ -275,6 +278,9 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { _ = l.MaxCacheFreshness.Set("10m") f.Var(&l.MaxCacheFreshness, "frontend.max-cache-freshness", "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.") + _ = l.MaxMetadataCacheFreshness.Set("24h") + f.Var(&l.MaxMetadataCacheFreshness, "frontend.max-metadata-cache-freshness", "Do not cache metadata requests if the end time is within the frontend.max-metadata-cache-freshness window. Set this to 0 to apply no such limits. Defaults to 24h.") + _ = l.MaxStatsCacheFreshness.Set("10m") f.Var(&l.MaxStatsCacheFreshness, "frontend.max-stats-cache-freshness", "Do not cache requests with an end time that falls within Now minus this duration. 0 disables this feature (default).") @@ -296,9 +302,16 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { _ = l.QuerySplitDuration.Set("1h") f.Var(&l.QuerySplitDuration, "querier.split-queries-by-interval", "Split queries by a time interval and execute in parallel. The value 0 disables splitting by time. This also determines how cache keys are chosen when result caching is enabled.") + // with metadata caching, it is not possible to extract a subset of labels/series from a cached extent because unlike samples they are not associated with a timestamp. + // as a result, we could return inaccurate results. Example: returning results from an entire 1h extent for a 5m query. + // Setting max_metadata_cache_freshness to 24h should help us avoid caching recent data and preserve correctness. + // For the portion of the request beyond the freshness window, granularity of the cached metadata results is determined by split_metadata_queries_by_interval. _ = l.MetadataQuerySplitDuration.Set("24h") f.Var(&l.MetadataQuerySplitDuration, "querier.split-metadata-queries-by-interval", "Split metadata queries by a time interval and execute in parallel. The value 0 disables splitting metadata queries by time. This also determines how cache keys are chosen when label/series result caching is enabled.") + _ = l.IngesterQuerySplitDuration.Set("0s") + f.Var(&l.IngesterQuerySplitDuration, "querier.split-ingester-queries-by-interval", "Interval to use for time-based splitting when a request is within the `query_ingesters_within` window; the value 0 makes it default to `split-queries-by-interval`.") + f.StringVar(&l.DeletionMode, "compactor.deletion-mode", "filter-and-delete", "Deletion mode. Can be one of 'disabled', 'filter-only', or 'filter-and-delete'. When set to 'filter-only' or 'filter-and-delete', and if retention_enabled is true, then the log entry deletion API endpoints are available.") // Deprecated @@ -312,6 +325,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.BloomCompactorShardSize, "bloom-compactor.shard-size", 1, "The shard size defines how many bloom compactors should be used by a tenant when computing blooms. If it's set to 0, shuffle sharding is disabled.") f.DurationVar(&l.BloomCompactorMaxTableAge, "bloom-compactor.max-table-age", 7*24*time.Hour, "The maximum age of a table before it is compacted. Do not compact tables older than the configured time. Defaults to 7 days.
0s means no limit.") f.BoolVar(&l.BloomCompactorEnabled, "bloom-compactor.enable-compaction", false, "Whether to compact chunks into bloom filters.") + f.IntVar(&l.BloomCompactorChunksBatchSize, "bloom-compactor.chunks-batch-size", 100, "The batch size of the chunks the bloom-compactor downloads at once.") f.IntVar(&l.BloomNGramLength, "bloom-compactor.ngram-length", 4, "Length of the n-grams created when computing blooms from log lines.") f.IntVar(&l.BloomNGramSkip, "bloom-compactor.ngram-skip", 0, "Skip factor for the n-grams created when computing blooms from log lines.") f.Float64Var(&l.BloomFalsePositiveRate, "bloom-compactor.false-positive-rate", 0.01, "Scalable Bloom Filter desired false-positive rate.") @@ -574,6 +588,12 @@ func (o *Overrides) MetadataQuerySplitDuration(userID string) time.Duration { return time.Duration(o.getOverridesForUser(userID).MetadataQuerySplitDuration) } +// IngesterQuerySplitDuration returns the tenant specific splitby interval applied in the query frontend when querying +// during the `query_ingesters_within` window. +func (o *Overrides) IngesterQuerySplitDuration(userID string) time.Duration { + return time.Duration(o.getOverridesForUser(userID).IngesterQuerySplitDuration) +} + // MaxQueryBytesRead returns the maximum bytes a query can read. func (o *Overrides) MaxQueryBytesRead(_ context.Context, userID string) int { return o.getOverridesForUser(userID).MaxQueryBytesRead.Val() @@ -612,6 +632,10 @@ func (o *Overrides) MaxCacheFreshness(_ context.Context, userID string) time.Dur return time.Duration(o.getOverridesForUser(userID).MaxCacheFreshness) } +func (o *Overrides) MaxMetadataCacheFreshness(_ context.Context, userID string) time.Duration { + return time.Duration(o.getOverridesForUser(userID).MaxMetadataCacheFreshness) +} + func (o *Overrides) MaxStatsCacheFreshness(_ context.Context, userID string) time.Duration { return time.Duration(o.getOverridesForUser(userID).MaxStatsCacheFreshness) } @@ -828,6 +852,10 @@ func (o *Overrides) BloomGatewayEnabled(userID string) bool { return o.getOverridesForUser(userID).BloomGatewayEnabled } +func (o *Overrides) BloomCompactorChunksBatchSize(userID string) int { + return o.getOverridesForUser(userID).BloomCompactorChunksBatchSize +} + func (o *Overrides) BloomCompactorShardSize(userID string) int { return o.getOverridesForUser(userID).BloomCompactorShardSize } diff --git a/pkg/validation/limits_test.go b/pkg/validation/limits_test.go index 4e449e421c5a..908531f9858f 100644 --- a/pkg/validation/limits_test.go +++ b/pkg/validation/limits_test.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -290,7 +289,7 @@ query_timeout: 5m } } -func TestLimitsValidation(t *testing.T) { +func TestLimitsValidation_deletionMode(t *testing.T) { for _, tc := range []struct { mode string expected error @@ -300,7 +299,9 @@ func TestLimitsValidation(t *testing.T) { {mode: "filter-and-delete", expected: nil}, {mode: "something-else", expected: deletionmode.ErrUnknownMode}, } { - limits := Limits{DeletionMode: tc.mode} - require.True(t, errors.Is(limits.Validate(), tc.expected)) + t.Run(tc.mode, func(t *testing.T) { + limits := Limits{DeletionMode: tc.mode} + require.ErrorIs(t, limits.Validate(), tc.expected) + }) } } diff --git a/production/docker/config/loki.yaml b/production/docker/config/loki.yaml index 6e4541164a23..0a124e5ccfaa 100644 --- a/production/docker/config/loki.yaml +++ 
b/production/docker/config/loki.yaml @@ -9,19 +9,20 @@ server: common: path_prefix: /loki - storage: - s3: - endpoint: minio:9000 - insecure: true - bucketnames: loki-data - access_key_id: loki - secret_access_key: supersecret - s3forcepathstyle: true - compactor_address: http://loki-write:3100 + compactor_address: http://loki-backend:3100 replication_factor: 3 +storage_config: + aws: + endpoint: minio:9000 + insecure: true + bucketnames: loki-data + access_key_id: loki + secret_access_key: supersecret + s3forcepathstyle: true + memberlist: - join_members: ["loki-read", "loki-write"] + join_members: ["loki-read", "loki-write", "loki-backend"] dead_node_reclaim_time: 30s gossip_to_dead_nodes_time: 15s left_ingesters_timeout: 30s @@ -54,6 +55,10 @@ ruler: enable_sharding: true wal: dir: /loki/ruler-wal + evaluation: + mode: remote + query_frontend: + address: dns:///loki-read:9095 storage: type: local local: @@ -85,6 +90,13 @@ schema_config: index: prefix: index_ period: 24h + - from: 2024-01-10 + store: tsdb + object_store: s3 + schema: v12 + index: + prefix: index_ + period: 24h limits_config: diff --git a/production/docker/config/prometheus.yaml b/production/docker/config/prometheus.yaml index 9bb03bb20904..3369106f9400 100644 --- a/production/docker/config/prometheus.yaml +++ b/production/docker/config/prometheus.yaml @@ -11,6 +11,7 @@ scrape_configs: - names: - loki-read - loki-write + - loki-backend type: A port: 3100 - job_name: 'promtail' diff --git a/production/docker/docker-compose.yaml b/production/docker/docker-compose.yaml index 5c1b93f82917..a4f74c7bb118 100644 --- a/production/docker/docker-compose.yaml +++ b/production/docker/docker-compose.yaml @@ -89,7 +89,7 @@ services: - | mkdir -p /data/loki-data && \ mkdir -p /data/loki-ruler && - minio server /data + minio server --address "0.0.0.0:9000" --console-address "0.0.0.0:9001" /data environment: - MINIO_ROOT_USER=loki - MINIO_ROOT_PASSWORD=supersecret @@ -97,6 +97,7 @@ services: - MINIO_UPDATE=off ports: - "9000:9000" + - "9001:9001" volumes: - ./.data/minio:/data networks: @@ -116,7 +117,6 @@ services: image: *lokiImage volumes: - ./config:/etc/loki/ - - ./rules:/loki/rules:ro # only needed for interactive debugging with dlv # cap_add: # - SYS_PTRACE @@ -127,7 +127,7 @@ services: - "7946" # uncomment to use interactive debugging # - "40000-40002:40000" # makes the replicas available on ports 40000, 40001, 40002 - command: "-config.file=/etc/loki/loki.yaml -target=read" + command: "-config.file=/etc/loki/loki.yaml -target=read -legacy-read-mode=false" networks: - loki restart: always @@ -161,6 +161,7 @@ services: image: *lokiImage volumes: - ./config:/etc/loki/ + - ./rules:/loki/rules:ro # only needed for interactive debugging with dlv # cap_add: # - SYS_PTRACE diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index ca04f5d18ce5..db040dc19671 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,19 @@ Entries should include a reference to the pull request that introduced the chang [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) + +## 5.41.8 + +- [BUGFIX] Fix gateway: add possibility to disable listening on ipv6 to prevent crash on ipv4-only system. 
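+
+  For IPv4-only systems, the listener can be disabled via the new `gateway.nginxConfig.enableIPv6` value (a minimal, illustrative override):
+
+  ```yaml
+  gateway:
+    nginxConfig:
+      enableIPv6: false
+  ```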
+ +## 5.41.7 + +- [FEATURE] Add support to disable specific alert rules + +## 5.41.6 + +- [BUGFIX] Added missing namespace to query-scheduler-discovery service when deploying loki in a specific namespace. + ## 5.41.5 - [BUGFIX] Added "swift" type object storage to resolve Loki HELM Chart error. diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 1e08c0c8f0d1..8b5402bc9dbd 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki in simple, scalable mode type: application appVersion: 2.9.3 -version: 5.41.5 +version: 5.41.8 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index ec3360d378d7..33730c361d31 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 5.41.5](https://img.shields.io/badge/Version-5.41.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.3](https://img.shields.io/badge/AppVersion-2.9.3-informational?style=flat-square) +![Version: 5.41.8](https://img.shields.io/badge/Version-5.41.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.3](https://img.shields.io/badge/AppVersion-2.9.3-informational?style=flat-square) Helm chart for Grafana Loki in simple, scalable mode diff --git a/production/helm/loki/src/alerts.yaml.tpl b/production/helm/loki/src/alerts.yaml.tpl index 2171c94848e5..144e263f7061 100644 --- a/production/helm/loki/src/alerts.yaml.tpl +++ b/production/helm/loki/src/alerts.yaml.tpl @@ -2,6 +2,7 @@ groups: - name: "loki_alerts" rules: +{{- if not (.Values.monitoring.rules.disabled.LokiRequestErrors | default false) }} - alert: "LokiRequestErrors" annotations: message: | @@ -17,6 +18,8 @@ groups: {{- if .Values.monitoring.rules.additionalRuleLabels }} {{ toYaml .Values.monitoring.rules.additionalRuleLabels | indent 10 }} {{- end }} +{{- end }} +{{- if not (.Values.monitoring.rules.disabled.LokiRequestPanics | default false) }} - alert: "LokiRequestPanics" annotations: message: | @@ -28,6 +31,8 @@ groups: {{- if .Values.monitoring.rules.additionalRuleLabels }} {{ toYaml .Values.monitoring.rules.additionalRuleLabels | indent 10 }} {{- end }} +{{- end }} +{{- if not (.Values.monitoring.rules.disabled.LokiRequestLatency | default false) }} - alert: "LokiRequestLatency" annotations: message: | @@ -40,6 +45,8 @@ groups: {{- if .Values.monitoring.rules.additionalRuleLabels }} {{ toYaml .Values.monitoring.rules.additionalRuleLabels | indent 10 }} {{- end }} +{{- end }} +{{- if not (.Values.monitoring.rules.disabled.LokiTooManyCompactorsRunning | default false) }} - alert: "LokiTooManyCompactorsRunning" annotations: message: | @@ -52,6 +59,8 @@ groups: {{- if .Values.monitoring.rules.additionalRuleLabels }} {{ toYaml .Values.monitoring.rules.additionalRuleLabels | indent 10 }} {{- end }} +{{- end }} +{{- if not (.Values.monitoring.rules.disabled.LokiCanaryLatency | default false) }} - name: "loki_canaries_alerts" rules: - alert: "LokiCanaryLatency" @@ -66,3 +75,4 @@ groups: {{- if .Values.monitoring.rules.additionalRuleLabels }} {{ toYaml .Values.monitoring.rules.additionalRuleLabels | indent 10 }} {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/_helpers.tpl 
b/production/helm/loki/templates/_helpers.tpl index 9ebcc6351957..9dd70123189e 100644 --- a/production/helm/loki/templates/_helpers.tpl +++ b/production/helm/loki/templates/_helpers.tpl @@ -695,7 +695,9 @@ http { server { listen 8080; + {{- if .Values.gateway.nginxConfig.enableIPv6 }} listen [::]:8080; + {{- end }} {{- if .Values.gateway.basicAuth.enabled }} auth_basic "Loki"; diff --git a/production/helm/loki/templates/backend/query-scheduler-discovery.yaml b/production/helm/loki/templates/backend/query-scheduler-discovery.yaml index 01865863e57e..a9dedbb54564 100644 --- a/production/helm/loki/templates/backend/query-scheduler-discovery.yaml +++ b/production/helm/loki/templates/backend/query-scheduler-discovery.yaml @@ -5,6 +5,7 @@ apiVersion: v1 kind: Service metadata: name: query-scheduler-discovery + namespace: {{ $.Release.Namespace }} labels: {{- include "loki.backendSelectorLabels" . | nindent 4 }} prometheus.io/service-monitor: "false" diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index b8c09ee76465..c3d62491ce8e 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -583,6 +583,12 @@ monitoring: enabled: true # -- Include alerting rules alerting: true + # -- Specify which individual alerts should be disabled + # -- Rather than turning off each alert one by one, set the .monitoring.rules.alerting value to false. + # -- If you disable all the alerts and keep .monitoring.rules.alerting set to true, the chart will fail to render. + disabled: {} + # LokiRequestErrors: true + # LokiRequestPanics: true # -- Alternative namespace to create PrometheusRule resources in namespace: null # -- Additional annotations for the rules PrometheusRule resource @@ -1442,6 +1448,8 @@ gateway: initialDelaySeconds: 15 timeoutSeconds: 1 nginxConfig: + # -- Enable listener for IPv6; disable on IPv4-only systems + enableIPv6: true # -- NGINX log format logFormat: |- main '$remote_addr - $remote_user [$time_local] $status ' diff --git a/production/ksonnet/loki/gateway.libsonnet b/production/ksonnet/loki/gateway.libsonnet index b5b1eb2401ca..d8c923a3bd03 100644 --- a/production/ksonnet/loki/gateway.libsonnet +++ b/production/ksonnet/loki/gateway.libsonnet @@ -48,7 +48,7 @@ local k = import 'ksonnet-util/kausal.libsonnet'; server { listen 80; - auth_basic “Prometheus”; + auth_basic "Prometheus"; auth_basic_user_file /etc/nginx/secrets/.htpasswd; proxy_set_header X-Scope-OrgID %(gateway_tenant_id)s; diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json b/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json index bcd620e69e4a..9d2544082d15 100644 --- a/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json +++ b/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json @@ -142,7 +142,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum by (le) (cluster_job:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\"})) * 1e3", + "expr": "histogram_quantile(0.99, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})) * 1e3", "format": "time_series", "intervalFactor": 2, "legendFormat": "99th Percentile", @@ -150,7 +150,7 @@ "step": 10 }, { - "expr": "histogram_quantile(0.50, sum by (le)
(cluster_job:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\"})) * 1e3", + "expr": "histogram_quantile(0.50, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})) * 1e3", "format": "time_series", "intervalFactor": 2, "legendFormat": "50th Percentile", @@ -158,7 +158,7 @@ "step": 10 }, { - "expr": "1e3 * sum(cluster_job:loki_request_duration_seconds_sum:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\"}) / sum(cluster_job:loki_request_duration_seconds_count:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\"})", + "expr": "1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"}) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "Average", diff --git a/production/loki-mixin-compiled/dashboards/loki-writes.json b/production/loki-mixin-compiled/dashboards/loki-writes.json index fdb347f56055..b7cf83f95f44 100644 --- a/production/loki-mixin-compiled/dashboards/loki-writes.json +++ b/production/loki-mixin-compiled/dashboards/loki-writes.json @@ -142,7 +142,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum by (le) (cluster_job:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/distributor\"})) * 1e3", + "expr": "histogram_quantile(0.99, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/distributor\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})) * 1e3", "format": "time_series", "intervalFactor": 2, "legendFormat": "99th Percentile", @@ -150,7 +150,7 @@ "step": 10 }, { - "expr": "histogram_quantile(0.50, sum by (le) (cluster_job:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/distributor\"})) * 1e3", + "expr": "histogram_quantile(0.50, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/distributor\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})) * 1e3", "format": "time_series", "intervalFactor": 2, "legendFormat": "50th Percentile", @@ -158,7 +158,7 @@ "step": 10 }, { - "expr": "1e3 * sum(cluster_job:loki_request_duration_seconds_sum:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/distributor\"}) / sum(cluster_job:loki_request_duration_seconds_count:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/distributor\"})", + "expr": "1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/distributor\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"}) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/distributor\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "Average", diff --git a/production/loki-mixin/dashboards/loki-writes.libsonnet 
b/production/loki-mixin/dashboards/loki-writes.libsonnet index a12f4f7cea6e..d5c85337a29d 100644 --- a/production/loki-mixin/dashboards/loki-writes.libsonnet +++ b/production/loki-mixin/dashboards/loki-writes.libsonnet @@ -65,7 +65,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; $.panel('Latency') + utils.latencyRecordingRulePanel( 'loki_request_duration_seconds', - dashboards['loki-writes.json'].clusterMatchers + dashboards['loki-writes.json'].matchers.distributor, + dashboards['loki-writes.json'].clusterMatchers + dashboards['loki-writes.json'].matchers.distributor + [utils.selector.eq('route', 'api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle')], ) ) ) diff --git a/tools/deprecated-config-checker/checker/checker_test.go b/tools/deprecated-config-checker/checker/checker_test.go index efecefb1700f..d9fdf4dc607b 100644 --- a/tools/deprecated-config-checker/checker/checker_test.go +++ b/tools/deprecated-config-checker/checker/checker_test.go @@ -39,6 +39,7 @@ var ( } expectedConfigDeprecates = []string{ + "legacy-read-mode", "ruler.remote_write.client", "index_gateway.ring.replication_factor", "storage_config.bigtable", diff --git a/tools/deprecated-config-checker/deprecated-config.yaml b/tools/deprecated-config-checker/deprecated-config.yaml index ab4c3c073d73..46b89971bdd2 100644 --- a/tools/deprecated-config-checker/deprecated-config.yaml +++ b/tools/deprecated-config-checker/deprecated-config.yaml @@ -13,6 +13,7 @@ # _msg: "Use tsdb (preferred) or boltdb-shipper instead." # # Note that even though the configs in schema_config takes a list, here we specify the deprecated fields for each item in the list. +legacy-read-mode: "Legacy read SSD mode is deprecated and will be eventually removed. Use the new read and backend targets." ruler: remote_write: diff --git a/tools/deprecated-config-checker/test-fixtures/config.yaml b/tools/deprecated-config-checker/test-fixtures/config.yaml index d5a326c8647f..be875f3ac10f 100644 --- a/tools/deprecated-config-checker/test-fixtures/config.yaml +++ b/tools/deprecated-config-checker/test-fixtures/config.yaml @@ -1,4 +1,5 @@ auth_enabled: false +legacy-read-mode: true server: http_listen_port: 3100 diff --git a/tools/packaging/nfpm.jsonnet b/tools/packaging/nfpm.jsonnet index b313cf0e48eb..aff88fe65874 100644 --- a/tools/packaging/nfpm.jsonnet +++ b/tools/packaging/nfpm.jsonnet @@ -13,8 +13,8 @@ local overrides = { loki: { description: ||| - Loki is a horizontally-scalable, highly-available, multi-tenant log aggregation system inspired by Prometheus. - It is designed to be very cost effective and easy to operate. + Loki is a horizontally-scalable, highly-available, multi-tenant log aggregation system inspired by Prometheus. + It is designed to be very cost effective and easy to operate. It does not index the contents of the logs, but rather a set of labels for each log stream. |||, contents+: [ @@ -35,7 +35,7 @@ local overrides = { promtail: { description: ||| - Promtail is an agent which ships the contents of local logs to a private Grafana Loki instance or Grafana Cloud. + Promtail is an agent which ships the contents of local logs to a private Grafana Loki instance or Grafana Cloud. It is usually deployed to every machine that has applications needed to be monitored. 
|||, license: 'Apache-2.0', @@ -45,7 +45,7 @@ dst: '/etc/systemd/system/promtail.service', }, { - src: './clients/cmd/promtail/promtail-local-config.yaml', + src: './tools/packaging/promtail-minimal-config.yaml', dst: '/etc/promtail/config.yml', type: 'config|noreplace', }, diff --git a/tools/packaging/promtail-minimal-config.yaml b/tools/packaging/promtail-minimal-config.yaml new file mode 100644 index 000000000000..442b33d0d4bc --- /dev/null +++ b/tools/packaging/promtail-minimal-config.yaml @@ -0,0 +1,24 @@ +# This minimal config scrapes only a single log file. +# It is primarily used in rpm/deb packaging, where the promtail service can be started during the system init process, +# and too much scraping during init can overload the whole system. +# https://github.com/grafana/loki/issues/11398 + +server: + http_listen_port: 9080 + grpc_listen_port: 0 + +positions: + filename: /tmp/positions.yaml + +clients: +- url: http://localhost:3100/loki/api/v1/push + +scrape_configs: +- job_name: system + static_configs: + - targets: + - localhost + labels: + job: varlogs + # NOTE: Modify this to scrape any additional logs of the system. + __path__: /var/log/messages diff --git a/tools/packaging/verify-deb-install.sh b/tools/packaging/verify-deb-install.sh index c9d75e8c89e9..59a11f59fe50 100755 --- a/tools/packaging/verify-deb-install.sh +++ b/tools/packaging/verify-deb-install.sh @@ -21,7 +21,7 @@ cat < /var/log/test.log + echo "blablabla" >> /var/log/messages # Install logcli dpkg -i ${dir}/dist/logcli_*_amd64.deb @@ -31,4 +31,4 @@ cat < /var/log/test.log + echo "blablabla" >> /var/log/messages # Install logcli rpm -i ${dir}/dist/logcli-*.x86_64.rpm @@ -34,4 +34,4 @@ cat <
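For reference, the new per-tenant options added in pkg/validation/limits.go map to limits_config keys as sketched below (values are illustrative, not recommendations):

limits_config:
  # metadata results newer than this window are not cached (default 24h)
  max_metadata_cache_freshness: 24h
  # split interval for queries within the query_ingesters_within window;
  # 0s (the default) falls back to split_queries_by_interval
  split_ingester_queries_by_interval: 1h
  # number of chunks the bloom-compactor downloads at once (default 100)
  bloom_compactor_chunks_batch_size: 100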