From 521d40a96a5c1c65c786c73ec374580fe767dd3b Mon Sep 17 00:00:00 2001
From: Dylan Guedes
Date: Tue, 23 Apr 2024 09:31:53 -0300
Subject: [PATCH 01/16] fix: helm: Set compactor addr for distributed mode.
 (#12748)

---
 production/helm/loki/templates/_helpers.tpl | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl
index 5a93337f02e5..e8107efdeea0 100644
--- a/production/helm/loki/templates/_helpers.tpl
+++ b/production/helm/loki/templates/_helpers.tpl
@@ -931,13 +931,18 @@ enableServiceLinks: false
 {{/* Determine compactor address based on target configuration */}}
 {{- define "loki.compactorAddress" -}}
 {{- $isSimpleScalable := eq (include "loki.deployment.isScalable" .) "true" -}}
+{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}}
+{{- $isSingleBinary := eq (include "loki.deployment.isSingleBinary" .) "true" -}}
 {{- $compactorAddress := include "loki.backendFullname" . -}}
 {{- if and $isSimpleScalable .Values.read.legacyReadTarget -}}
 {{/* 2 target configuration */}}
 {{- $compactorAddress = include "loki.readFullname" . -}}
-{{- else if (not $isSimpleScalable) -}}
+{{- else if $isSingleBinary -}}
 {{/* single binary */}}
 {{- $compactorAddress = include "loki.singleBinaryFullname" . -}}
+{{/* distributed */}}
+{{- else if $isDistributed -}}
+{{- $compactorAddress = include "loki.compactorFullname" . -}}
 {{- end -}}
 {{- printf "http://%s:%s" $compactorAddress (.Values.loki.server.http_listen_port | toString) }}
 {{- end }}

From c178cc62df569cf5d7ff1ad9647dc82336cae473 Mon Sep 17 00:00:00 2001
From: Paul Rogers <129207811+paul1r@users.noreply.github.com>
Date: Tue, 23 Apr 2024 08:40:19 -0400
Subject: [PATCH 02/16] test: Data race updates for memchunk tests (#12752)

---
 pkg/chunkenc/memchunk_test.go | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go
index 8fc3eaab5ab3..09eab22f74be 100644
--- a/pkg/chunkenc/memchunk_test.go
+++ b/pkg/chunkenc/memchunk_test.go
@@ -184,7 +184,7 @@ func TestBlock(t *testing.T) {
 		}
 	}
 
-	var noopStreamPipeline = log.NewNoopPipeline().ForStream(labels.Labels{})
+	noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{})
 	it, err := chk.Iterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, noopStreamPipeline)
 	require.NoError(t, err)

@@ -212,7 +212,7 @@
 	require.NoError(t, it.Close())
 	require.Equal(t, len(cases), idx)
 
-	countExtractor = func() log.StreamSampleExtractor {
+	countExtractor := func() log.StreamSampleExtractor {
 		ex, err := log.NewLineSampleExtractor(log.CountExtractor, nil, nil, false, false)
 		if err != nil {
 			panic(err)
 		}
@@ -276,6 +276,7 @@ func TestCorruptChunk(t *testing.T) {
 	ctx, start, end := context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64)
 	for i, c := range cases {
 		chk.blocks = []block{{b: c.data}}
+		noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{})
 		it, err := chk.Iterator(ctx, start, end, logproto.FORWARD, noopStreamPipeline)
 		require.NoError(t, err, "case %d", i)

@@ -309,6 +310,7 @@ func TestReadFormatV1(t *testing.T) {
 		t.Fatal(err)
 	}
 
+	noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{})
 	it, err := r.Iterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, noopStreamPipeline)
 	if err != nil {
 		t.Fatal(err)
@@ -340,6 +342,7 @@ func TestRoundtripV2(t 
*testing.T) { assertLines := func(c *MemChunk) { require.Equal(t, enc, c.Encoding()) + noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{}) it, err := c.Iterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, noopStreamPipeline) if err != nil { t.Fatal(err) @@ -529,6 +532,7 @@ func TestChunkFilling(t *testing.T) { require.Equal(t, int64(lines), i) + noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{}) it, err := chk.Iterator(context.Background(), time.Unix(0, 0), time.Unix(0, 100), logproto.FORWARD, noopStreamPipeline) require.NoError(t, err) i = 0 @@ -711,6 +715,7 @@ func TestChunkStats(t *testing.T) { expectedSize := inserted * (len(entry.Line) + 3*binary.MaxVarintLen64) statsCtx, ctx := stats.NewContext(context.Background()) + noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{}) it, err := c.Iterator(ctx, first.Add(-time.Hour), entry.Timestamp.Add(time.Hour), logproto.BACKWARD, noopStreamPipeline) if err != nil { t.Fatal(err) @@ -789,6 +794,7 @@ func TestIteratorClose(t *testing.T) { } { c := newMemChunkWithFormat(f.chunkFormat, enc, f.headBlockFmt, testBlockSize, testTargetSize) inserted := fillChunk(c) + noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{}) iter, err := c.Iterator(context.Background(), time.Unix(0, 0), time.Unix(0, inserted), logproto.BACKWARD, noopStreamPipeline) if err != nil { t.Fatal(err) @@ -916,6 +922,7 @@ func BenchmarkBackwardIterator(b *testing.B) { _ = fillChunk(c) b.ResetTimer() for n := 0; n < b.N; n++ { + noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{}) iterator, err := c.Iterator(context.Background(), time.Unix(0, 0), time.Now(), logproto.BACKWARD, noopStreamPipeline) if err != nil { panic(err) @@ -938,6 +945,7 @@ func TestGenerateDataSize(t *testing.T) { bytesRead := uint64(0) for _, c := range chunks { + noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{}) // use forward iterator for benchmark -- backward iterator does extra allocations by keeping entries in memory iterator, err := c.Iterator(context.TODO(), time.Unix(0, 0), time.Now(), logproto.FORWARD, noopStreamPipeline) if err != nil { @@ -977,6 +985,7 @@ func BenchmarkHeadBlockIterator(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { + noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{}) iter := h.Iterator(context.Background(), logproto.BACKWARD, 0, math.MaxInt64, noopStreamPipeline) for iter.Next() { @@ -1061,6 +1070,7 @@ func TestMemChunk_IteratorBounds(t *testing.T) { tt := tt c := createChunk() + noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{}) // testing headchunk it, err := c.Iterator(context.Background(), tt.mint, tt.maxt, tt.direction, noopStreamPipeline) require.NoError(t, err) @@ -1091,6 +1101,7 @@ func TestMemchunkLongLine(t *testing.T) { for i := 1; i <= 10; i++ { require.NoError(t, c.Append(&logproto.Entry{Timestamp: time.Unix(0, int64(i)), Line: strings.Repeat("e", 200000)})) } + noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{}) it, err := c.Iterator(context.Background(), time.Unix(0, 0), time.Unix(0, 100), logproto.FORWARD, noopStreamPipeline) require.NoError(t, err) for i := 1; i <= 10; i++ { From 8b63b66c71ad9454d84be2f18c305392bc132ada Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Tue, 23 Apr 2024 16:07:18 +0200 Subject: [PATCH 03/16] docs: Update supported and deprecated type in Manage > Storage (#12742) Make distinction between supported and deprecated 
store types more visible on this page, similar to what's listed under
Configure > Storage.

Also updated the list of supported object store backends.

Signed-off-by: Christian Haudum
---
 docs/sources/operations/storage/_index.md | 44 +++++++++++------------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/docs/sources/operations/storage/_index.md b/docs/sources/operations/storage/_index.md
index f1947d072b56..26862d3a88fb 100644
--- a/docs/sources/operations/storage/_index.md
+++ b/docs/sources/operations/storage/_index.md
@@ -9,16 +9,16 @@ You can read a high level overview of Loki storage [here](https://grafana.com/do
 
 Grafana Loki needs to store two different types of data: **chunks** and **indexes**.
 
+When using Accelerated Search (experimental), a third data type is used: **bloom blocks**.
+
 Loki receives logs in separate streams, where each stream is uniquely identified
 by its tenant ID and its set of labels. As log entries from a stream arrive,
-they are compressed as "chunks" and saved in the chunks store. See [chunk
+they are compressed as **chunks** and saved in the chunks store. See [chunk
 format](#chunk-format) for how chunks are stored internally.
 
 The **index** stores each stream's label set and links them to the individual
-chunks.
-
-Refer to Loki's [configuration](https://grafana.com/docs/loki//configure/) for details on
-how to configure the storage and the index.
+chunks. Refer to Loki's [configuration](https://grafana.com/docs/loki//configure/) for
+details on how to configure the storage and the index.
 
 For more information:
 
@@ -26,36 +26,36 @@ For more information:
 - [Retention](https://grafana.com/docs/loki//operations/storage/retention/)
 - [Logs Deletion](https://grafana.com/docs/loki//operations/storage/logs-deletion/)
 
-## Supported Stores
+## Store Types
 
-The following are supported for the index:
+### ✅ Supported index stores
 
-- [TSDB](https://grafana.com/docs/loki//operations/storage/tsdb/) index store which stores TSDB index files in the object store. This is the recommended index store for Loki 2.8 and newer.
-- [Single Store (boltdb-shipper)](https://grafana.com/docs/loki//operations/storage/boltdb-shipper/) index store which stores boltdb index files in the object store.
-- [Amazon DynamoDB](https://aws.amazon.com/dynamodb)
-- [Google Bigtable](https://cloud.google.com/bigtable)
-- [Apache Cassandra](https://cassandra.apache.org)
-- [BoltDB](https://github.com/boltdb/bolt) (doesn't work when clustering Loki)
+- [Single Store TSDB](https://grafana.com/docs/loki//operations/storage/tsdb/) index store which stores TSDB index files in the object store.
+  **This is the recommended index store for Loki 2.8 and newer.**
+- [Single Store BoltDB (boltdb-shipper)](https://grafana.com/docs/loki//operations/storage/boltdb-shipper/) index store which stores boltdb index files in the object store.
 
-The following are deprecated for the index and will be removed in a future release:
+### ❌ Deprecated index stores
 
 - [Amazon DynamoDB](https://aws.amazon.com/dynamodb). Support for this is deprecated and will be removed in a future release.
 - [Google Bigtable](https://cloud.google.com/bigtable). Support for this is deprecated and will be removed in a future release.
 - [Apache Cassandra](https://cassandra.apache.org). Support for this is deprecated and will be removed in a future release.
+- [BoltDB](https://github.com/boltdb/bolt) (doesn't work when clustering Loki)
 
-The following are supported and recommended for the chunks:
+### ✅ Supported and recommended chunks stores
 
-- [Amazon S3](https://aws.amazon.com/s3)
-- [Google Cloud Storage](https://cloud.google.com/storage/)
-- [Filesystem]({{< relref "./filesystem" >}}) (please read more about the filesystem to understand the pros/cons before using with production data)
-- [Baidu Object Storage](https://cloud.baidu.com/product/bos.html)
-- [IBM Cloud Object Storage](https://www.ibm.com/cloud/object-storage)
+- [Amazon Simple Storage Service (S3)](https://aws.amazon.com/s3)
+- [Google Cloud Storage (GCS)](https://cloud.google.com/storage/)
+- [Microsoft Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs)
+- [IBM Cloud Object Storage (COS)](https://www.ibm.com/cloud/object-storage)
+- [Baidu Object Storage (BOS)](https://cloud.baidu.com/product/bos.html)
+- [Alibaba Object Storage Service (OSS)](https://www.alibabacloud.com/product/object-storage-service)
 
-The following are supported for the chunks, but not typically recommended for production use:
+### ⚠️ Supported chunks stores, not typically recommended for production use
 
 - [Filesystem]({{< relref "./filesystem" >}}) (please read more about the filesystem to understand the pros/cons before using with production data)
+- S3 API compatible storage, such as [MinIO](https://min.io/)
 
-The following are deprecated for the chunks and will be removed in a future release:
+### ❌ Deprecated chunks stores
 
 - [Amazon DynamoDB](https://aws.amazon.com/dynamodb). Support for this is deprecated and will be removed in a future release.
 - [Google Bigtable](https://cloud.google.com/bigtable). Support for this is deprecated and will be removed in a future release.

From 904ef6eacdab6d7ea5946c4fe5b248c2e97582be Mon Sep 17 00:00:00 2001
From: J Stickler
Date: Tue, 23 Apr 2024 10:58:10 -0400
Subject: [PATCH 04/16] docs: fix broken link (#12757)

---
 docs/sources/send-data/promtail/installation.md | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/docs/sources/send-data/promtail/installation.md b/docs/sources/send-data/promtail/installation.md
index 3a1628687405..2daa12638083 100644
--- a/docs/sources/send-data/promtail/installation.md
+++ b/docs/sources/send-data/promtail/installation.md
@@ -15,13 +15,13 @@ or there is a Helm chart to install it in a Kubernetes cluster.
 ## Install the binary
 
 Every Grafana Loki release includes binaries for Promtail which can be found on the
-[Releases page](https://github.com/grafana/loki/releases) as part of the release assets. 
+[Releases page](https://github.com/grafana/loki/releases) as part of the release assets.
 
 ## Install using APT or RPM package manager
 
-See the instructions [here](https://grafana.com/docs/loki/setup/install/local/#install-using-apt-or-rpm-package-manager).
+See the instructions [here](https://grafana.com/docs/loki//setup/install/local/#install-using-apt-or-rpm-package-manager).
 
-## Install using Docker 
+## Install using Docker
 
 ```bash
 # modify tag to most recent version
@@ -45,6 +45,7 @@ helm repo update
 ```
 
 Create the configuration file `values.yaml`.
The example below illustrates a connection to the locally deployed loki server: + ```yaml config: # publish data to loki From 587a6d20e938f4f58e5a49563a3c267762cf89eb Mon Sep 17 00:00:00 2001 From: Shantanu Alshi Date: Tue, 23 Apr 2024 21:02:06 +0530 Subject: [PATCH 05/16] feat: Detected labels from store (#12441) --- pkg/ingester/ingester.go | 1 - pkg/ingester/ingester_test.go | 123 ++++++++++++ pkg/ingester/instance.go | 26 ++- pkg/ingester/instance_test.go | 49 +++++ pkg/logql/metrics.go | 42 +++- pkg/loki/modules.go | 3 +- pkg/querier/ingester_querier.go | 17 +- pkg/querier/ingester_querier_test.go | 81 ++++---- pkg/querier/multi_tenant_querier.go | 6 +- pkg/querier/querier.go | 94 +++++++-- pkg/querier/querier_mock_test.go | 10 + pkg/querier/querier_test.go | 290 ++++++++++++++++++++++++++- pkg/querier/queryrange/codec.go | 12 -- pkg/querier/queryrange/roundtrip.go | 43 +++- 14 files changed, 714 insertions(+), 83 deletions(-) diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index b0d197623d3c..e99de0d1531e 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -1395,7 +1395,6 @@ func (i *Ingester) GetDetectedLabels(ctx context.Context, req *logproto.Detected if err != nil { return nil, err } - level.Info(i.logger).Log("msg", matchers) } labelMap, err := instance.LabelsWithValues(ctx, *req.Start, matchers...) diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 94fd5700c680..6722e6cfccf3 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -784,6 +784,129 @@ func Test_InMemoryLabels(t *testing.T) { require.Equal(t, []string{"bar", "foo"}, res.Values) } +func TestIngester_GetDetectedLabels(t *testing.T) { + ctx := user.InjectOrgID(context.Background(), "test") + + ingesterConfig := defaultIngesterTestConfig(t) + limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) + require.NoError(t, err) + store := &mockStore{ + chunks: map[string][]chunk.Chunk{}, + } + + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + require.NoError(t, err) + defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck + + // Push labels + req := logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: `{foo="bar",bar="baz1"}`, + }, + { + Labels: `{foo="bar",bar="baz2"}`, + }, + { + Labels: `{foo="bar1",bar="baz3"}`, + }, + { + Labels: `{foo="foo1",bar="baz1"}`, + }, + { + Labels: `{foo="foo",bar="baz1"}`, + }, + }, + } + for i := 0; i < 10; i++ { + req.Streams[0].Entries = append(req.Streams[0].Entries, logproto.Entry{ + Timestamp: time.Unix(0, 0), + Line: fmt.Sprintf("line %d", i), + }) + req.Streams[1].Entries = append(req.Streams[1].Entries, logproto.Entry{ + Timestamp: time.Unix(0, 0), + Line: fmt.Sprintf("line %d", i), + }) + } + + _, err = i.Push(ctx, &req) + require.NoError(t, err) + + res, err := i.GetDetectedLabels(ctx, &logproto.DetectedLabelsRequest{ + Start: &[]time.Time{time.Now().Add(11 * time.Nanosecond)}[0], + End: nil, + Query: "", + }) + + require.NoError(t, err) + fooValues, ok := res.Labels["foo"] + require.True(t, ok) + barValues, ok := res.Labels["bar"] + require.True(t, ok) + require.Equal(t, 4, len(fooValues.Values)) + require.Equal(t, 3, len(barValues.Values)) +} + +func TestIngester_GetDetectedLabelsWithQuery(t *testing.T) { + ctx := user.InjectOrgID(context.Background(), "test") + + ingesterConfig := defaultIngesterTestConfig(t) + limits, err := 
validation.NewOverrides(defaultLimitsTestConfig(), nil) + require.NoError(t, err) + store := &mockStore{ + chunks: map[string][]chunk.Chunk{}, + } + + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + require.NoError(t, err) + defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck + + // Push labels + req := logproto.PushRequest{ + Streams: []logproto.Stream{ + { + Labels: `{foo="bar",bar="baz1"}`, + }, + { + Labels: `{foo="bar",bar="baz2"}`, + }, + { + Labels: `{foo="bar1",bar="baz3"}`, + }, + { + Labels: `{foo="foo1",bar="baz4"}`, + }, + }, + } + for i := 0; i < 10; i++ { + req.Streams[0].Entries = append(req.Streams[0].Entries, logproto.Entry{ + Timestamp: time.Unix(0, 0), + Line: fmt.Sprintf("line %d", i), + }) + req.Streams[1].Entries = append(req.Streams[1].Entries, logproto.Entry{ + Timestamp: time.Unix(0, 0), + Line: fmt.Sprintf("line %d", i), + }) + } + + _, err = i.Push(ctx, &req) + require.NoError(t, err) + + res, err := i.GetDetectedLabels(ctx, &logproto.DetectedLabelsRequest{ + Start: &[]time.Time{time.Now().Add(11 * time.Nanosecond)}[0], + End: nil, + Query: `{foo="bar"}`, + }) + + require.NoError(t, err) + fooValues, ok := res.Labels["foo"] + require.True(t, ok) + barValues, ok := res.Labels["bar"] + require.True(t, ok) + require.Equal(t, 1, len(fooValues.Values)) + require.Equal(t, 2, len(barValues.Values)) +} + func Test_DedupeIngester(t *testing.T) { var ( requests = int64(400) diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go index 7e9112fad146..eb98f8a39b63 100644 --- a/pkg/ingester/instance.go +++ b/pkg/ingester/instance.go @@ -588,9 +588,31 @@ type UniqueValues map[string]struct{} // LabelsWithValues returns the label names with all the unique values depending on the request func (i *instance) LabelsWithValues(ctx context.Context, startTime time.Time, matchers ...*labels.Matcher) (map[string]UniqueValues, error) { - // TODO (shantanu): Figure out how to get the label names from index directly when no matchers are given. 
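+	// When no matchers are given, label names and their values are read directly from the instance's inverted index.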
-
 	labelMap := make(map[string]UniqueValues)
+	if len(matchers) == 0 {
+		labelsFromIndex, err := i.index.LabelNames(startTime, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, label := range labelsFromIndex {
+			values, err := i.index.LabelValues(startTime, label, nil)
+			if err != nil {
+				return nil, err
+			}
+			existingValues, exists := labelMap[label]
+			if !exists {
+				existingValues = make(map[string]struct{})
+			}
+			for _, v := range values {
+				existingValues[v] = struct{}{}
+			}
+			labelMap[label] = existingValues
+		}
+
+		return labelMap, nil
+	}
+
 	err := i.forMatchingStreams(ctx, startTime, matchers, nil, func(s *stream) error {
 		for _, label := range s.labels {
 			v, exists := labelMap[label.Name]
diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go
index 0cd583825124..acc5864fc557 100644
--- a/pkg/ingester/instance_test.go
+++ b/pkg/ingester/instance_test.go
@@ -1480,6 +1480,55 @@ func insertData(t *testing.T, instance *instance) {
 	}
 }
 
+func TestInstance_LabelsWithValues(t *testing.T) {
+	instance, currentTime, _ := setupTestStreams(t)
+	start := []time.Time{currentTime.Add(11 * time.Nanosecond)}[0]
+	m, err := labels.NewMatcher(labels.MatchEqual, "app", "test")
+	require.NoError(t, err)
+
+	t.Run("label names with no matchers returns all detected labels", func(t *testing.T) {
+		var matchers []*labels.Matcher
+		res, err := instance.LabelsWithValues(context.Background(), start, matchers...)
+		completeResponse := map[string]UniqueValues{
+			"app": map[string]struct{}{
+				"test":  {},
+				"test2": {},
+			},
+			"job": map[string]struct{}{
+				"varlogs":  {},
+				"varlogs2": {},
+			},
+		}
+		require.NoError(t, err)
+		require.Equal(t, completeResponse, res)
+	})
+
+	t.Run("label names with matcher returns response with matching detected labels", func(t *testing.T) {
+		matchers := []*labels.Matcher{m}
+		res, err := instance.LabelsWithValues(context.Background(), start, matchers...)
+		responseWithMatchingLabel := map[string]UniqueValues{
+			"app": map[string]struct{}{
+				"test": {},
+			},
+			"job": map[string]struct{}{
+				"varlogs":  {},
+				"varlogs2": {},
+			},
+		}
+		require.NoError(t, err)
+		require.Equal(t, responseWithMatchingLabel, res)
+	})
+
+	t.Run("label names with matchers and no start time returns an empty response", func(t *testing.T) {
+		matchers := []*labels.Matcher{m}
+		var st time.Time
+		res, err := instance.LabelsWithValues(context.Background(), st, matchers...)
+
+		require.NoError(t, err)
+		require.Equal(t, map[string]UniqueValues{}, res)
+	})
+}
+
 type fakeQueryServer func(*logproto.QueryResponse) error
 
 func (f fakeQueryServer) Send(res *logproto.QueryResponse) error {
diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go
index ed8405fc4e6e..052446c6b5b7 100644
--- a/pkg/logql/metrics.go
+++ b/pkg/logql/metrics.go
@@ -580,6 +580,44 @@ func extractShard(shards []string) *astmapper.ShardAnnotation {
 	return &shard
 }
 
-func RecordDetectedLabelsQueryMetrics(_ context.Context, _ log.Logger, _ time.Time, _ time.Time, _ string, _ string, _ logql_stats.Result) {
-	// TODO(shantanu) log metrics here
+func RecordDetectedLabelsQueryMetrics(ctx context.Context, log log.Logger, start time.Time, end time.Time, query string, status string, stats logql_stats.Result) {
+	var (
+		logger      = fixLogger(ctx, log)
+		latencyType = latencyTypeFast
+		queryType   = QueryTypeVolume
+	)
+
+	// Tag throughput metric by latency type based on a threshold.
+	// Latency below the threshold is fast, above is slow.
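+	// (queryType above reuses QueryTypeVolume; there is no dedicated detected_labels query type constant in this change.)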
+ if stats.Summary.ExecTime > slowQueryThresholdSecond { + latencyType = latencyTypeSlow + } + + rangeType := "range" + + level.Info(logger).Log( + "api", "detected_labels", + "latency", latencyType, + "query_type", queryType, + "query", query, + "query_hash", util.HashedQuery(query), + "start", start.Format(time.RFC3339Nano), + "end", end.Format(time.RFC3339Nano), + "start_delta", time.Since(start), + "end_delta", time.Since(end), + "range_type", rangeType, + "length", end.Sub(start), + "duration", time.Duration(int64(stats.Summary.ExecTime*float64(time.Second))), + "status", status, + "splits", stats.Summary.Splits, + "total_entries", stats.Summary.TotalEntriesReturned, + // cache is accumulated by middleware used by the frontend only; logs from the queriers will not show cache stats + //"cache_volume_results_req", stats.Caches.VolumeResult.EntriesRequested, + //"cache_volume_results_hit", stats.Caches.VolumeResult.EntriesFound, + //"cache_volume_results_stored", stats.Caches.VolumeResult.EntriesStored, + //"cache_volume_results_download_time", stats.Caches.VolumeResult.CacheDownloadTime(), + //"cache_volume_results_query_length_served", stats.Caches.VolumeResult.CacheQueryLengthServed(), + ) + + execLatency.WithLabelValues(status, queryType, "").Observe(stats.Summary.ExecTime) } diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index ab7bff17bc9b..1473a5616fd3 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -889,7 +889,8 @@ func (t *Loki) setupAsyncStore() error { } func (t *Loki) initIngesterQuerier() (_ services.Service, err error) { - t.ingesterQuerier, err = querier.NewIngesterQuerier(t.Cfg.IngesterClient, t.ring, t.Cfg.Querier.ExtraQueryDelay, t.Cfg.MetricsNamespace) + logger := log.With(util_log.Logger, "component", "querier") + t.ingesterQuerier, err = querier.NewIngesterQuerier(t.Cfg.IngesterClient, t.ring, t.Cfg.Querier.ExtraQueryDelay, t.Cfg.MetricsNamespace, logger) if err != nil { return nil, err } diff --git a/pkg/querier/ingester_querier.go b/pkg/querier/ingester_querier.go index 6ac1db113f6f..e99fe6882df4 100644 --- a/pkg/querier/ingester_querier.go +++ b/pkg/querier/ingester_querier.go @@ -6,6 +6,8 @@ import ( "strings" "time" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "golang.org/x/exp/slices" "github.com/grafana/loki/v3/pkg/storage/stores/index/seriesvolume" @@ -41,23 +43,25 @@ type IngesterQuerier struct { ring ring.ReadRing pool *ring_client.Pool extraQueryDelay time.Duration + logger log.Logger } -func NewIngesterQuerier(clientCfg client.Config, ring ring.ReadRing, extraQueryDelay time.Duration, metricsNamespace string) (*IngesterQuerier, error) { +func NewIngesterQuerier(clientCfg client.Config, ring ring.ReadRing, extraQueryDelay time.Duration, metricsNamespace string, logger log.Logger) (*IngesterQuerier, error) { factory := func(addr string) (ring_client.PoolClient, error) { return client.New(clientCfg, addr) } - return newIngesterQuerier(clientCfg, ring, extraQueryDelay, ring_client.PoolAddrFunc(factory), metricsNamespace) + return newIngesterQuerier(clientCfg, ring, extraQueryDelay, ring_client.PoolAddrFunc(factory), metricsNamespace, logger) } // newIngesterQuerier creates a new IngesterQuerier and allows to pass a custom ingester client factory // used for testing purposes -func newIngesterQuerier(clientCfg client.Config, ring ring.ReadRing, extraQueryDelay time.Duration, clientFactory ring_client.PoolFactory, metricsNamespace string) (*IngesterQuerier, error) { +func newIngesterQuerier(clientCfg client.Config, ring 
ring.ReadRing, extraQueryDelay time.Duration, clientFactory ring_client.PoolFactory, metricsNamespace string, logger log.Logger) (*IngesterQuerier, error) {
 	iq := IngesterQuerier{
 		ring:            ring,
 		pool:            clientpool.NewPool("ingester", clientCfg.PoolConfig, ring, clientFactory, util_log.Logger, metricsNamespace),
 		extraQueryDelay: extraQueryDelay,
+		logger:          logger,
 	}
 
 	err := services.StartAndAwaitRunning(context.Background(), iq.pool)
@@ -364,12 +368,18 @@ func (q *IngesterQuerier) DetectedLabel(ctx context.Context, req *logproto.Detec
 	})
 
 	if err != nil {
+		level.Error(q.logger).Log("msg", "error getting detected labels", "err", err)
 		return nil, err
 	}
 
 	labelMap := make(map[string][]string)
 	for _, resp := range ingesterResponses {
-		thisIngester := resp.response.(*logproto.LabelToValuesResponse)
+		thisIngester, ok := resp.response.(*logproto.LabelToValuesResponse)
+		if !ok {
+			level.Warn(q.logger).Log("msg", "Cannot convert response to LabelToValuesResponse in detectedlabels",
+				"response", resp)
+			continue
+		}
 
 		for label, thisIngesterValues := range thisIngester.Labels {
 			var combinedValues []string
diff --git a/pkg/querier/ingester_querier_test.go b/pkg/querier/ingester_querier_test.go
index d2cb00d82ec5..713c170f7dea 100644
--- a/pkg/querier/ingester_querier_test.go
+++ b/pkg/querier/ingester_querier_test.go
@@ -7,6 +7,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/go-kit/log"
 	"go.uber.org/atomic"
 	"google.golang.org/grpc/codes"
 
@@ -104,13 +105,8 @@ func TestIngesterQuerier_earlyExitOnQuorum(t *testing.T) {
 			} else {
 				ingesterClient.On(testData.method, mock.Anything, mock.Anything, mock.Anything).Return(testData.retVal, nil).Run(runFn)
 			}
-			ingesterQuerier, err := newIngesterQuerier(
-				mockIngesterClientConfig(),
-				newReadRingMock(ringIngesters, 1),
-				mockQuerierConfig().ExtraQueryDelay,
-				newIngesterClientMockFactory(ingesterClient),
-				constants.Loki,
-			)
+
+			ingesterQuerier, err := newTestIngesterQuerier(newReadRingMock(ringIngesters, 1), ingesterClient)
 			require.NoError(t, err)
 
 			wg.Add(3)
@@ -204,13 +200,7 @@ func TestIngesterQuerier_earlyExitOnQuorum(t *testing.T) {
 			} else {
 				ingesterClient.On(testData.method, mock.Anything, mock.Anything, mock.Anything).Return(testData.retVal, nil).Run(runFn)
 			}
-			ingesterQuerier, err := newIngesterQuerier(
-				mockIngesterClientConfig(),
-				newReadRingMock(ringIngesters, 1),
-				mockQuerierConfig().ExtraQueryDelay,
-				newIngesterClientMockFactory(ingesterClient),
-				constants.Loki,
-			)
+			ingesterQuerier, err := newTestIngesterQuerier(newReadRingMock(ringIngesters, 1), ingesterClient)
 			require.NoError(t, err)
 
 			wg.Add(3)
@@ -302,13 +292,7 @@ func TestQuerier_tailDisconnectedIngesters(t *testing.T) {
 			ingesterClient := newQuerierClientMock()
 			ingesterClient.On("Tail", mock.Anything, &req, mock.Anything).Return(newTailClientMock(), nil)
 
-			ingesterQuerier, err := newIngesterQuerier(
-				mockIngesterClientConfig(),
-				newReadRingMock(testData.ringIngesters, 0),
-				mockQuerierConfig().ExtraQueryDelay,
-				newIngesterClientMockFactory(ingesterClient),
-				constants.Loki,
-			)
+			ingesterQuerier, err := newTestIngesterQuerier(newReadRingMock(testData.ringIngesters, 0), ingesterClient)
 			require.NoError(t, err)
 
 			actualClients, err := ingesterQuerier.TailDisconnectedIngesters(context.Background(), &req, testData.connectedIngestersAddr)
@@ -365,13 +349,7 @@ func TestIngesterQuerier_Volume(t *testing.T) {
 		ingesterClient := newQuerierClientMock()
 		ingesterClient.On("GetVolume", mock.Anything, mock.Anything, mock.Anything).Return(ret, nil)
 
-		ingesterQuerier, err := newIngesterQuerier(
-			mockIngesterClientConfig(),
-
newReadRingMock([]ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("3.3.3.3", ring.ACTIVE)}, 0), - mockQuerierConfig().ExtraQueryDelay, - newIngesterClientMockFactory(ingesterClient), - constants.Loki, - ) + ingesterQuerier, err := newTestIngesterQuerier(newReadRingMock([]ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("3.3.3.3", ring.ACTIVE)}, 0), ingesterClient) require.NoError(t, err) volumes, err := ingesterQuerier.Volume(context.Background(), "", 0, 1, 10, nil, "labels") @@ -386,13 +364,7 @@ func TestIngesterQuerier_Volume(t *testing.T) { ingesterClient := newQuerierClientMock() ingesterClient.On("GetVolume", mock.Anything, mock.Anything, mock.Anything).Return(nil, status.Error(codes.Unimplemented, "something bad")) - ingesterQuerier, err := newIngesterQuerier( - mockIngesterClientConfig(), - newReadRingMock([]ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("3.3.3.3", ring.ACTIVE)}, 0), - mockQuerierConfig().ExtraQueryDelay, - newIngesterClientMockFactory(ingesterClient), - constants.Loki, - ) + ingesterQuerier, err := newTestIngesterQuerier(newReadRingMock([]ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("3.3.3.3", ring.ACTIVE)}, 0), ingesterClient) require.NoError(t, err) volumes, err := ingesterQuerier.Volume(context.Background(), "", 0, 1, 10, nil, "labels") @@ -401,3 +373,42 @@ func TestIngesterQuerier_Volume(t *testing.T) { require.Equal(t, []logproto.Volume(nil), volumes.Volumes) }) } + +func TestIngesterQuerier_DetectedLabels(t *testing.T) { + t.Run("it returns all unique detected labels from all ingesters", func(t *testing.T) { + req := logproto.DetectedLabelsRequest{} + + ingesterClient := newQuerierClientMock() + ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything).Return(&logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ + "cluster": {Values: []string{"ingester"}}, + "foo": {Values: []string{"abc", "abc", "ghi"}}, + "bar": {Values: []string{"cgi", "def"}}, + "all-ids": {Values: []string{"1", "3", "3", "3"}}, + }}, nil) + + readRingMock := newReadRingMock([]ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("3.3.3.3", ring.ACTIVE)}, 0) + ingesterQuerier, err := newTestIngesterQuerier(readRingMock, ingesterClient) + require.NoError(t, err) + + detectedLabels, err := ingesterQuerier.DetectedLabel(context.Background(), &req) + require.NoError(t, err) + + require.Equal(t, &logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ + "all-ids": {Values: []string{"1", "3"}}, + "bar": {Values: []string{"cgi", "def"}}, + "cluster": {Values: []string{"ingester"}}, + "foo": {Values: []string{"abc", "ghi"}}, + }}, detectedLabels) + }) +} + +func newTestIngesterQuerier(readRingMock *readRingMock, ingesterClient *querierClientMock) (*IngesterQuerier, error) { + return newIngesterQuerier( + mockIngesterClientConfig(), + readRingMock, + mockQuerierConfig().ExtraQueryDelay, + newIngesterClientMockFactory(ingesterClient), + constants.Loki, + log.NewNopLogger(), + ) +} diff --git a/pkg/querier/multi_tenant_querier.go b/pkg/querier/multi_tenant_querier.go index 54897200036d..654ae7c2deb2 100644 --- a/pkg/querier/multi_tenant_querier.go +++ b/pkg/querier/multi_tenant_querier.go @@ -284,7 +284,6 @@ func (q *MultiTenantQuerier) DetectedFields(ctx context.Context, req *logproto.D } func (q *MultiTenantQuerier) DetectedLabels(ctx context.Context, req 
*logproto.DetectedLabelsRequest) (*logproto.DetectedLabelsResponse, error) { - // TODO(shantanu) tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { return nil, err @@ -294,7 +293,10 @@ func (q *MultiTenantQuerier) DetectedLabels(ctx context.Context, req *logproto.D return q.Querier.DetectedLabels(ctx, req) } - //resp := make([]*logproto.DetectedLabels, len(tenantIDs)) + level.Debug(q.logger).Log( + "msg", "detected labels requested for multiple tenants, but not yet supported. returning static labels", + "tenantIDs", strings.Join(tenantIDs, ","), + ) return &logproto.DetectedLabelsResponse{ DetectedLabels: []*logproto.DetectedLabel{ diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index b2e61b1bdb68..bee850fd82d6 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -909,12 +909,27 @@ func (q *SingleTenantQuerier) Volume(ctx context.Context, req *logproto.VolumeRe return seriesvolume.Merge(responses, req.Limit), nil } +// DetectedLabels fetches labels and values from store and ingesters and filters them by relevance criteria as per logs app. func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto.DetectedLabelsRequest) (*logproto.DetectedLabelsResponse, error) { - var ingesterLabels *logproto.LabelToValuesResponse + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } var detectedLabels []*logproto.DetectedLabel + staticLabels := map[string]struct{}{"cluster": {}, "namespace": {}, "instance": {}, "pod": {}} + // Enforce the query timeout while querying backends + queryTimeout := q.limits.QueryTimeout(ctx, userID) + ctx, cancel := context.WithDeadline(ctx, time.Now().Add(queryTimeout)) + defer cancel() g, ctx := errgroup.WithContext(ctx) - ingesterQueryInterval, _ := q.buildQueryIntervals(*req.Start, *req.End) + + if *req.Start, *req.End, err = validateQueryTimeRangeLimits(ctx, userID, q.limits, *req.Start, *req.End); err != nil { + return nil, err + } + ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(*req.Start, *req.End) + + var ingesterLabels *logproto.LabelToValuesResponse if !q.cfg.QueryStoreOnly && ingesterQueryInterval != nil { g.Go(func() error { var err error @@ -923,7 +938,33 @@ func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto. splitReq.End = &ingesterQueryInterval.end ingesterLabels, err = q.ingesterQuerier.DetectedLabel(ctx, &splitReq) - level.Info(q.logger).Log("msg", ingesterLabels) + return err + }) + } + + storeLabelsMap := make(map[string][]string) + if !q.cfg.QueryIngesterOnly && storeQueryInterval != nil { + var matchers []*labels.Matcher + if req.Query != "" { + matchers, err = syntax.ParseMatchers(req.Query, true) + if err != nil { + return nil, err + } + } + g.Go(func() error { + var err error + start := model.TimeFromUnixNano(storeQueryInterval.start.UnixNano()) + end := model.TimeFromUnixNano(storeQueryInterval.end.UnixNano()) + storeLabels, err := q.store.LabelNamesForMetricName(ctx, userID, start, end, "logs") + for _, label := range storeLabels { + values, err := q.store.LabelValuesForMetricName(ctx, userID, start, end, "logs", label, matchers...) + if err != nil { + return err + } + if q.isLabelRelevant(label, values, staticLabels) { + storeLabelsMap[label] = values + } + } return err }) } @@ -932,18 +973,43 @@ func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto. 
 		return nil, err
 	}
 
-	if ingesterLabels == nil {
+	if ingesterLabels == nil && len(storeLabelsMap) == 0 {
 		return &logproto.DetectedLabelsResponse{
 			DetectedLabels: []*logproto.DetectedLabel{},
 		}, nil
 	}
 
-	for label, values := range ingesterLabels.Labels {
-		if q.isLabelRelevant(label, values) {
-			detectedLabels = append(detectedLabels, &logproto.DetectedLabel{Label: label, Cardinality: uint64(len(values.Values))})
+	// append static labels first so they are in sorted order
+	if ingesterLabels != nil {
+		for l := range staticLabels {
+			if values, present := ingesterLabels.Labels[l]; present {
+				detectedLabels = append(detectedLabels, &logproto.DetectedLabel{Label: l, Cardinality: uint64(len(values.Values))})
+			}
 		}
 	}
 
+	if ingesterLabels != nil {
+		for label, values := range ingesterLabels.Labels {
+			if q.isLabelRelevant(label, values.Values, staticLabels) {
+				combinedValues := values.Values
+				storeValues, storeHasLabel := storeLabelsMap[label]
+				if storeHasLabel {
+					combinedValues = append(combinedValues, storeValues...)
+				}
+
+				slices.Sort(combinedValues)
+				uniqueValues := slices.Compact(combinedValues)
+				// TODO(shantanu): There's a bug here. Unique values can go above 50. Will need a bit of refactoring
+				detectedLabels = append(detectedLabels, &logproto.DetectedLabel{Label: label, Cardinality: uint64(len(uniqueValues))})
+				delete(storeLabelsMap, label)
+			}
+		}
+	}
+
+	for label, values := range storeLabelsMap {
+		slices.Sort(values)
+		uniqueValues := slices.Compact(values)
+		detectedLabels = append(detectedLabels, &logproto.DetectedLabel{Label: label, Cardinality: uint64(len(uniqueValues))})
+	}
+
 	return &logproto.DetectedLabelsResponse{
 		DetectedLabels: detectedLabels,
 	}, nil
@@ -965,13 +1031,13 @@ func (q *SingleTenantQuerier) Patterns(ctx context.Context, req *logproto.QueryP
 	return res, err
 }
 
-func (q *SingleTenantQuerier) isLabelRelevant(label string, values *logproto.UniqueLabelValues) bool {
-	staticLabels := []string{"pod", "namespace", "cluster", "instance"}
-	cardinality := len(values.Values)
-	// TODO(shantanu) make these values configurable
-	if !slices.Contains(staticLabels, label) &&
-		(cardinality < 1 || cardinality > 50) ||
-		containsAllIDTypes(values.Values) {
+// isLabelRelevant reports whether a label is relevant for the logs app: a label is relevant if it is not a static label, not of a numeric, UUID or GUID type,
+// and has between 2 and 50 unique values.
+func (q *SingleTenantQuerier) isLabelRelevant(label string, values []string, staticLabels map[string]struct{}) bool { + cardinality := len(values) + _, isStaticLabel := staticLabels[label] + if isStaticLabel || (cardinality < 2 || cardinality > 50) || + containsAllIDTypes(values) { return false } diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go index 3d5edc50b831..6d025a9e0db5 100644 --- a/pkg/querier/querier_mock_test.go +++ b/pkg/querier/querier_mock_test.go @@ -111,6 +111,16 @@ func (c *querierClientMock) GetChunkIDs(ctx context.Context, in *logproto.GetChu return res.(*logproto.GetChunkIDsResponse), args.Error(1) } +func (c *querierClientMock) GetDetectedLabels(ctx context.Context, in *logproto.DetectedLabelsRequest, opts ...grpc.CallOption) (*logproto.LabelToValuesResponse, error) { + args := c.Called(ctx, in, opts) + res := args.Get(0) + if res == nil { + return (*logproto.LabelToValuesResponse)(nil), args.Error(1) + } + return res.(*logproto.LabelToValuesResponse), args.Error(1) + +} + func (c *querierClientMock) GetVolume(ctx context.Context, in *logproto.VolumeRequest, opts ...grpc.CallOption) (*logproto.VolumeResponse, error) { args := c.Called(ctx, in, opts) res := args.Get(0) diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index df87a72df366..e6c228f04920 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -5,6 +5,7 @@ import ( "errors" "io" "net/http" + "strconv" "testing" "time" @@ -19,6 +20,8 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + util_log "github.com/grafana/loki/v3/pkg/util/log" + "github.com/grafana/loki/v3/pkg/compactor/deletion" "github.com/grafana/loki/v3/pkg/ingester/client" "github.com/grafana/loki/v3/pkg/logproto" @@ -1148,6 +1151,13 @@ func setupIngesterQuerierMocks(conf Config, limits *validation.Overrides) (*quer }, }, }, nil) + ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything).Return(&logproto.DetectedLabelsResponse{ + DetectedLabels: []*logproto.DetectedLabel{ + {Label: "pod", Cardinality: 1}, + {Label: "namespace", Cardinality: 3}, + {Label: "customerId", Cardinality: 200}, + }, + }, nil) store := newStoreMock() store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(0, 1), nil) @@ -1351,7 +1361,7 @@ func TestQuerier_SelectSamplesWithDeletes(t *testing.T) { } func newQuerier(cfg Config, clientCfg client.Config, clientFactory ring_client.PoolFactory, ring ring.ReadRing, dg *mockDeleteGettter, store storage.Store, limits *validation.Overrides) (*SingleTenantQuerier, error) { - iq, err := newIngesterQuerier(clientCfg, ring, cfg.ExtraQueryDelay, clientFactory, constants.Loki) + iq, err := newIngesterQuerier(clientCfg, ring, cfg.ExtraQueryDelay, clientFactory, constants.Loki, util_log.Logger) if err != nil { return nil, err } @@ -1373,44 +1383,306 @@ func TestQuerier_isLabelRelevant(t *testing.T) { for _, tc := range []struct { name string label string - values *logproto.UniqueLabelValues + values []string expected bool }{ { label: "uuidv4 values are not relevant", - values: &logproto.UniqueLabelValues{Values: []string{"751e8ee6-b377-4b2e-b7b5-5508fbe980ef", "6b7e2663-8ecb-42e1-8bdc-0c5de70185b3", "2e1e67ff-be4f-47b8-aee1-5d67ff1ddabf", "c95b2d62-74ed-4ed7-a8a1-eb72fc67946e"}}, + values: []string{"751e8ee6-b377-4b2e-b7b5-5508fbe980ef", "6b7e2663-8ecb-42e1-8bdc-0c5de70185b3", "2e1e67ff-be4f-47b8-aee1-5d67ff1ddabf", "c95b2d62-74ed-4ed7-a8a1-eb72fc67946e"}, expected: false, }, { 
label: "guid values are not relevant", - values: &logproto.UniqueLabelValues{Values: []string{"57808f62-f117-4a22-84a0-bc3282c7f106", "5076e837-cd8d-4dd7-95ff-fecb087dccf6", "2e2a6554-1744-4399-b89a-88ae79c27096", "d3c31248-ec0c-4bc4-b11c-8fb1cfb42e62"}}, + values: []string{"57808f62-f117-4a22-84a0-bc3282c7f106", "5076e837-cd8d-4dd7-95ff-fecb087dccf6", "2e2a6554-1744-4399-b89a-88ae79c27096", "d3c31248-ec0c-4bc4-b11c-8fb1cfb42e62"}, expected: false, }, { label: "integer values are not relevant", - values: &logproto.UniqueLabelValues{Values: []string{"1", "2", "3", "4"}}, + values: []string{"1", "2", "3", "4"}, expected: false, }, { label: "string values are relevant", - values: &logproto.UniqueLabelValues{Values: []string{"ingester", "querier", "query-frontend", "index-gateway"}}, + values: []string{"ingester", "querier", "query-frontend", "index-gateway"}, expected: true, }, { label: "guid with braces are not relevant", - values: &logproto.UniqueLabelValues{Values: []string{"{E9550CF7-58D9-48B9-8845-D9800C651AAC}", "{1617921B-1749-4FF0-A058-31AFB5D98149}", "{C119D92E-A4B9-48A3-A92C-6CA8AA8A6CCC}", "{228AAF1D-2DE7-4909-A4E9-246A7FA9D988}"}}, + values: []string{"{E9550CF7-58D9-48B9-8845-D9800C651AAC}", "{1617921B-1749-4FF0-A058-31AFB5D98149}", "{C119D92E-A4B9-48A3-A92C-6CA8AA8A6CCC}", "{228AAF1D-2DE7-4909-A4E9-246A7FA9D988}"}, expected: false, }, { label: "float values are not relevant", - values: &logproto.UniqueLabelValues{Values: []string{"1.2", "2.5", "3.3", "4.1"}}, + values: []string{"1.2", "2.5", "3.3", "4.1"}, expected: false, }, } { t.Run(tc.name, func(t *testing.T) { querier := &SingleTenantQuerier{cfg: mockQuerierConfig()} - assert.Equal(t, tc.expected, querier.isLabelRelevant(tc.label, tc.values)) + assert.Equal(t, tc.expected, querier.isLabelRelevant(tc.label, tc.values, map[string]struct{}{"host": {}, "cluster": {}, "namespace": {}, "instance": {}, "pod": {}})) }) } } + +func TestQuerier_DetectedLabels(t *testing.T) { + manyValues := []string{} + now := time.Now() + for i := 0; i < 60; i++ { + manyValues = append(manyValues, "a"+strconv.Itoa(i)) + } + + limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) + require.NoError(t, err) + ctx := user.InjectOrgID(context.Background(), "test") + + conf := mockQuerierConfig() + conf.IngesterQueryStoreMaxLookback = 0 + + request := logproto.DetectedLabelsRequest{ + Start: &now, + End: &now, + Query: "", + } + + t.Run("when both store and ingester responses are present, a combined response is returned", func(t *testing.T) { + ingesterResponse := logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ + "cluster": {Values: []string{"ingester"}}, + "ingesterLabel": {Values: []string{"abc", "def", "ghi", "abc"}}, + }} + + ingesterClient := newQuerierClientMock() + storeClient := newStoreMock() + + ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&ingesterResponse, nil) + storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return([]string{"storeLabel"}, nil). + On("LabelValuesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, "storeLabel", mock.Anything). 
+ Return([]string{"val1", "val2"}, nil) + + querier, err := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + storeClient, limits) + require.NoError(t, err) + + resp, err := querier.DetectedLabels(ctx, &request) + require.NoError(t, err) + + calls := ingesterClient.GetMockedCallsByMethod("GetDetectedLabels") + assert.Equal(t, 1, len(calls)) + + detectedLabels := resp.DetectedLabels + assert.Len(t, detectedLabels, 3) + assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "storeLabel", Cardinality: 2}) + assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "ingesterLabel", Cardinality: 3}) + }) + + t.Run("when both store and ingester responses are present, duplicates are removed", func(t *testing.T) { + ingesterResponse := logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ + "cluster": {Values: []string{"ingester"}}, + "ingesterLabel": {Values: []string{"abc", "def", "ghi", "abc"}}, + "commonLabel": {Values: []string{"abc", "def", "ghi", "abc"}}, + }} + + ingesterClient := newQuerierClientMock() + storeClient := newStoreMock() + + ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&ingesterResponse, nil) + storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return([]string{"storeLabel", "commonLabel"}, nil). + On("LabelValuesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, "storeLabel", mock.Anything). + Return([]string{"val1", "val2"}, nil). + On("LabelValuesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, "commonLabel", mock.Anything). + Return([]string{"def", "xyz", "lmo", "abc"}, nil) + + querier, err := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + storeClient, limits) + require.NoError(t, err) + + resp, err := querier.DetectedLabels(ctx, &request) + require.NoError(t, err) + + calls := ingesterClient.GetMockedCallsByMethod("GetDetectedLabels") + assert.Equal(t, 1, len(calls)) + + detectedLabels := resp.DetectedLabels + assert.Len(t, detectedLabels, 4) + assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "storeLabel", Cardinality: 2}) + assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "ingesterLabel", Cardinality: 3}) + assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "commonLabel", Cardinality: 5}) + }) + + t.Run("returns a response when ingester data is empty", func(t *testing.T) { + ingesterClient := newQuerierClientMock() + storeClient := newStoreMock() + + ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&logproto.LabelToValuesResponse{}, nil) + storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return([]string{"storeLabel1", "storeLabel2"}, nil). + On("LabelValuesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, "storeLabel1", mock.Anything). + Return([]string{"val1", "val2"}, nil). + On("LabelValuesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, "storeLabel2", mock.Anything). 
+ Return([]string{"val1", "val2"}, nil) + + querier, err := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + storeClient, limits) + require.NoError(t, err) + + resp, err := querier.DetectedLabels(ctx, &request) + require.NoError(t, err) + + detectedLabels := resp.DetectedLabels + assert.Len(t, detectedLabels, 2) + assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "storeLabel1", Cardinality: 2}) + assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "storeLabel2", Cardinality: 2}) + }) + + t.Run("returns a response when store data is empty", func(t *testing.T) { + ingesterResponse := logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ + "cluster": {Values: []string{"ingester"}}, + "ingesterLabel": {Values: []string{"abc", "def", "ghi", "abc"}}, + }} + + ingesterClient := newQuerierClientMock() + storeClient := newStoreMock() + + ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&ingesterResponse, nil) + storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return([]string{}, nil) + + querier, err := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + storeClient, limits) + require.NoError(t, err) + + resp, err := querier.DetectedLabels(ctx, &request) + require.NoError(t, err) + + detectedLabels := resp.DetectedLabels + assert.Len(t, detectedLabels, 2) + assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "cluster", Cardinality: 1}) + assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "ingesterLabel", Cardinality: 3}) + }) + + t.Run("id types like uuids, guids and numbers are not relevant detected labels", func(t *testing.T) { + ingesterResponse := logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ + "all-ints": {Values: []string{"1", "2", "3", "4"}}, + "all-floats": {Values: []string{"1.2", "2.3", "3.4", "4.5"}}, + "all-uuids": {Values: []string{"751e8ee6-b377-4b2e-b7b5-5508fbe980ef", "6b7e2663-8ecb-42e1-8bdc-0c5de70185b3", "2e1e67ff-be4f-47b8-aee1-5d67ff1ddabf", "c95b2d62-74ed-4ed7-a8a1-eb72fc67946e"}}, + }} + + ingesterClient := newQuerierClientMock() + storeClient := newStoreMock() + + ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&ingesterResponse, nil) + storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return([]string{}, nil) + + querier, err := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + storeClient, limits) + require.NoError(t, err) + + resp, err := querier.DetectedLabels(ctx, &request) + require.NoError(t, err) + + detectedLabels := resp.DetectedLabels + assert.Len(t, detectedLabels, 0) + }) + + t.Run("labels with more than required cardinality are not relevant", func(t *testing.T) { + ingesterResponse := logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ + "less-than-m-values": {Values: []string{"val1"}}, + "more-than-n-values": {Values: manyValues}, + }} + + ingesterClient := newQuerierClientMock() + storeClient := newStoreMock() + + ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&ingesterResponse, nil) + storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return([]string{}, nil) + + querier, err := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + storeClient, limits) + require.NoError(t, err) + + resp, err := querier.DetectedLabels(ctx, &request) + require.NoError(t, err) + + detectedLabels := resp.DetectedLabels + assert.Len(t, detectedLabels, 0) + }) + + t.Run("static labels are always returned no matter their cardinality or value types", func(t *testing.T) { + ingesterResponse := logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ + "cluster": {Values: []string{"val1"}}, + "namespace": {Values: manyValues}, + "pod": {Values: []string{"1", "2", "3", "4"}}, + }} + + ingesterClient := newQuerierClientMock() + storeClient := newStoreMock() + + ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&ingesterResponse, nil) + storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return([]string{}, nil) + request := logproto.DetectedLabelsRequest{ + Start: &now, + End: &now, + Query: "", + } + + querier, err := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + storeClient, limits) + require.NoError(t, err) + + resp, err := querier.DetectedLabels(ctx, &request) + require.NoError(t, err) + + detectedLabels := resp.DetectedLabels + assert.Len(t, detectedLabels, 3) + assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "cluster", Cardinality: 1}) + assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "pod", Cardinality: 4}) + assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "namespace", Cardinality: 60}) + }) +} diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 874313d49826..f73eef10d557 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -269,18 +269,6 @@ type DetectedLabelsRequest struct { logproto.DetectedLabelsRequest } -// NewDetectedLabelsRequest creates a new request for detected labels -func NewDetectedLabelsRequest(start, end time.Time, query, path string) *DetectedLabelsRequest { - return &DetectedLabelsRequest{ - DetectedLabelsRequest: logproto.DetectedLabelsRequest{ - Start: &start, - End: &end, - Query: query, - }, - path: path, - } -} - func (r *DetectedLabelsRequest) AsProto() *logproto.DetectedLabelsRequest { return &r.DetectedLabelsRequest } diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index e5b9db82cd27..ff7c4ba4dbff 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -253,6 +253,19 @@ func NewMiddleware( return nil, nil, err } + detectedLabelsTripperware, err := NewDetectedLabelsTripperware( + cfg, + engineOpts, + log, + limits, + schema, + metrics, + indexStatsTripperware, + metricsNamespace) + + if err != nil { + return nil, nil, err + } return base.MiddlewareFunc(func(next base.Handler) base.Handler { var ( metricRT = metricsTripperware.Wrap(next) @@ -264,13 +277,41 @@ func NewMiddleware( statsRT = indexStatsTripperware.Wrap(next) seriesVolumeRT = seriesVolumeTripperware.Wrap(next) detectedFieldsRT = detectedFieldsTripperware.Wrap(next) - detectedLabelsRT = next // TODO(shantanu): add middlewares + detectedLabelsRT = detectedLabelsTripperware.Wrap(next) ) return newRoundTripper(log, next, limitedRT, logFilterRT, metricRT, seriesRT, labelsRT, instantRT, statsRT, seriesVolumeRT, detectedFieldsRT, detectedLabelsRT, limits) }), StopperWrapper{resultsCache, statsCache, volumeCache}, nil } +func NewDetectedLabelsTripperware(cfg Config, opts logql.EngineOpts, logger log.Logger, l Limits, schema config.SchemaConfig, metrics *Metrics, mw base.Middleware, namespace string) (base.Middleware, error) { + return base.MiddlewareFunc(func(next base.Handler) base.Handler { + statsHandler := mw.Wrap(next) + + queryRangeMiddleware := []base.Middleware{ + StatsCollectorMiddleware(), + NewLimitsMiddleware(l), + NewQuerySizeLimiterMiddleware(schema.Configs, opts, logger, l, statsHandler), + base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), + } + + // The sharding middleware takes care of enforcing this limit for both shardable and non-shardable queries. + // If we are not using sharding, we enforce the limit by adding this middleware after time splitting. 
+		queryRangeMiddleware = append(queryRangeMiddleware,
+			NewQuerierSizeLimiterMiddleware(schema.Configs, opts, logger, l, statsHandler),
+		)
+
+		if cfg.MaxRetries > 0 {
+			queryRangeMiddleware = append(
+				queryRangeMiddleware, base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics),
+				base.NewRetryMiddleware(logger, cfg.MaxRetries, metrics.RetryMiddlewareMetrics, namespace),
+			)
+		}
+
+		return NewLimitedRoundTripper(next, l, schema.Configs, queryRangeMiddleware...)
+	}), nil
+}
+
 type roundTripper struct {
 	logger log.Logger
 

From 210ea93a690b1b9746b3ff62bbd5d217a3bc8e8e Mon Sep 17 00:00:00 2001
From: Trevor Whitney
Date: Tue, 23 Apr 2024 10:18:02 -0600
Subject: [PATCH 06/16] feat: add detected-fields command to logcli (#12739)

---
 cmd/logcli/main.go             | 74 ++++++++++++++++++++++++++++++++++
 pkg/logcli/client/client.go    | 53 +++++++++++++++++++-----
 pkg/logcli/client/file.go      | 17 ++++++--
 pkg/logcli/detected/fields.go  | 57 ++++++++++++++++++++++++++
 pkg/logcli/query/query_test.go | 10 +++++
 pkg/loghttp/detected.go        | 14 +++++++
 6 files changed, 211 insertions(+), 14 deletions(-)
 create mode 100644 pkg/logcli/detected/fields.go
 create mode 100644 pkg/loghttp/detected.go

diff --git a/cmd/logcli/main.go b/cmd/logcli/main.go
index 3d2aa85297b3..e4e04da8d665 100644
--- a/cmd/logcli/main.go
+++ b/cmd/logcli/main.go
@@ -16,6 +16,7 @@ import (
 	"gopkg.in/alecthomas/kingpin.v2"
 
 	"github.com/grafana/loki/v3/pkg/logcli/client"
+	"github.com/grafana/loki/v3/pkg/logcli/detected"
 	"github.com/grafana/loki/v3/pkg/logcli/index"
 	"github.com/grafana/loki/v3/pkg/logcli/labelquery"
 	"github.com/grafana/loki/v3/pkg/logcli/output"
@@ -253,6 +254,39 @@ Example: 'my-query'
 `)
 	volumeRangeQuery = newVolumeQuery(true, volumeRangeCmd)
 
+	detectedFieldsCmd = app.Command("detected-fields", `Run a query for detected fields.
+
+The "detected-fields" command will return information about fields detected using either
+the "logfmt" or "json" parser against the log lines returned by the provided query for the
+provided time range.
+
+The "detected-fields" command will output extra information about the query
+and its results, such as the API URL, set of common labels, and set
+of excluded labels. This extra information can be suppressed with the
+--quiet flag.
+
+By default we look over the last hour of data; use --since to modify
+or provide specific start and end times with --from and --to respectively.
+
+Note that when using --from and --to, you need to use the RFC3339Nano
+time format, but without a timezone at the end. The local timezone will be added
+automatically, or you can set one with the --timezone flag.
+
+Example:
+
+	logcli detected-fields
+		--timezone=UTC
+		--from="2021-01-19T10:00:00Z"
+		--to="2021-01-19T20:00:00Z"
+		--output=jsonl
+		'my-query'
+
+The output is limited to 100 fields by default; use --field-limit to increase.
+The query is limited to processing 1000 lines per subquery; use --line-limit to increase.
+`) + + detectedFieldsQuery = newDetectedFieldsQuery(detectedFieldsCmd) ) func main() { @@ -388,6 +422,8 @@ func main() { } else { index.GetVolume(volumeQuery, queryClient, out, *statistics) } + case detectedFieldsCmd.FullCommand(): + detectedFieldsQuery.Do(queryClient, *outputMode) } } @@ -652,3 +688,41 @@ func newVolumeQuery(rangeQuery bool, cmd *kingpin.CmdClause) *volume.Query { return q } + +func newDetectedFieldsQuery(cmd *kingpin.CmdClause) *detected.FieldsQuery { + // calculate query range from cli params + var from, to string + var since time.Duration + + q := &detected.FieldsQuery{} + + // executed after all command flags are parsed + cmd.Action(func(c *kingpin.ParseContext) error { + defaultEnd := time.Now() + defaultStart := defaultEnd.Add(-since) + + q.Start = mustParse(from, defaultStart) + q.End = mustParse(to, defaultEnd) + + q.Quiet = *quiet + + return nil + }) + + cmd.Flag("field-limit", "Limit on number of fields to return."). + Default("100"). + IntVar(&q.FieldLimit) + cmd.Flag("line-limit", "Limit the number of lines each subquery is allowed to process."). + Default("1000"). + IntVar(&q.LineLimit) + cmd.Arg("query", "eg '{foo=\"bar\",baz=~\".*blip\"} |~ \".*error.*\"'"). + Required(). + StringVar(&q.QueryString) + cmd.Flag("since", "Lookback window.").Default("1h").DurationVar(&since) + cmd.Flag("from", "Start looking for logs at this absolute time (inclusive)").StringVar(&from) + cmd.Flag("to", "Stop looking for logs at this absolute time (exclusive)").StringVar(&to) + cmd.Flag("step", "Query resolution step width, for metric queries. Evaluate the query at the specified step over the time range."). + DurationVar(&q.Step) + + return q +} diff --git a/pkg/logcli/client/client.go b/pkg/logcli/client/client.go index 73ddccd7efd1..e417ccfa3ce5 100644 --- a/pkg/logcli/client/client.go +++ b/pkg/logcli/client/client.go @@ -28,16 +28,17 @@ import ( ) const ( - queryPath = "/loki/api/v1/query" - queryRangePath = "/loki/api/v1/query_range" - labelsPath = "/loki/api/v1/labels" - labelValuesPath = "/loki/api/v1/label/%s/values" - seriesPath = "/loki/api/v1/series" - tailPath = "/loki/api/v1/tail" - statsPath = "/loki/api/v1/index/stats" - volumePath = "/loki/api/v1/index/volume" - volumeRangePath = "/loki/api/v1/index/volume_range" - defaultAuthHeader = "Authorization" + queryPath = "/loki/api/v1/query" + queryRangePath = "/loki/api/v1/query_range" + labelsPath = "/loki/api/v1/labels" + labelValuesPath = "/loki/api/v1/label/%s/values" + seriesPath = "/loki/api/v1/series" + tailPath = "/loki/api/v1/tail" + statsPath = "/loki/api/v1/index/stats" + volumePath = "/loki/api/v1/index/volume" + volumeRangePath = "/loki/api/v1/index/volume_range" + detectedFieldsPath = "/loki/api/v1/detected_fields" + defaultAuthHeader = "Authorization" ) var userAgent = fmt.Sprintf("loki-logcli/%s", build.Version) @@ -54,6 +55,7 @@ type Client interface { GetStats(queryStr string, start, end time.Time, quiet bool) (*logproto.IndexStatsResponse, error) GetVolume(query *volume.Query) (*loghttp.QueryResponse, error) GetVolumeRange(query *volume.Query) (*loghttp.QueryResponse, error) + GetDetectedFields(queryStr string, fieldLimit, lineLimit int, start, end time.Time, step time.Duration, quiet bool) (*loghttp.DetectedFieldsResponse, error) } // Tripperware can wrap a roundtripper. 
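For reviewers, a minimal usage sketch of the command these changes add (illustrative only, not part of the patch; the Loki address and the `{job="nginx"}` selector are assumptions, while the command name and flags are the ones registered above):

```bash
# Query detected fields over the last hour against a local Loki instance.
# --field-limit and --line-limit mirror the defaults declared in newDetectedFieldsQuery.
logcli --addr="http://localhost:3100" detected-fields \
  --since=1h \
  --field-limit=100 \
  --line-limit=1000 \
  '{job="nginx"} | logfmt'
```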
@@ -224,7 +226,36 @@ func (c *DefaultClient) getVolume(path string, query *volume.Query) (*loghttp.Qu return &resp, nil } -func (c *DefaultClient) doQuery(path string, query string, quiet bool) (*loghttp.QueryResponse, error) { +func (c *DefaultClient) GetDetectedFields( + queryStr string, + fieldLimit, lineLimit int, + start, end time.Time, + step time.Duration, + quiet bool, +) (*loghttp.DetectedFieldsResponse, error) { + qsb := util.NewQueryStringBuilder() + qsb.SetString("query", queryStr) + qsb.SetInt("field_limit", int64(fieldLimit)) + qsb.SetInt("line_limit", int64(lineLimit)) + qsb.SetInt("start", start.UnixNano()) + qsb.SetInt("end", end.UnixNano()) + qsb.SetString("step", step.String()) + + var err error + var r loghttp.DetectedFieldsResponse + + if err = c.doRequest(detectedFieldsPath, qsb.Encode(), quiet, &r); err != nil { + return nil, err + } + + return &r, nil +} + +func (c *DefaultClient) doQuery( + path string, + query string, + quiet bool, +) (*loghttp.QueryResponse, error) { var err error var r loghttp.QueryResponse diff --git a/pkg/logcli/client/file.go b/pkg/logcli/client/file.go index dd0432a79e17..34b76422d4bc 100644 --- a/pkg/logcli/client/file.go +++ b/pkg/logcli/client/file.go @@ -190,17 +190,28 @@ func (f *FileClient) GetOrgID() string { } func (f *FileClient) GetStats(_ string, _, _ time.Time, _ bool) (*logproto.IndexStatsResponse, error) { - // TODO(trevorwhitney): could we teach logcli to read from an actual index file? + // TODO(twhitney): could we teach logcli to read from an actual index file? return nil, ErrNotSupported } func (f *FileClient) GetVolume(_ *volume.Query) (*loghttp.QueryResponse, error) { - // TODO(trevorwhitney): could we teach logcli to read from an actual index file? + // TODO(twhitney): could we teach logcli to read from an actual index file? return nil, ErrNotSupported } func (f *FileClient) GetVolumeRange(_ *volume.Query) (*loghttp.QueryResponse, error) { - // TODO(trevorwhitney): could we teach logcli to read from an actual index file? + // TODO(twhitney): could we teach logcli to read from an actual index file? + return nil, ErrNotSupported +} + +func (f *FileClient) GetDetectedFields( + _ string, + _, _ int, + _, _ time.Time, + _ time.Duration, + _ bool, +) (*loghttp.DetectedFieldsResponse, error) { + // TODO(twhitney): could we teach logcli to do this? 
 	return nil, ErrNotSupported
 }
diff --git a/pkg/logcli/detected/fields.go b/pkg/logcli/detected/fields.go
new file mode 100644
index 000000000000..f8ba585ea2a0
--- /dev/null
+++ b/pkg/logcli/detected/fields.go
@@ -0,0 +1,57 @@
+package detected
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"slices"
+	"strings"
+	"time"
+
+	"github.com/fatih/color"
+
+	"github.com/grafana/loki/v3/pkg/logcli/client"
+	"github.com/grafana/loki/v3/pkg/loghttp"
+)
+
+type FieldsQuery struct {
+	QueryString   string
+	Start         time.Time
+	End           time.Time
+	FieldLimit    int
+	LineLimit     int
+	Step          time.Duration
+	Quiet         bool
+	ColoredOutput bool
+}
+
+// Do executes the query and prints out the results
+func (q *FieldsQuery) Do(c client.Client, outputMode string) {
+	var resp *loghttp.DetectedFieldsResponse
+	var err error
+
+	resp, err = c.GetDetectedFields(q.QueryString, q.FieldLimit, q.LineLimit, q.Start, q.End, q.Step, q.Quiet)
+	if err != nil {
+		log.Fatalf("Error doing request: %+v", err)
+	}
+
+	switch outputMode {
+	case "raw":
+		out, err := json.Marshal(resp)
+		if err != nil {
+			log.Fatalf("Error marshalling response: %+v", err)
+		}
+		fmt.Println(string(out))
+	default:
+		output := make([]string, len(resp.Fields))
+		for i, field := range resp.Fields {
+			bold := color.New(color.Bold)
+			output[i] = fmt.Sprintf("label: %s\t\t", bold.Sprintf("%s", field.Label)) +
+				fmt.Sprintf("type: %s\t\t", bold.Sprintf("%s", field.Type)) +
+				fmt.Sprintf("cardinality: %s", bold.Sprintf("%d", field.Cardinality))
+		}
+
+		slices.Sort(output)
+		fmt.Println(strings.Join(output, "\n"))
+	}
+}
diff --git a/pkg/logcli/query/query_test.go b/pkg/logcli/query/query_test.go
index c7543fa2288d..fffdcf15444b 100644
--- a/pkg/logcli/query/query_test.go
+++ b/pkg/logcli/query/query_test.go
@@ -485,6 +485,16 @@ func (t *testQueryClient) GetVolumeRange(_ *volume.Query) (*loghttp.QueryRespons
 	panic("not implemented")
 }
 
+func (t *testQueryClient) GetDetectedFields(
+	_ string,
+	_, _ int,
+	_, _ time.Time,
+	_ time.Duration,
+	_ bool,
+) (*loghttp.DetectedFieldsResponse, error) {
+	panic("not implemented")
+}
+
 var legacySchemaConfigContents = `schema_config:
   configs:
     - from: 2020-05-15
diff --git a/pkg/loghttp/detected.go b/pkg/loghttp/detected.go
new file mode 100644
index 000000000000..d255bf6124a7
--- /dev/null
+++ b/pkg/loghttp/detected.go
@@ -0,0 +1,14 @@
+package loghttp
+
+import "github.com/grafana/loki/v3/pkg/logproto"
+
+// DetectedFieldsResponse represents the HTTP JSON response to a detected fields query
+type DetectedFieldsResponse struct {
+	Fields []DetectedField `json:"fields,omitempty"`
+}
+
+type DetectedField struct {
+	Label       string                     `json:"label,omitempty"`
+	Type        logproto.DetectedFieldType `json:"type,omitempty"`
+	Cardinality uint64                     `json:"cardinality,omitempty"`
+}

From 1161846e19105e2669a5b388998722c23bd0f2f4 Mon Sep 17 00:00:00 2001
From: Travis Patterson
Date: Tue, 23 Apr 2024 10:21:36 -0600
Subject: [PATCH 07/16] fix(docs): Move promtail configuration to the correct
 doc (#12737)

---
 docs/sources/send-data/promtail/configuration.md | 7 +++++++
 docs/sources/shared/configuration.md             | 7 -------
 docs/templates/configuration.template            | 7 -------
 3 files changed, 7 insertions(+), 14 deletions(-)

diff --git a/docs/sources/send-data/promtail/configuration.md b/docs/sources/send-data/promtail/configuration.md
index ce1e329c7ea0..7d210b7ec47a 100644
--- a/docs/sources/send-data/promtail/configuration.md
+++ b/docs/sources/send-data/promtail/configuration.md
@@ -43,6 +43,13 @@ For more detailed information on configuring how to discover and scrape
logs from targets, see [Scraping]({{< relref "./scraping" >}}). For more information on transforming logs from scraped targets, see [Pipelines]({{< relref "./pipelines" >}}).
 
+## Reload at runtime
+
+Promtail can reload its configuration at runtime. If the new configuration
+is not well-formed, the changes will not be applied.
+A configuration reload is triggered by sending a `SIGHUP` to the Promtail process or
+sending an HTTP POST request to the `/reload` endpoint (when the `--server.enable-runtime-reload` flag is enabled).
+
 ### Use environment variables in the configuration
 
 You can use environment variable references in the configuration file to set values that need to be configurable during deployment.
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index 8c2413e26250..be44ae74ca75 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -24,13 +24,6 @@ is especially useful in making sure your config files and flags are being read
 a `-log-config-reverse-order` is the flag we run Loki with in all our environments, the config entries are reversed so
 that the order of configs reads correctly top to bottom when viewed in Grafana's Explore.
 
-## Reload at runtime
-
-Promtail can reload its configuration at runtime. If the new configuration
-is not well-formed, the changes will not be applied.
-A configuration reload is triggered by sending a `SIGHUP` to the Promtail process or
-sending a HTTP POST request to the `/reload` endpoint (when the `--server.enable-runtime-reload` flag is enabled).
-
 ## Configuration file reference
 
 To specify which configuration file to load, pass the `-config.file` flag at the
diff --git a/docs/templates/configuration.template b/docs/templates/configuration.template
index d5a9f750559d..47945146b210 100644
--- a/docs/templates/configuration.template
+++ b/docs/templates/configuration.template
@@ -24,13 +24,6 @@ is especially useful in making sure your config files and flags are being read
 a `-log-config-reverse-order` is the flag we run Loki with in all our environments, the config entries are reversed so
 that the order of configs reads correctly top to bottom when viewed in Grafana's Explore.
 
-## Reload at runtime
-
-Promtail can reload its configuration at runtime. If the new configuration
-is not well-formed, the changes will not be applied.
-A configuration reload is triggered by sending a `SIGHUP` to the Promtail process or
-sending a HTTP POST request to the `/reload` endpoint (when the `--server.enable-runtime-reload` flag is enabled).
- ## Configuration file reference To specify which configuration file to load, pass the `-config.file` flag at the From dce86bfadae08a0a2733f285b170a2158dd71448 Mon Sep 17 00:00:00 2001 From: Callum Styan Date: Tue, 23 Apr 2024 11:24:11 -0700 Subject: [PATCH 08/16] ci: add a github workflow to verify that the release workflows are correct (#12603) Signed-off-by: Callum Styan --- .github/jsonnetfile.json | 2 +- .github/jsonnetfile.lock.json | 4 ++-- .github/release-workflows.jsonnet | 5 +---- .../loki-release/workflows/common.libsonnet | 2 +- .github/workflows/check.yml | 2 +- .github/workflows/minor-release-pr.yml | 6 +++--- .github/workflows/patch-release-pr.yml | 6 +++--- .github/workflows/release.yml | 6 +++--- .github/workflows/verify-release-workflow.yaml | 18 ++++++++++++++++++ Makefile | 6 ++++++ 10 files changed, 39 insertions(+), 18 deletions(-) create mode 100644 .github/workflows/verify-release-workflow.yaml diff --git a/.github/jsonnetfile.json b/.github/jsonnetfile.json index 605c762e9f8c..92b8f0db112b 100644 --- a/.github/jsonnetfile.json +++ b/.github/jsonnetfile.json @@ -8,7 +8,7 @@ "subdir": "workflows" } }, - "version": "main" + "version": "124c4d996f9625478a79f1884465e29ea082d224" } ], "legacyImports": true diff --git a/.github/jsonnetfile.lock.json b/.github/jsonnetfile.lock.json index 395ab9190e3e..4ec13c933738 100644 --- a/.github/jsonnetfile.lock.json +++ b/.github/jsonnetfile.lock.json @@ -8,8 +8,8 @@ "subdir": "workflows" } }, - "version": "634945b73e8eed4f5161ec08810178ddeca7505b", - "sum": "BOnwSjzyOjWwv9ikwJSAgPBNnYHTU2PEDJ0PWY6nr7I=" + "version": "124c4d996f9625478a79f1884465e29ea082d224", + "sum": "8wrJURq48ZBAtZcReO1W7AiXmvUyLqb932Q9sXyfFVo=" } ], "legacyImports": false diff --git a/.github/release-workflows.jsonnet b/.github/release-workflows.jsonnet index bf13bb5da1bb..d21195260494 100644 --- a/.github/release-workflows.jsonnet +++ b/.github/release-workflows.jsonnet @@ -1,10 +1,7 @@ local lokiRelease = import 'workflows/main.jsonnet'; local build = lokiRelease.build; -local releaseLibRef = std.filter( - function(dep) dep.source.git.remote == 'https://github.com/grafana/loki-release.git', - (import 'jsonnetfile.json').dependencies -)[0].version; +local releaseLibRef = 'main'; local checkTemplate = 'grafana/loki-release/.github/workflows/check.yml@%s' % releaseLibRef; diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/common.libsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/common.libsonnet index 48cca43890ed..03f4aa112045 100644 --- a/.github/vendor/github.com/grafana/loki-release/workflows/common.libsonnet +++ b/.github/vendor/github.com/grafana/loki-release/workflows/common.libsonnet @@ -123,7 +123,7 @@ git config --global --add safe.directory "$GITHUB_WORKSPACE" |||), - githubAppToken: $.step.new('get github app token', 'actions/github-app-token@v1') + githubAppToken: $.step.new('get github app token', 'actions/create-github-app-token@v1') + $.step.withId('get_github_app_token') + $.step.withIf('${{ fromJSON(env.USE_GITHUB_APP_TOKEN) }}') + $.step.with({ diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index b1ccc391fdb3..8d8c4acf0dad 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -12,4 +12,4 @@ "pull_request": {} "push": "branches": - - "main" + - "main" \ No newline at end of file diff --git a/.github/workflows/minor-release-pr.yml b/.github/workflows/minor-release-pr.yml index bc70363da1bb..a9e6148fb7c2 100644 --- a/.github/workflows/minor-release-pr.yml +++ 
b/.github/workflows/minor-release-pr.yml @@ -58,7 +58,7 @@ jobs: - id: "get_github_app_token" if: "${{ fromJSON(env.USE_GITHUB_APP_TOKEN) }}" name: "get github app token" - uses: "actions/github-app-token@v1" + uses: "actions/create-github-app-token@v1" with: app-id: "${{ secrets.APP_ID }}" owner: "${{ github.repository_owner }}" @@ -753,7 +753,7 @@ jobs: - id: "get_github_app_token" if: "${{ fromJSON(env.USE_GITHUB_APP_TOKEN) }}" name: "get github app token" - uses: "actions/github-app-token@v1" + uses: "actions/create-github-app-token@v1" with: app-id: "${{ secrets.APP_ID }}" owner: "${{ github.repository_owner }}" @@ -826,4 +826,4 @@ name: "Prepare Minor Release PR from Weekly" permissions: contents: "write" id-token: "write" - pull-requests: "write" + pull-requests: "write" \ No newline at end of file diff --git a/.github/workflows/patch-release-pr.yml b/.github/workflows/patch-release-pr.yml index 831ca48b4087..63e235946815 100644 --- a/.github/workflows/patch-release-pr.yml +++ b/.github/workflows/patch-release-pr.yml @@ -58,7 +58,7 @@ jobs: - id: "get_github_app_token" if: "${{ fromJSON(env.USE_GITHUB_APP_TOKEN) }}" name: "get github app token" - uses: "actions/github-app-token@v1" + uses: "actions/create-github-app-token@v1" with: app-id: "${{ secrets.APP_ID }}" owner: "${{ github.repository_owner }}" @@ -753,7 +753,7 @@ jobs: - id: "get_github_app_token" if: "${{ fromJSON(env.USE_GITHUB_APP_TOKEN) }}" name: "get github app token" - uses: "actions/github-app-token@v1" + uses: "actions/create-github-app-token@v1" with: app-id: "${{ secrets.APP_ID }}" owner: "${{ github.repository_owner }}" @@ -826,4 +826,4 @@ name: "Prepare Patch Release PR" permissions: contents: "write" id-token: "write" - pull-requests: "write" + pull-requests: "write" \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index cecbee6513b7..0bbf7eb48465 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -46,7 +46,7 @@ jobs: - id: "get_github_app_token" if: "${{ fromJSON(env.USE_GITHUB_APP_TOKEN) }}" name: "get github app token" - uses: "actions/github-app-token@v1" + uses: "actions/create-github-app-token@v1" with: app-id: "${{ secrets.APP_ID }}" owner: "${{ github.repository_owner }}" @@ -158,7 +158,7 @@ jobs: - id: "get_github_app_token" if: "${{ fromJSON(env.USE_GITHUB_APP_TOKEN) }}" name: "get github app token" - uses: "actions/github-app-token@v1" + uses: "actions/create-github-app-token@v1" with: app-id: "${{ secrets.APP_ID }}" owner: "${{ github.repository_owner }}" @@ -219,4 +219,4 @@ name: "create release" permissions: contents: "write" id-token: "write" - pull-requests: "write" + pull-requests: "write" \ No newline at end of file diff --git a/.github/workflows/verify-release-workflow.yaml b/.github/workflows/verify-release-workflow.yaml new file mode 100644 index 000000000000..6375a2dec8ba --- /dev/null +++ b/.github/workflows/verify-release-workflow.yaml @@ -0,0 +1,18 @@ +name: Verify release workflow updates +on: [pull_request] +jobs: + check-release-changes: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: setup go + uses: actions/setup-go@v5 + with: + go-version: '1.22.2' + - name: setup jsonnet + run: | + go install github.com/google/go-jsonnet/cmd/jsonnet@v0.20.0 + go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@v0.5.1 + - name: Check that the release workflows have been updated properly + run: | + make BUILD_IN_CONTAINER=false release-workflows-check \ No newline at end of file 
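Reviewer note: a rough local equivalent of the new check (illustrative; it mirrors the setup steps in the workflow above and the `release-workflows-check` target added to the Makefile below, and assumes Go is on PATH):

```bash
# Install the pinned jsonnet toolchain the workflow uses.
go install github.com/google/go-jsonnet/cmd/jsonnet@v0.20.0
go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@v0.5.1

# Regenerate the release workflows from .github/release-workflows.jsonnet,
# then fail if the committed YAML no longer matches the generated output.
make BUILD_IN_CONTAINER=false release-workflows-check
```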
diff --git a/Makefile b/Makefile
index 1a05bf2ffc6f..01c68a4052ee 100644
--- a/Makefile
+++ b/Makefile
@@ -900,3 +900,9 @@ scan-vulnerabilities: trivy snyk
 release-workflows:
 	pushd $(CURDIR)/.github && jb update && popd
 	jsonnet -SJ .github/vendor -m .github/workflows .github/release-workflows.jsonnet
+
+.PHONY: release-workflows-check
+release-workflows-check:
+	@$(MAKE) release-workflows
+	@echo "Checking diff"
+	@git diff --exit-code -- ".github/workflows/*release*" || (echo "Please build release workflows by running 'make release-workflows'" && false)

From ae180d6e070946eb5359ecd63a9e01e02f160ce3 Mon Sep 17 00:00:00 2001
From: Trevor Whitney
Date: Tue, 23 Apr 2024 12:41:03 -0600
Subject: [PATCH 09/16] fix(nix): lambda-promtail vendor hash (#12763)

---
 nix/packages/loki.nix         | 2 +-
 pkg/bloomgateway/processor.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/nix/packages/loki.nix b/nix/packages/loki.nix
index e88fac6b6bb0..977161460eb8 100644
--- a/nix/packages/loki.nix
+++ b/nix/packages/loki.nix
@@ -5,7 +5,7 @@ let
     pname = "lambda-promtail";
     src = ./../../tools/lambda-promtail;
 
-    vendorHash = "sha256-PBdPIrN0aWO38bgoAg6jZlY7scpUM2tAjJ6bMN4SQt8=";
+    vendorHash = "sha256-CKob173T0VHD5c8F26aU7p1l+QzqddNM4qQedMbLJa0=";
 
     doCheck = false;
 
diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go
index 8f79e417ab9f..d94b305a9b26 100644
--- a/pkg/bloomgateway/processor.go
+++ b/pkg/bloomgateway/processor.go
@@ -63,7 +63,7 @@ func (p *processor) runWithBounds(ctx context.Context, tasks []Task, bounds v1.M
 	return nil
 }
 
-func (p *processor) processTasks(ctx context.Context, tenant string, day config.DayTime, keyspaces v1.MultiFingerprintBounds, tasks []Task) error {
+func (p *processor) processTasks(ctx context.Context, tenant string, day config.DayTime, _ v1.MultiFingerprintBounds, tasks []Task) error {
 	level.Info(p.logger).Log("msg", "process tasks for day", "tenant", tenant, "tasks", len(tasks), "day", day.String())
 
 	var duration time.Duration

From 72534449a07cd9f410973f2d01772024e8e4b7ba Mon Sep 17 00:00:00 2001
From: Lukas Juozas Janusaitis <74900682+LukoJy3D@users.noreply.github.com>
Date: Tue, 23 Apr 2024 22:42:27 +0300
Subject: [PATCH 10/16] fix(workflows): don't run metric collector on forks
 (#12687)

---
 .github/workflows/metrics-collector.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/metrics-collector.yml b/.github/workflows/metrics-collector.yml
index 5b227db2e8e8..5827dd8d324c 100644
--- a/.github/workflows/metrics-collector.yml
+++ b/.github/workflows/metrics-collector.yml
@@ -5,6 +5,7 @@ on:
 
 jobs:
   main:
+    if: github.repository_owner == 'grafana'
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Actions

From 282e38548ceb96b1c518010c47b8eabf4317e8fd Mon Sep 17 00:00:00 2001
From: Jay Clifford <45856600+Jayclifford345@users.noreply.github.com>
Date: Tue, 23 Apr 2024 16:40:33 -0400
Subject: [PATCH 11/16] feat: Update getting started demo to Loki 3.0 (#12723)

Co-authored-by: J Stickler
---
 docs/sources/get-started/quick-start.md       | 19 +++++++-----
 .../getting-started/alloy-local-config.yaml   | 30 +++++++++++++++++++
 examples/getting-started/docker-compose.yaml  | 29 ++++++++++++++----
 examples/getting-started/loki-config.yaml     | 16 ++++++++--
 .../promtail-local-config.yaml                | 22 --------------
 5 files changed, 79 insertions(+), 37 deletions(-)
 create mode 100644 examples/getting-started/alloy-local-config.yaml
 delete mode 100644 examples/getting-started/promtail-local-config.yaml

diff --git a/docs/sources/get-started/quick-start.md 
b/docs/sources/get-started/quick-start.md index 16e14be923ac..b08f07a8e797 100644 --- a/docs/sources/get-started/quick-start.md +++ b/docs/sources/get-started/quick-start.md @@ -12,14 +12,15 @@ If you want to experiment with Loki, you can run Loki locally using the Docker C The Docker Compose configuration instantiates the following components, each in its own container: - **flog** a sample application which generates log lines. [flog](https://github.com/mingrammer/flog) is a log generator for common log formats. -- **Promtail** which scrapes the log lines from flog, and pushes them to Loki through the gateway. +- **Grafana Alloy** which scrapes the log lines from flog, and pushes them to Loki through the gateway. - **Gateway** (NGINX) which receives requests and redirects them to the appropriate container based on the request's URL. -- One Loki **read** component. -- One Loki **write** component. +- One Loki **read** component (Query Frontend, Querier). +- One Loki **write** component (Distributor, Ingester). +- One Loki **backend** component (Index Gateway, Compactor, Ruler, Bloom Compactor (Experimental), Bloom Gateway (Experimental)). - **Minio** an S3-compatible object store which Loki uses to store its index and chunks. - **Grafana** which provides visualization of the log lines captured within Loki. -{{< figure max-width="75%" src="/media/docs/loki/get-started-flog-v2.png" caption="Getting started sample application" alt="Getting started sample application">}} +{{< figure max-width="75%" src="/media/docs/loki/get-started-flog-v3.png" caption="Getting started sample application" alt="Getting started sample application">}} ## Installing Loki and collecting sample logs @@ -41,11 +42,11 @@ This quickstart assumes you are running Linux. cd evaluate-loki ``` -1. Download `loki-config.yaml`, `promtail-local-config.yaml`, and `docker-compose.yaml`: +1. Download `loki-config.yaml`, `alloy-local-config.yaml`, and `docker-compose.yaml`: ```bash wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/loki-config.yaml -O loki-config.yaml - wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/promtail-local-config.yaml -O promtail-local-config.yaml + wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/alloy-local-config.yaml -O alloy-local-config.yaml wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/docker-compose.yaml -O docker-compose.yaml ``` @@ -63,16 +64,20 @@ This quickstart assumes you are running Linux. ✔ Network evaluate-loki_loki Created 0.1s ✔ Container evaluate-loki-minio-1 Started 0.6s ✔ Container evaluate-loki-flog-1 Started 0.6s + ✔ Container evaluate-loki-backend-1 Started 0.8s ✔ Container evaluate-loki-write-1 Started 0.8s ✔ Container evaluate-loki-read-1 Started 0.8s ✔ Container evaluate-loki-gateway-1 Started 1.1s ✔ Container evaluate-loki-grafana-1 Started 1.4s - ✔ Container evaluate-loki-promtail-1 Started 1.4s + ✔ Container evaluate-loki-alloy-1 Started 1.4s ``` 1. (Optional) Verify that the Loki cluster is up and running. - The read component returns `ready` when you point a web browser at [http://localhost:3101/ready](http://localhost:3101/ready). The message `Query Frontend not ready: not ready: number of schedulers this worker is connected to is 0` will show prior to the read component being ready. - The write component returns `ready` when you point a web browser at [http://localhost:3102/ready](http://localhost:3102/ready). 
The message `Ingester not ready: waiting for 15s after being ready` will show prior to the write component being ready. + +1. (Optional) Verify that Grafana Alloy is running. + - Grafana Alloy's UI can be accessed at [http://localhost:12345](http://localhost:12345). ## Viewing your logs in Grafana diff --git a/examples/getting-started/alloy-local-config.yaml b/examples/getting-started/alloy-local-config.yaml new file mode 100644 index 000000000000..ff0448ac5435 --- /dev/null +++ b/examples/getting-started/alloy-local-config.yaml @@ -0,0 +1,30 @@ +discovery.docker "flog_scrape" { + host = "unix:///var/run/docker.sock" + refresh_interval = "5s" +} + +discovery.relabel "flog_scrape" { + targets = [] + + rule { + source_labels = ["__meta_docker_container_name"] + regex = "/(.*)" + target_label = "container" + } +} + +loki.source.docker "flog_scrape" { + host = "unix:///var/run/docker.sock" + targets = discovery.docker.flog_scrape.targets + forward_to = [loki.write.default.receiver] + relabel_rules = discovery.relabel.flog_scrape.rules + refresh_interval = "5s" +} + +loki.write "default" { + endpoint { + url = "http://gateway:3100/loki/api/v1/push" + tenant_id = "tenant1" + } + external_labels = {} +} diff --git a/examples/getting-started/docker-compose.yaml b/examples/getting-started/docker-compose.yaml index 83dcde94d273..449fe55f2b6e 100644 --- a/examples/getting-started/docker-compose.yaml +++ b/examples/getting-started/docker-compose.yaml @@ -6,7 +6,7 @@ networks: services: read: - image: grafana/loki:2.9.2 + image: grafana/loki:3.0.0 command: "-config.file=/etc/loki/config.yaml -target=read" ports: - 3101:3100 @@ -27,7 +27,7 @@ services: - loki write: - image: grafana/loki:2.9.2 + image: grafana/loki:3.0.0 command: "-config.file=/etc/loki/config.yaml -target=write" ports: - 3102:3100 @@ -45,12 +45,14 @@ services: networks: <<: *loki-dns - promtail: - image: grafana/promtail:2.9.2 + alloy: + image: grafana/alloy:latest volumes: - - ./promtail-local-config.yaml:/etc/promtail/config.yaml:ro + - ./alloy-local-config.yaml:/etc/alloy/config.alloy:ro - /var/run/docker.sock:/var/run/docker.sock - command: -config.file=/etc/promtail/config.yaml + command: run --server.http.listen-addr=0.0.0.0:12345 --storage.path=/var/lib/alloy/data /etc/alloy/config.alloy + ports: + - 12345:12345 depends_on: - gateway networks: @@ -118,6 +120,20 @@ services: networks: - loki + backend: + image: grafana/loki:3.0.0 + volumes: + - ./loki-config.yaml:/etc/loki/config.yaml + ports: + - "3100" + - "7946" + command: "-config.file=/etc/loki/config.yaml -target=backend -legacy-read-mode=false" + depends_on: + - gateway + networks: + - loki + + gateway: image: nginx:latest depends_on: @@ -186,6 +202,7 @@ services: retries: 5 networks: - loki + flog: image: mingrammer/flog diff --git a/examples/getting-started/loki-config.yaml b/examples/getting-started/loki-config.yaml index 73ca66f78796..3228092e4e8f 100644 --- a/examples/getting-started/loki-config.yaml +++ b/examples/getting-started/loki-config.yaml @@ -1,9 +1,17 @@ --- server: + http_listen_address: 0.0.0.0 http_listen_port: 3100 + memberlist: - join_members: - - loki:7946 + join_members: ["read", "write", "backend"] + dead_node_reclaim_time: 30s + gossip_to_dead_nodes_time: 15s + left_ingesters_timeout: 30s + bind_addr: ['0.0.0.0'] + bind_port: 7946 + gossip_interval: 2s + schema_config: configs: - from: 2021-08-01 @@ -16,6 +24,7 @@ schema_config: common: path_prefix: /loki replication_factor: 1 + compactor_address: http://backend:3100 storage: s3: endpoint: 
minio:9000
@@ -31,3 +40,6 @@ ruler:
   storage:
     s3:
       bucketnames: loki-ruler
+
+compactor:
+  working_directory: /tmp/compactor
\ No newline at end of file
diff --git a/examples/getting-started/promtail-local-config.yaml b/examples/getting-started/promtail-local-config.yaml
deleted file mode 100644
index dcb2d3eed81a..000000000000
--- a/examples/getting-started/promtail-local-config.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-server:
-  http_listen_port: 9080
-  grpc_listen_port: 0
-
-positions:
-  filename: /tmp/positions.yaml
-
-clients:
-  - url: http://gateway:3100/loki/api/v1/push
-    tenant_id: tenant1
-
-scrape_configs:
-  - job_name: flog_scrape
-    docker_sd_configs:
-      - host: unix:///var/run/docker.sock
-        refresh_interval: 5s
-    relabel_configs:
-      - source_labels: ['__meta_docker_container_name']
-        regex: '/(.*)'
-        target_label: 'container'
-

From c36b1142c7acd6a13a3634ddbef71254040cff73 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Wed, 24 Apr 2024 13:23:42 +0200
Subject: [PATCH 12/16] fix(blooms): Correctly return unfiltered chunks for
 series that are not mapped to any block (#12774)

This PR fixes a conceptual mistake in the code of resolving blocks on the
index gateways. Currently, a series that does not resolve to any block is
discarded instead of being kept for the response.
This change adds the chunks of the skipped series to the bloom querier
response.

Signed-off-by: Christian Haudum

---
 pkg/bloomgateway/querier.go       |   5 +-
 pkg/bloomgateway/querier_test.go  |   4 +-
 pkg/bloomgateway/resolver.go      |  35 ++++++-
 pkg/bloomgateway/resolver_test.go | 156 ++++++++++++++++++++++++++++--
 4 files changed, 184 insertions(+), 16 deletions(-)

diff --git a/pkg/bloomgateway/querier.go b/pkg/bloomgateway/querier.go
index bbb9f7495d8e..a6209f9ccf34 100644
--- a/pkg/bloomgateway/querier.go
+++ b/pkg/bloomgateway/querier.go
@@ -99,7 +99,7 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
 	// only covers a single day, and if not, it's at most two days.
 	for _, s := range partitionSeriesByDay(from, through, grouped) {
 		day := bloomshipper.NewInterval(s.day.Time, s.day.Time.Add(Day))
-		blocks, err := bq.blockResolver.Resolve(ctx, tenant, day, s.series)
+		blocks, skipped, err := bq.blockResolver.Resolve(ctx, tenant, day, s.series)
 		if err != nil {
 			return nil, err
 		}
@@ -121,6 +121,9 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
 			return nil, err
 		}
 
+		// add chunk refs from series that were not mapped to any blocks
+		refs = append(refs, skipped...)
+
 		for i := range refs {
 			seriesSeen[refs[i].Fingerprint] = struct{}{}
 			for _, ref := range refs[i].Refs {
diff --git a/pkg/bloomgateway/querier_test.go b/pkg/bloomgateway/querier_test.go
index a27d90a02124..516f1cd403bb 100644
--- a/pkg/bloomgateway/querier_test.go
+++ b/pkg/bloomgateway/querier_test.go
@@ -40,7 +40,7 @@ func (c *noopClient) FilterChunks(_ context.Context, _ string, _ bloomshipper.In
 type mockBlockResolver struct{}
 
 // Resolve implements BlockResolver.
-func (*mockBlockResolver) Resolve(_ context.Context, tenant string, interval bloomshipper.Interval, series []*logproto.GroupedChunkRefs) ([]blockWithSeries, error) { +func (*mockBlockResolver) Resolve(_ context.Context, tenant string, interval bloomshipper.Interval, series []*logproto.GroupedChunkRefs) ([]blockWithSeries, []*logproto.GroupedChunkRefs, error) { day := truncateDay(interval.Start) first, last := getFirstLast(series) block := bloomshipper.BlockRef{ @@ -53,7 +53,7 @@ func (*mockBlockResolver) Resolve(_ context.Context, tenant string, interval blo Checksum: 0, }, } - return []blockWithSeries{{block: block, series: series}}, nil + return []blockWithSeries{{block: block, series: series}}, nil, nil } var _ BlockResolver = &mockBlockResolver{} diff --git a/pkg/bloomgateway/resolver.go b/pkg/bloomgateway/resolver.go index 3c5d8853d9ab..c10ebc33dff3 100644 --- a/pkg/bloomgateway/resolver.go +++ b/pkg/bloomgateway/resolver.go @@ -15,7 +15,7 @@ import ( ) type BlockResolver interface { - Resolve(context.Context, string, bloomshipper.Interval, []*logproto.GroupedChunkRefs) ([]blockWithSeries, error) + Resolve(context.Context, string, bloomshipper.Interval, []*logproto.GroupedChunkRefs) ([]blockWithSeries, []*logproto.GroupedChunkRefs, error) } type blockWithSeries struct { @@ -28,7 +28,7 @@ type defaultBlockResolver struct { logger log.Logger } -func (r *defaultBlockResolver) Resolve(ctx context.Context, tenant string, interval bloomshipper.Interval, series []*logproto.GroupedChunkRefs) ([]blockWithSeries, error) { +func (r *defaultBlockResolver) Resolve(ctx context.Context, tenant string, interval bloomshipper.Interval, series []*logproto.GroupedChunkRefs) ([]blockWithSeries, []*logproto.GroupedChunkRefs, error) { minFp, maxFp := getFirstLast(series) metaSearch := bloomshipper.MetaSearchParams{ TenantID: tenant, @@ -52,10 +52,12 @@ func (r *defaultBlockResolver) Resolve(ctx context.Context, tenant string, inter ) if err != nil { - return nil, err + return nil, series, err } - return blocksMatchingSeries(metas, interval, series), nil + mapped := blocksMatchingSeries(metas, interval, series) + skipped := unassignedSeries(mapped, series) + return mapped, skipped, nil } func blocksMatchingSeries(metas []bloomshipper.Meta, interval bloomshipper.Interval, series []*logproto.GroupedChunkRefs) []blockWithSeries { @@ -96,6 +98,31 @@ func blocksMatchingSeries(metas []bloomshipper.Meta, interval bloomshipper.Inter return result } +func unassignedSeries(mapped []blockWithSeries, series []*logproto.GroupedChunkRefs) []*logproto.GroupedChunkRefs { + skipped := make([]*logproto.GroupedChunkRefs, len(series)) + _ = copy(skipped, series) + + for _, block := range mapped { + minFp, maxFp := getFirstLast(block.series) + + minIdx := sort.Search(len(skipped), func(i int) bool { + return skipped[i].Fingerprint >= minFp.Fingerprint + }) + + maxIdx := sort.Search(len(skipped), func(i int) bool { + return skipped[i].Fingerprint >= maxFp.Fingerprint + }) + + if minIdx == len(skipped) || maxIdx == 0 || minIdx == maxIdx { + continue + } + + skipped = append(skipped[0:minIdx], skipped[maxIdx+1:]...) 
+ } + + return skipped +} + func NewBlockResolver(store bloomshipper.Store, logger log.Logger) BlockResolver { return &defaultBlockResolver{ store: store, diff --git a/pkg/bloomgateway/resolver_test.go b/pkg/bloomgateway/resolver_test.go index a2cd422e1594..7214537d6885 100644 --- a/pkg/bloomgateway/resolver_test.go +++ b/pkg/bloomgateway/resolver_test.go @@ -11,18 +11,22 @@ import ( "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" ) +func makeBlockRef(minFp, maxFp model.Fingerprint, from, through model.Time) bloomshipper.BlockRef { + return bloomshipper.BlockRef{ + Ref: bloomshipper.Ref{ + TenantID: "tenant", + TableName: "table", + Bounds: v1.NewBounds(minFp, maxFp), + StartTimestamp: from, + EndTimestamp: through, + }, + } +} + func makeMeta(minFp, maxFp model.Fingerprint, from, through model.Time) bloomshipper.Meta { return bloomshipper.Meta{ Blocks: []bloomshipper.BlockRef{ - { - Ref: bloomshipper.Ref{ - TenantID: "tenant", - TableName: "table", - Bounds: v1.NewBounds(minFp, maxFp), - StartTimestamp: from, - EndTimestamp: through, - }, - }, + makeBlockRef(minFp, maxFp, from, through), }, } } @@ -113,3 +117,137 @@ func TestBlockResolver_BlocksMatchingSeries(t *testing.T) { require.Equal(t, expected, res) }) } + +func TestBlockResolver_UnassignedSeries(t *testing.T) { + series := []*logproto.GroupedChunkRefs{ + {Fingerprint: 0x00}, + {Fingerprint: 0x20}, + {Fingerprint: 0x40}, + {Fingerprint: 0x60}, + {Fingerprint: 0x80}, + {Fingerprint: 0xa0}, + {Fingerprint: 0xc0}, + {Fingerprint: 0xe0}, + } + + testCases := []struct { + desc string + mapped []blockWithSeries + expected []*logproto.GroupedChunkRefs + }{ + { + desc: "no blocks - all unassigned", + mapped: []blockWithSeries{}, + expected: series, + }, + { + desc: "block has no overlapping series - all unassigned", + mapped: []blockWithSeries{ + { + series: []*logproto.GroupedChunkRefs{ + {Fingerprint: 0xf0}, + {Fingerprint: 0xff}, + }, + }, + }, + expected: series, + }, + { + desc: "single block covering all series - no unassigned", + mapped: []blockWithSeries{ + { + series: []*logproto.GroupedChunkRefs{ + {Fingerprint: 0x00}, + {Fingerprint: 0x20}, + {Fingerprint: 0x40}, + {Fingerprint: 0x60}, + {Fingerprint: 0x80}, + {Fingerprint: 0xa0}, + {Fingerprint: 0xc0}, + {Fingerprint: 0xe0}, + }, + }, + }, + expected: []*logproto.GroupedChunkRefs{}, + }, + { + desc: "multiple blocks covering all series - no unassigned", + mapped: []blockWithSeries{ + { + series: []*logproto.GroupedChunkRefs{ + {Fingerprint: 0x00}, + {Fingerprint: 0x20}, + {Fingerprint: 0x40}, + {Fingerprint: 0x60}, + }, + }, + { + series: []*logproto.GroupedChunkRefs{ + {Fingerprint: 0x40}, + {Fingerprint: 0x60}, + {Fingerprint: 0x80}, + {Fingerprint: 0xa0}, + }, + }, + { + series: []*logproto.GroupedChunkRefs{ + {Fingerprint: 0x80}, + {Fingerprint: 0xa0}, + {Fingerprint: 0xc0}, + {Fingerprint: 0xe0}, + }, + }, + }, + expected: []*logproto.GroupedChunkRefs{}, + }, + { + desc: "single block overlapping some series", + mapped: []blockWithSeries{ + { + series: []*logproto.GroupedChunkRefs{ + {Fingerprint: 0x00}, + {Fingerprint: 0x20}, + {Fingerprint: 0x40}, + {Fingerprint: 0x60}, + }, + }, + }, + expected: []*logproto.GroupedChunkRefs{ + {Fingerprint: 0x80}, + {Fingerprint: 0xa0}, + {Fingerprint: 0xc0}, + {Fingerprint: 0xe0}, + }, + }, + { + desc: "multiple blocks overlapping some series", + mapped: []blockWithSeries{ + { + series: []*logproto.GroupedChunkRefs{ + {Fingerprint: 0x20}, + {Fingerprint: 0x40}, + {Fingerprint: 0x60}, + }, + }, + { + series: 
[]*logproto.GroupedChunkRefs{
+						{Fingerprint: 0x80},
+						{Fingerprint: 0xa0},
+						{Fingerprint: 0xc0},
+					},
+				},
+			},
+			expected: []*logproto.GroupedChunkRefs{
+				{Fingerprint: 0x00},
+				{Fingerprint: 0xe0},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			result := unassignedSeries(tc.mapped, series)
+			require.Equal(t, result, tc.expected)
+		})
+	}
+}

From ecefb495084a59d25778af520041766e087598ba Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Wed, 24 Apr 2024 17:04:51 +0200
Subject: [PATCH 13/16] fix(blooms): Fix a regression introduced with #12774
 (#12776)

This PR fixes a regression introduced with #12774 where series were wrongly
identified as "skipped" even though they were part of blocks.

Signed-off-by: Christian Haudum

---
 pkg/bloomgateway/querier.go       |  9 +++++++++
 pkg/bloomgateway/resolver.go      |  4 ++--
 pkg/bloomgateway/resolver_test.go | 24 +++++++++++++++++++++++-
 3 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/pkg/bloomgateway/querier.go b/pkg/bloomgateway/querier.go
index a6209f9ccf34..6fc6e993be64 100644
--- a/pkg/bloomgateway/querier.go
+++ b/pkg/bloomgateway/querier.go
@@ -22,6 +22,7 @@ type querierMetrics struct {
 	chunksFiltered prometheus.Counter
 	seriesTotal    prometheus.Counter
 	seriesFiltered prometheus.Counter
+	seriesSkipped  prometheus.Counter
 }
 
 func newQuerierMetrics(registerer prometheus.Registerer, namespace, subsystem string) *querierMetrics {
@@ -50,6 +51,12 @@ func newQuerierMetrics(registerer prometheus.Registerer, namespace, subsystem st
 			Name:      "series_filtered_total",
 			Help:      "Total amount of series that have been filtered out. Does not count series in failed requests.",
 		}),
+		seriesSkipped: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "series_skipped_total",
+			Help:      "Total amount of series that have been skipped and returned unfiltered, because no block matched the series.",
+		}),
 	}
 }
 
@@ -114,6 +121,7 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
 			"series", len(s.series),
 			"chunks", chunks,
 			"blocks", len(blocks),
+			"skipped", len(skipped),
 		)
 
 		refs, err := bq.c.FilterChunks(ctx, tenant, s.interval, blocks, queryPlan)
@@ -123,6 +131,7 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
 
 		// add chunk refs from series that were not mapped to any blocks
 		refs = append(refs, skipped...)
+		bq.metrics.seriesSkipped.Add(float64(len(skipped)))
 
 		for i := range refs {
 			seriesSeen[refs[i].Fingerprint] = struct{}{}
diff --git a/pkg/bloomgateway/resolver.go b/pkg/bloomgateway/resolver.go
index c10ebc33dff3..c5b24115a211 100644
--- a/pkg/bloomgateway/resolver.go
+++ b/pkg/bloomgateway/resolver.go
@@ -110,14 +110,14 @@ func unassignedSeries(mapped []blockWithSeries, series []*logproto.GroupedChunkR
 		})
 
 		maxIdx := sort.Search(len(skipped), func(i int) bool {
-			return skipped[i].Fingerprint >= maxFp.Fingerprint
+			return skipped[i].Fingerprint > maxFp.Fingerprint
 		})
 
 		if minIdx == len(skipped) || maxIdx == 0 || minIdx == maxIdx {
 			continue
 		}
 
-		skipped = append(skipped[0:minIdx], skipped[maxIdx+1:]...)
+		skipped = append(skipped[0:minIdx], skipped[maxIdx:]...)
} return skipped diff --git a/pkg/bloomgateway/resolver_test.go b/pkg/bloomgateway/resolver_test.go index 7214537d6885..e6369cbeff9e 100644 --- a/pkg/bloomgateway/resolver_test.go +++ b/pkg/bloomgateway/resolver_test.go @@ -242,12 +242,34 @@ func TestBlockResolver_UnassignedSeries(t *testing.T) { {Fingerprint: 0xe0}, }, }, + { + desc: "block overlapping single remaining series", + mapped: []blockWithSeries{ + { + series: []*logproto.GroupedChunkRefs{ + {Fingerprint: 0x00}, + {Fingerprint: 0x20}, + {Fingerprint: 0x40}, + {Fingerprint: 0x60}, + {Fingerprint: 0x80}, + {Fingerprint: 0xa0}, + {Fingerprint: 0xc0}, + }, + }, + { + series: []*logproto.GroupedChunkRefs{ + {Fingerprint: 0xe0}, + }, + }, + }, + expected: []*logproto.GroupedChunkRefs{}, + }, } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { result := unassignedSeries(tc.mapped, series) - require.Equal(t, result, tc.expected) + require.Equal(t, tc.expected, result) }) } } From fb9b0e80a757c698ed6d5b0e4494454a25b4be30 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Wed, 24 Apr 2024 11:30:14 -0700 Subject: [PATCH 14/16] chore(blooms): adds more instrumentation to block building (#12779) --- pkg/bloomcompactor/spec.go | 34 +++++++++++++++++++++++++++------ pkg/storage/bloom/v1/builder.go | 26 ++++++++++++++++--------- pkg/storage/bloom/v1/metrics.go | 18 +++++++++++++++++ 3 files changed, 63 insertions(+), 15 deletions(-) diff --git a/pkg/bloomcompactor/spec.go b/pkg/bloomcompactor/spec.go index 6c7e095dbed8..74b056e07b0a 100644 --- a/pkg/bloomcompactor/spec.go +++ b/pkg/bloomcompactor/spec.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "time" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -74,12 +75,16 @@ func NewSimpleBloomGenerator( logger log.Logger, ) *SimpleBloomGenerator { return &SimpleBloomGenerator{ - userID: userID, - opts: opts, - store: store, - chunkLoader: chunkLoader, - blocksIter: blocksIter, - logger: log.With(logger, "component", "bloom_generator"), + userID: userID, + opts: opts, + store: store, + chunkLoader: chunkLoader, + blocksIter: blocksIter, + logger: log.With( + logger, + "component", "bloom_generator", + "org_id", userID, + ), readWriterFn: readWriterFn, metrics: metrics, reporter: reporter, @@ -90,6 +95,13 @@ func NewSimpleBloomGenerator( func (s *SimpleBloomGenerator) populator(ctx context.Context) func(series *v1.Series, bloom *v1.Bloom) (int, error) { return func(series *v1.Series, bloom *v1.Bloom) (int, error) { + start := time.Now() + level.Debug(s.logger).Log( + "msg", "populating bloom filter", + "stage", "before", + "fp", series.Fingerprint, + "chunks", len(series.Chunks), + ) chunkItersWithFP, err := s.chunkLoader.Load(ctx, s.userID, series) if err != nil { return 0, errors.Wrapf(err, "failed to load chunks for series: %+v", series) @@ -103,6 +115,16 @@ func (s *SimpleBloomGenerator) populator(ctx context.Context) func(series *v1.Se chunkItersWithFP.itr, ) + level.Debug(s.logger).Log( + "msg", "populating bloom filter", + "stage", "after", + "fp", series.Fingerprint, + "chunks", len(series.Chunks), + "series_bytes", bytesAdded, + "duration", time.Since(start), + "err", err, + ) + if s.reporter != nil { s.reporter(series.Fingerprint) } diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go index 0d129aa3def2..f09a7bc31744 100644 --- a/pkg/storage/bloom/v1/builder.go +++ b/pkg/storage/bloom/v1/builder.go @@ -157,7 +157,7 @@ func (b *BlockBuilder) AddSeries(series SeriesWithBloom) (bool, error) { return false, errors.Wrapf(err, "writing index for 
series %v", series.Series.Fingerprint) } - full, err := b.isBlockFull() + full, _, err := b.IsBlockFull() if err != nil { return false, errors.Wrap(err, "checking if block is full") } @@ -165,18 +165,18 @@ func (b *BlockBuilder) AddSeries(series SeriesWithBloom) (bool, error) { return full, nil } -func (b *BlockBuilder) isBlockFull() (bool, error) { - // if the block size is 0, the max size is unlimited - if b.opts.BlockSize == 0 { - return false, nil +func (b *BlockBuilder) IsBlockFull() (full bool, size int, err error) { + size, err = b.writer.Size() + if err != nil { + return false, 0, errors.Wrap(err, "getting block size") } - size, err := b.writer.Size() - if err != nil { - return false, errors.Wrap(err, "getting block size") + // if the block size is 0, the max size is unlimited + if b.opts.BlockSize == 0 { + return false, size, nil } - return uint64(size) >= b.opts.BlockSize, nil + return uint64(size) >= b.opts.BlockSize, size, nil } type BloomBlockBuilder struct { @@ -657,6 +657,14 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (checksum uint32, totalByte return 0, totalBytes, errors.Wrap(err, "iterating store") } + flushedFor := blockFlushReasonFinished + full, sz, _ := builder.IsBlockFull() + if full { + flushedFor = blockFlushReasonFull + } + mb.metrics.blockSize.Observe(float64(sz)) + mb.metrics.blockFlushReason.WithLabelValues(flushedFor).Inc() + checksum, err = builder.Close() if err != nil { return 0, totalBytes, errors.Wrap(err, "closing block") diff --git a/pkg/storage/bloom/v1/metrics.go b/pkg/storage/bloom/v1/metrics.go index c45b2235dccd..6de8c41a791a 100644 --- a/pkg/storage/bloom/v1/metrics.go +++ b/pkg/storage/bloom/v1/metrics.go @@ -18,6 +18,9 @@ type Metrics struct { tokensTotal prometheus.Counter insertsTotal *prometheus.CounterVec + blockSize prometheus.Histogram + blockFlushReason *prometheus.CounterVec + pagesRead *prometheus.CounterVec pagesSkipped *prometheus.CounterVec bytesRead *prometheus.CounterVec @@ -34,6 +37,9 @@ const ( collisionTypeTrue = "true" collisionTypeCache = "cache" + blockFlushReasonFull = "full" + blockFlushReasonFinished = "finished" + pageTypeBloom = "bloom" pageTypeSeries = "series" @@ -94,6 +100,18 @@ func NewMetrics(r prometheus.Registerer) *Metrics { Help: "Number of inserts into the bloom filter. collision type may be `false` (no collision), `cache` (found in token cache) or true (found in bloom filter). token_type may be either `raw` (the original ngram) or `chunk_prefixed` (the ngram with the chunk prefix)", }, []string{"token_type", "collision"}), + blockSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ + Namespace: constants.Loki, + Name: "bloom_block_size", + Help: "Size of the bloom block in bytes", + Buckets: prometheus.ExponentialBucketsRange(1<<20, 1<<30, 8), + }), + blockFlushReason: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Namespace: constants.Loki, + Name: "bloom_block_flush_reason_total", + Help: "Reason the block was finished. 
Can be either `full` (the block hit its maximum size) or `finished` (the block was finished due to the end of the series).", + }, []string{"reason"}), + pagesRead: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ Namespace: constants.Loki, Name: "bloom_pages_read_total", From 3cbdd95e76e6fd5c3b7e0a9fb9a95312efc37e27 Mon Sep 17 00:00:00 2001 From: J Stickler Date: Wed, 24 Apr 2024 15:22:41 -0400 Subject: [PATCH 15/16] docs: Update storage topics (v3.0) (#12768) --- docs/sources/configure/storage.md | 15 +++++++-------- docs/sources/operations/storage/_index.md | 8 ++------ docs/sources/operations/storage/boltdb-shipper.md | 4 +--- 3 files changed, 10 insertions(+), 17 deletions(-) diff --git a/docs/sources/configure/storage.md b/docs/sources/configure/storage.md index a4786d1cdebe..a815b98f9889 100644 --- a/docs/sources/configure/storage.md +++ b/docs/sources/configure/storage.md @@ -31,11 +31,11 @@ Single Store refers to using object storage as the storage medium for both Loki' ### TSDB (recommended) -Starting in Loki 2.8, the [TSDB index store](https://grafana.com/docs/loki//operations/storage/tsdb/) improves query performance, reduces TCO and has the same feature parity as "boltdb-shipper". +Starting in Loki 2.8, the [TSDB index store](https://grafana.com/docs/loki//operations/storage/tsdb/) improves query performance, reduces TCO and has the same feature parity as "boltdb-shipper". TSDB is the recommended index store for Loki 2.8 and newer. ### BoltDB (deprecated) -Also known as "boltdb-shipper" during development (and is still the schema `store` name). The single store configurations for Loki utilize the chunk store for both chunks and the index, requiring just one store to run Loki. +Also known as "boltdb-shipper" during development (and is still the schema `store` name). The single store configurations for Loki utilize the chunk store for both chunks and the index, requiring just one store to run Loki. BoldDB is the recommended index store for Loki v2.0.0 through v2.7x. Performance is comparable to a dedicated index type while providing a much less expensive and less complicated deployment. When using Single Store, no extra [Chunk storage](#chunk-storage) and [Index storage](#index-storage) are necessary. @@ -172,7 +172,7 @@ For more information, see the [table manager](https://grafana.com/docs/loki//configure/#period_config) entry in your [schema_config](https://grafana.com/docs/loki//configure/#schema_config). The important thing to remember here is to set this at some point in the _future_ and then roll out the config file changes to Loki. This allows the table manager to create the required table in advance of writes and ensures that existing data isn't queried as if it adheres to the new schema. As an example, let's say it's 2023-07-14 and we want to start using the `v13` schema on the 20th: + ```yaml schema_config: configs: @@ -223,7 +224,6 @@ We're interested in adding targeted deletion in future Loki releases (think tena For more information, see the [retention configuration](https://grafana.com/docs/loki//operations/storage/retention/) documentation. - ## Examples ### Single machine/local development (boltdb+filesystem) @@ -314,7 +314,7 @@ The role should have a policy with the following permissions attached. } ``` -**To setup an S3 bucket and an IAM role and policy:** +**To setup an S3 bucket and an IAM role and policy:** This guide assumes a provisioned EKS cluster. @@ -324,14 +324,14 @@ This guide assumes a provisioned EKS cluster. 3. 
Export the AWS profile and region if not done so: - ``` + ```bash export AWS_PROFILE= export AWS_REGION= ``` 4. Save the OIDC provider in an environment variable: - ``` + ```bash oidc_provider=$(aws eks describe-cluster --name --query "cluster.identity.oidc.issuer" --output text | sed -e "s/^https:\/\///") ``` @@ -342,7 +342,6 @@ This guide assumes a provisioned EKS cluster. Note, the bucket name defaults to `loki-data` but can be changed via the `bucket_name` variable. - ### Azure deployment (Azure Blob Storage Single Store) #### Using account name and key diff --git a/docs/sources/operations/storage/_index.md b/docs/sources/operations/storage/_index.md index 26862d3a88fb..b0cea23bd43d 100644 --- a/docs/sources/operations/storage/_index.md +++ b/docs/sources/operations/storage/_index.md @@ -30,9 +30,8 @@ For more information: ### ✅ Supported index stores -- [Single Store TSDB](https://grafana.com/docs/loki//operations/storage/tsdb/) index store which stores TSDB index files in the object store. - **This is the recommended index store for Loki 2.8 and newer.** -- [Single Store BoltDB (boltdb-shipper)](https://grafana.com/docs/loki//operations/storage/boltdb-shipper/) index store which stores boltdb index files in the object store. +- [Single Store TSDB](https://grafana.com/docs/loki//operations/storage/tsdb/) index store which stores TSDB index files in the object store. This is the recommended index store for Loki 2.8 and newer. +- [Single Store BoltDB (boltdb-shipper)](https://grafana.com/docs/loki//operations/storage/boltdb-shipper/) index store which stores boltdb index files in the object store. Recommended store for Loki 2.0 through 2.7.x. ### ❌ Deprecated index stores @@ -61,7 +60,6 @@ For more information: - [Google Bigtable](https://cloud.google.com/bigtable). Support for this is deprecated and will be removed in a future release. - [Apache Cassandra](https://cassandra.apache.org). Support for this is deprecated and will be removed in a future release. - ## Cloud Storage Permissions ### S3 @@ -128,7 +126,6 @@ Resources: `*` Resources: `arn:aws:iam:::role/` - ### IBM Cloud Object Storage When using IBM Cloud Object Storage (COS) as object storage, IAM `Writer` role is needed. @@ -164,4 +161,3 @@ See the [IBM Cloud Object Storage section](https://grafana.com/docs/loki/}}) index is the recommended index. +Single store BoltDB Shipper is a legacy storage option recommended for Loki 2.0 through 2.7.x and is not recommended for new deployments. The [TSDB](https://grafana.com/docs/loki//operations/storage/tsdb/) is the recommended index for Loki 2.8 and newer. {{% /admonition %}} BoltDB Shipper lets you run Grafana Loki without any dependency on NoSQL stores for storing index. 
@@ -155,5 +155,3 @@ storage_config:
     gcs:
       bucket_name: GCS_BUCKET_NAME
 ```
-
-

From af09f53a33687bfa00cfb485a33c884288a4b0d1 Mon Sep 17 00:00:00 2001
From: Trevor Whitney
Date: Wed, 24 Apr 2024 14:14:32 -0600
Subject: [PATCH 16/16] ci: release workflows get build image from makefile (#12778)

---
 .drone/drone.yml | 6 +-
 .github/release-workflows.jsonnet | 3 +-
 .github/workflows/check.yml | 2 +-
 .github/workflows/minor-release-pr.yml | 4 +-
 .github/workflows/patch-release-pr.yml | 4 +-
 Makefile | 9 +-
 docs/sources/shared/configuration.md | 64 +++++++-------
 loki-build-image/Dockerfile | 2 +-
 .../queryrangebase/queryrange.pb.go | 4 +-
 pkg/querier/stats/stats.pb.go | 4 +-
 pkg/ruler/base/ruler.pb.go | 6 +-
 pkg/ruler/rulespb/rules.pb.go | 6 +-
 pkg/storage/chunk/client/grpc/grpc.pb.go | 84 +++++++++----------
 tools/ensure-buildx-builder.sh | 9 ++
 14 files changed, 109 insertions(+), 98 deletions(-)
 create mode 100755 tools/ensure-buildx-builder.sh

diff --git a/.drone/drone.yml b/.drone/drone.yml
index db29dd9d1ba0..aa423d21d6f5 100644
--- a/.drone/drone.yml
+++ b/.drone/drone.yml
@@ -139,7 +139,7 @@ steps:
   depends_on:
   - clone
   environment: {}
-  image: grafana/loki-build-image:0.33.2
+  image: grafana/loki-build-image:0.33.3
   name: documentation-helm-reference-check
 trigger:
   ref:
@@ -1085,7 +1085,7 @@ steps:
       from_secret: docker_password
     DOCKER_USERNAME:
       from_secret: docker_username
-  image: grafana/loki-build-image:0.33.2
+  image: grafana/loki-build-image:0.33.3
   name: build and push
   privileged: true
   volumes:
@@ -1308,6 +1308,6 @@ kind: secret
 name: gpg_private_key
 ---
 kind: signature
-hmac: 87480bff973003712122d81a1575e2a62cff6fd4a42b163487cae6c6a67d8e7c
+hmac: 33b9d2962b6dfcf1136ef7602d29e3f32f03b0d90dfd579652cbaf0a4ef2de4b
 ...

diff --git a/.github/release-workflows.jsonnet b/.github/release-workflows.jsonnet
index d21195260494..5b179b3006d8 100644
--- a/.github/release-workflows.jsonnet
+++ b/.github/release-workflows.jsonnet
@@ -17,7 +17,8 @@ local imageJobs = {
   querytee: build.image('loki-query-tee', 'cmd/querytee', platform=['linux/amd64']),
 };
-local buildImage = 'grafana/loki-build-image:0.33.1';
+local buildImageVersion = std.extVar('BUILD_IMAGE_VERSION');
+local buildImage = 'grafana/loki-build-image:%s' % buildImageVersion;
 local golangCiLintVersion = 'v1.55.1';
 local imageBuildTimeoutMin = 40;

diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml
index 8d8c4acf0dad..1e42a7aa8dfd 100644
--- a/.github/workflows/check.yml
+++ b/.github/workflows/check.yml
@@ -2,7 +2,7 @@
 "check":
   "uses": "grafana/loki-release/.github/workflows/check.yml@main"
   "with":
-    "build_image": "grafana/loki-build-image:0.33.1"
+    "build_image": "grafana/loki-build-image:0.33.3"
     "golang_ci_lint_version": "v1.55.1"
     "release_lib_ref": "main"
     "skip_validation": false

diff --git a/.github/workflows/minor-release-pr.yml b/.github/workflows/minor-release-pr.yml
index a9e6148fb7c2..33841935ec14 100644
--- a/.github/workflows/minor-release-pr.yml
+++ b/.github/workflows/minor-release-pr.yml
@@ -16,7 +16,7 @@ jobs:
   check:
     uses: "grafana/loki-release/.github/workflows/check.yml@main"
     with:
-      build_image: "grafana/loki-build-image:0.33.1"
+      build_image: "grafana/loki-build-image:0.33.3"
       golang_ci_lint_version: "v1.55.1"
       release_lib_ref: "main"
       skip_validation: false
@@ -141,7 +141,7 @@ jobs:
           --env SKIP_ARM \
           --volume .:/src/loki \
           --workdir /src/loki \
-          --entrypoint /bin/sh "grafana/loki-build-image:0.33.1"
+          --entrypoint /bin/sh "grafana/loki-build-image:0.33.3"
         git config --global --add safe.directory /src/loki
         echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE
         make dist packages

diff --git a/.github/workflows/patch-release-pr.yml b/.github/workflows/patch-release-pr.yml
index 63e235946815..4144b19e1253 100644
--- a/.github/workflows/patch-release-pr.yml
+++ b/.github/workflows/patch-release-pr.yml
@@ -16,7 +16,7 @@ jobs:
   check:
     uses: "grafana/loki-release/.github/workflows/check.yml@main"
     with:
-      build_image: "grafana/loki-build-image:0.33.1"
+      build_image: "grafana/loki-build-image:0.33.3"
       golang_ci_lint_version: "v1.55.1"
       release_lib_ref: "main"
       skip_validation: false
@@ -141,7 +141,7 @@ jobs:
           --env SKIP_ARM \
           --volume .:/src/loki \
           --workdir /src/loki \
-          --entrypoint /bin/sh "grafana/loki-build-image:0.33.1"
+          --entrypoint /bin/sh "grafana/loki-build-image:0.33.3"
         git config --global --add safe.directory /src/loki
         echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE
         make dist packages

diff --git a/Makefile b/Makefile
index 01c68a4052ee..1aea9d1c18ac 100644
--- a/Makefile
+++ b/Makefile
@@ -36,8 +36,8 @@ DOCKER_IMAGE_DIRS := $(patsubst %/Dockerfile,%,$(DOCKERFILES))
 # or you can override this with an environment variable
 BUILD_IN_CONTAINER ?= true
-# ensure you run `make drone` after changing this
-BUILD_IMAGE_VERSION ?= 0.33.2
+# ensure you run `make drone` and `make release-workflows` after changing this
+BUILD_IMAGE_VERSION ?= 0.33.3
 # Docker image info
 IMAGE_PREFIX ?= grafana
@@ -668,7 +668,8 @@ ifneq (,$(findstring WIP,$(IMAGE_TAG)))
 	false;
 endif
 	echo ${DOCKER_PASSWORD} | docker login --username ${DOCKER_USERNAME} --password-stdin
-	$(SUDO) $(BUILD_OCI) -o type=registry -t $(IMAGE_PREFIX)/loki-build-image:$(IMAGE_TAG) ./loki-build-image
+	$(SUDO) DOCKER_BUILDKIT=$(DOCKER_BUILDKIT) docker buildx build $(OCI_PLATFORMS) \
+		-o type=registry -t $(IMAGE_PREFIX)/loki-build-image:$(BUILD_IMAGE_VERSION) ./loki-build-image
 # loki-operator
 loki-operator-image:
@@ -899,7 +900,7 @@ scan-vulnerabilities: trivy snyk
 .PHONY: release-workflows
 release-workflows:
 	pushd $(CURDIR)/.github && jb update && popd
-	jsonnet -SJ .github/vendor -m .github/workflows .github/release-workflows.jsonnet
+	jsonnet -SJ .github/vendor -m .github/workflows -V BUILD_IMAGE_VERSION=$(BUILD_IMAGE_VERSION) .github/release-workflows.jsonnet
 .PHONY: release-workflows-check
 release-workflows-check:

diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index be44ae74ca75..54db8618c347 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -431,10 +431,6 @@ compactor_grpc_client:
   # values:
   #
   # Secure Ciphers:
-  # - TLS_RSA_WITH_AES_128_CBC_SHA
-  # - TLS_RSA_WITH_AES_256_CBC_SHA
-  # - TLS_RSA_WITH_AES_128_GCM_SHA256
-  # - TLS_RSA_WITH_AES_256_GCM_SHA384
   # - TLS_AES_128_GCM_SHA256
   # - TLS_AES_256_GCM_SHA384
   # - TLS_CHACHA20_POLY1305_SHA256
@@ -452,7 +448,11 @@ compactor_grpc_client:
   # Insecure Ciphers:
   # - TLS_RSA_WITH_RC4_128_SHA
   # - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+  # - TLS_RSA_WITH_AES_128_CBC_SHA
+  # - TLS_RSA_WITH_AES_256_CBC_SHA
   # - TLS_RSA_WITH_AES_128_CBC_SHA256
+  # - TLS_RSA_WITH_AES_128_GCM_SHA256
+  # - TLS_RSA_WITH_AES_256_GCM_SHA384
   # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
   # - TLS_ECDHE_RSA_WITH_RC4_128_SHA
   # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -1387,10 +1387,6 @@ alertmanager_client:
   # values:
   #
   # Secure Ciphers:
-  # - TLS_RSA_WITH_AES_128_CBC_SHA
-  # - TLS_RSA_WITH_AES_256_CBC_SHA
-  # - TLS_RSA_WITH_AES_128_GCM_SHA256
-  # - TLS_RSA_WITH_AES_256_GCM_SHA384
   # - TLS_AES_128_GCM_SHA256
   # - TLS_AES_256_GCM_SHA384
   # - TLS_CHACHA20_POLY1305_SHA256
@@ -1408,7 +1404,11 @@ alertmanager_client:
   # Insecure Ciphers:
   # - TLS_RSA_WITH_RC4_128_SHA
   # - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+  # - TLS_RSA_WITH_AES_128_CBC_SHA
+  # - TLS_RSA_WITH_AES_256_CBC_SHA
   # - TLS_RSA_WITH_AES_128_CBC_SHA256
+  # - TLS_RSA_WITH_AES_128_GCM_SHA256
+  # - TLS_RSA_WITH_AES_256_GCM_SHA384
   # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
   # - TLS_ECDHE_RSA_WITH_RC4_128_SHA
   # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -1663,10 +1663,6 @@ evaluation:
   # values:
   #
   # Secure Ciphers:
-  # - TLS_RSA_WITH_AES_128_CBC_SHA
-  # - TLS_RSA_WITH_AES_256_CBC_SHA
-  # - TLS_RSA_WITH_AES_128_GCM_SHA256
-  # - TLS_RSA_WITH_AES_256_GCM_SHA384
   # - TLS_AES_128_GCM_SHA256
   # - TLS_AES_256_GCM_SHA384
   # - TLS_CHACHA20_POLY1305_SHA256
@@ -1684,7 +1680,11 @@ evaluation:
   # Insecure Ciphers:
   # - TLS_RSA_WITH_RC4_128_SHA
   # - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+  # - TLS_RSA_WITH_AES_128_CBC_SHA
+  # - TLS_RSA_WITH_AES_256_CBC_SHA
   # - TLS_RSA_WITH_AES_128_CBC_SHA256
+  # - TLS_RSA_WITH_AES_128_GCM_SHA256
+  # - TLS_RSA_WITH_AES_256_GCM_SHA384
   # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
   # - TLS_ECDHE_RSA_WITH_RC4_128_SHA
   # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -4281,10 +4281,6 @@ Configuration for an ETCD v3 client. Only applies if the selected kvstore is `et
 # Override the default cipher suite list (separated by commas). Allowed values:
 #
 # Secure Ciphers:
-# - TLS_RSA_WITH_AES_128_CBC_SHA
-# - TLS_RSA_WITH_AES_256_CBC_SHA
-# - TLS_RSA_WITH_AES_128_GCM_SHA256
-# - TLS_RSA_WITH_AES_256_GCM_SHA384
 # - TLS_AES_128_GCM_SHA256
 # - TLS_AES_256_GCM_SHA384
 # - TLS_CHACHA20_POLY1305_SHA256
@@ -4302,7 +4298,11 @@ Configuration for an ETCD v3 client. Only applies if the selected kvstore is `et
 # Insecure Ciphers:
 # - TLS_RSA_WITH_RC4_128_SHA
 # - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+# - TLS_RSA_WITH_AES_128_CBC_SHA
+# - TLS_RSA_WITH_AES_256_CBC_SHA
 # - TLS_RSA_WITH_AES_128_CBC_SHA256
+# - TLS_RSA_WITH_AES_128_GCM_SHA256
+# - TLS_RSA_WITH_AES_256_GCM_SHA384
 # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
 # - TLS_ECDHE_RSA_WITH_RC4_128_SHA
 # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -4489,10 +4489,6 @@ When a memberlist config with atleast 1 join_members is defined, kvstore of type
 # Override the default cipher suite list (separated by commas). Allowed values:
 #
 # Secure Ciphers:
-# - TLS_RSA_WITH_AES_128_CBC_SHA
-# - TLS_RSA_WITH_AES_256_CBC_SHA
-# - TLS_RSA_WITH_AES_128_GCM_SHA256
-# - TLS_RSA_WITH_AES_256_GCM_SHA384
 # - TLS_AES_128_GCM_SHA256
 # - TLS_AES_256_GCM_SHA384
 # - TLS_CHACHA20_POLY1305_SHA256
@@ -4510,7 +4506,11 @@ When a memberlist config with atleast 1 join_members is defined, kvstore of type
 # Insecure Ciphers:
 # - TLS_RSA_WITH_RC4_128_SHA
 # - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+# - TLS_RSA_WITH_AES_128_CBC_SHA
+# - TLS_RSA_WITH_AES_256_CBC_SHA
 # - TLS_RSA_WITH_AES_128_CBC_SHA256
+# - TLS_RSA_WITH_AES_128_GCM_SHA256
+# - TLS_RSA_WITH_AES_256_GCM_SHA384
 # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
 # - TLS_ECDHE_RSA_WITH_RC4_128_SHA
 # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -4625,10 +4625,6 @@ backoff_config:
 # Override the default cipher suite list (separated by commas). Allowed values:
 #
 # Secure Ciphers:
-# - TLS_RSA_WITH_AES_128_CBC_SHA
-# - TLS_RSA_WITH_AES_256_CBC_SHA
-# - TLS_RSA_WITH_AES_128_GCM_SHA256
-# - TLS_RSA_WITH_AES_256_GCM_SHA384
 # - TLS_AES_128_GCM_SHA256
 # - TLS_AES_256_GCM_SHA384
 # - TLS_CHACHA20_POLY1305_SHA256
@@ -4646,7 +4642,11 @@ backoff_config:
 # Insecure Ciphers:
 # - TLS_RSA_WITH_RC4_128_SHA
 # - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+# - TLS_RSA_WITH_AES_128_CBC_SHA
+# - TLS_RSA_WITH_AES_256_CBC_SHA
 # - TLS_RSA_WITH_AES_128_CBC_SHA256
+# - TLS_RSA_WITH_AES_128_GCM_SHA256
+# - TLS_RSA_WITH_AES_256_GCM_SHA384
 # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
 # - TLS_ECDHE_RSA_WITH_RC4_128_SHA
 # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -4707,10 +4707,6 @@ The TLS configuration.
 # Override the default cipher suite list (separated by commas). Allowed values:
 #
 # Secure Ciphers:
-# - TLS_RSA_WITH_AES_128_CBC_SHA
-# - TLS_RSA_WITH_AES_256_CBC_SHA
-# - TLS_RSA_WITH_AES_128_GCM_SHA256
-# - TLS_RSA_WITH_AES_256_GCM_SHA384
 # - TLS_AES_128_GCM_SHA256
 # - TLS_AES_256_GCM_SHA384
 # - TLS_CHACHA20_POLY1305_SHA256
@@ -4728,7 +4724,11 @@ The TLS configuration.
 # Insecure Ciphers:
 # - TLS_RSA_WITH_RC4_128_SHA
 # - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+# - TLS_RSA_WITH_AES_128_CBC_SHA
+# - TLS_RSA_WITH_AES_256_CBC_SHA
 # - TLS_RSA_WITH_AES_128_CBC_SHA256
+# - TLS_RSA_WITH_AES_128_GCM_SHA256
+# - TLS_RSA_WITH_AES_256_GCM_SHA384
 # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
 # - TLS_ECDHE_RSA_WITH_RC4_128_SHA
 # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
@@ -4875,10 +4875,6 @@ memcached_client:
   # values:
   #
   # Secure Ciphers:
-  # - TLS_RSA_WITH_AES_128_CBC_SHA
-  # - TLS_RSA_WITH_AES_256_CBC_SHA
-  # - TLS_RSA_WITH_AES_128_GCM_SHA256
-  # - TLS_RSA_WITH_AES_256_GCM_SHA384
   # - TLS_AES_128_GCM_SHA256
   # - TLS_AES_256_GCM_SHA384
   # - TLS_CHACHA20_POLY1305_SHA256
@@ -4896,7 +4892,11 @@ memcached_client:
   # Insecure Ciphers:
   # - TLS_RSA_WITH_RC4_128_SHA
   # - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+  # - TLS_RSA_WITH_AES_128_CBC_SHA
+  # - TLS_RSA_WITH_AES_256_CBC_SHA
   # - TLS_RSA_WITH_AES_128_CBC_SHA256
+  # - TLS_RSA_WITH_AES_128_GCM_SHA256
+  # - TLS_RSA_WITH_AES_256_GCM_SHA384
   # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
   # - TLS_ECDHE_RSA_WITH_RC4_128_SHA
   # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA

diff --git a/loki-build-image/Dockerfile b/loki-build-image/Dockerfile
index beb7b0078dae..42c52c18d625 100644
--- a/loki-build-image/Dockerfile
+++ b/loki-build-image/Dockerfile
@@ -49,7 +49,7 @@ RUN curl -L "https://github.com/drone/drone-cli/releases/download/v1.7.0/drone_l
 # github.com/fatih/faillint@v1.5.0 requires golang.org/x/tools@v0.0.0-20200207224406-61798d64f025
 # (not golang.org/x/tools@v0.0.0-20190918214920-58d531046acd from golang.org/x/tools/cmd/goyacc@58d531046acdc757f177387bc1725bfa79895d69)
 FROM golang:1.22.2-bookworm as faillint
-RUN GO111MODULE=on go install github.com/fatih/faillint@v1.11.0
+RUN GO111MODULE=on go install github.com/fatih/faillint@v1.12.0
 RUN GO111MODULE=on go install golang.org/x/tools/cmd/goimports@v0.7.0
 FROM golang:1.22.2-bookworm as delve

diff --git a/pkg/querier/queryrange/queryrangebase/queryrange.pb.go b/pkg/querier/queryrange/queryrangebase/queryrange.pb.go
index 2b1f7b951903..f376455df4c2 100644
--- a/pkg/querier/queryrange/queryrangebase/queryrange.pb.go
+++ b/pkg/querier/queryrange/queryrangebase/queryrange.pb.go
@@ -9,11 +9,11 @@ import (
 	proto "github.com/gogo/protobuf/proto"
 	_ "github.com/gogo/protobuf/types"
 	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
-	_ "github.com/golang/protobuf/ptypes/duration"
 	github_com_grafana_loki_v3_pkg_logproto "github.com/grafana/loki/v3/pkg/logproto"
 	logproto "github.com/grafana/loki/v3/pkg/logproto"
 	definitions "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase/definitions"
 	resultscache "github.com/grafana/loki/v3/pkg/storage/chunk/cache/resultscache"
+	_ "google.golang.org/protobuf/types/known/durationpb"
 	io "io"
 	math "math"
 	math_bits "math/bits"
@@ -1022,7 +1022,7 @@ func (this *PrometheusRequest) String() string {
 		`Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
 		`End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
 		`Step:` + fmt.Sprintf("%v", this.Step) + `,`,
-		`Timeout:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`,
+		`Timeout:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`,
 		`Query:` + fmt.Sprintf("%v", this.Query) + `,`,
 		`CachingOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CachingOptions), "CachingOptions", "resultscache.CachingOptions", 1), `&`, ``, 1) + `,`,
 		`Headers:` + repeatedStringForHeaders + `,`,

diff --git a/pkg/querier/stats/stats.pb.go b/pkg/querier/stats/stats.pb.go
index bae01dd0eeed..f4d7e4cc1d27 100644
--- a/pkg/querier/stats/stats.pb.go
+++ b/pkg/querier/stats/stats.pb.go
@@ -8,7 +8,7 @@ import (
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
 	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
-	_ "github.com/golang/protobuf/ptypes/duration"
+	_ "google.golang.org/protobuf/types/known/durationpb"
 	io "io"
 	math "math"
 	math_bits "math/bits"
@@ -251,7 +251,7 @@ func (this *Stats) String() string {
 		return "nil"
 	}
 	s := strings.Join([]string{`&Stats{`,
-		`WallTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.WallTime), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`,
+		`WallTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.WallTime), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`,
 		`FetchedSeriesCount:` + fmt.Sprintf("%v", this.FetchedSeriesCount) + `,`,
 		`FetchedChunkBytes:` + fmt.Sprintf("%v", this.FetchedChunkBytes) + `,`,
 		`}`,

diff --git a/pkg/ruler/base/ruler.pb.go b/pkg/ruler/base/ruler.pb.go
index 5b3b1f1b4d5d..81ef01420b28 100644
--- a/pkg/ruler/base/ruler.pb.go
+++ b/pkg/ruler/base/ruler.pb.go
@@ -11,13 +11,13 @@ import (
 	proto "github.com/gogo/protobuf/proto"
 	_ "github.com/gogo/protobuf/types"
 	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
-	_ "github.com/golang/protobuf/ptypes/duration"
 	_ "github.com/grafana/loki/v3/pkg/logproto"
 	github_com_grafana_loki_v3_pkg_logproto "github.com/grafana/loki/v3/pkg/logproto"
 	rulespb "github.com/grafana/loki/v3/pkg/ruler/rulespb"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	status "google.golang.org/grpc/status"
+	_ "google.golang.org/protobuf/types/known/durationpb"
 	io "io"
 	math "math"
 	math_bits "math/bits"
@@ -1433,7 +1433,7 @@ func (this *GroupStateDesc) String() string {
 		`Group:` + strings.Replace(fmt.Sprintf("%v", this.Group), "RuleGroupDesc", "rulespb.RuleGroupDesc", 1) + `,`,
 		`ActiveRules:` + repeatedStringForActiveRules + `,`,
 		`EvaluationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationTimestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
-		`EvaluationDuration:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationDuration), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`,
+		`EvaluationDuration:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationDuration), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1454,7 +1454,7 @@ func (this *RuleStateDesc) String() string {
 		`LastError:` + fmt.Sprintf("%v", this.LastError) + `,`,
 		`Alerts:` + repeatedStringForAlerts + `,`,
 		`EvaluationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationTimestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
-		`EvaluationDuration:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationDuration), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`,
+		`EvaluationDuration:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationDuration), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`,
 		`}`,
 	}, "")
 	return s

diff --git a/pkg/ruler/rulespb/rules.pb.go b/pkg/ruler/rulespb/rules.pb.go
index 91afa25a655e..3765e9dd88a7 100644
--- a/pkg/ruler/rulespb/rules.pb.go
+++ b/pkg/ruler/rulespb/rules.pb.go
@@ -9,9 +9,9 @@ import (
 	proto "github.com/gogo/protobuf/proto"
 	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
 	types "github.com/gogo/protobuf/types"
-	_ "github.com/golang/protobuf/ptypes/duration"
 	_ "github.com/grafana/loki/v3/pkg/logproto"
 	github_com_grafana_loki_v3_pkg_logproto "github.com/grafana/loki/v3/pkg/logproto"
+	_ "google.golang.org/protobuf/types/known/durationpb"
 	io "io"
 	math "math"
 	math_bits "math/bits"
@@ -657,7 +657,7 @@ func (this *RuleGroupDesc) String() string {
 	s := strings.Join([]string{`&RuleGroupDesc{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
-		`Interval:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`,
+		`Interval:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`,
 		`Rules:` + repeatedStringForRules + `,`,
 		`User:` + fmt.Sprintf("%v", this.User) + `,`,
 		`Options:` + repeatedStringForOptions + `,`,
@@ -674,7 +674,7 @@ func (this *RuleDesc) String() string {
 		`Expr:` + fmt.Sprintf("%v", this.Expr) + `,`,
 		`Record:` + fmt.Sprintf("%v", this.Record) + `,`,
 		`Alert:` + fmt.Sprintf("%v", this.Alert) + `,`,
-		`For:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.For), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`,
+		`For:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.For), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`,
 		`Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
 		`Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`,
 		`}`,

diff --git a/pkg/storage/chunk/client/grpc/grpc.pb.go b/pkg/storage/chunk/client/grpc/grpc.pb.go
index d76002adfc38..6468535c2ab1 100644
--- a/pkg/storage/chunk/client/grpc/grpc.pb.go
+++ b/pkg/storage/chunk/client/grpc/grpc.pb.go
@@ -9,10 +9,10 @@ import (
 	fmt "fmt"
 	proto "github.com/gogo/protobuf/proto"
 	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
-	empty "github.com/golang/protobuf/ptypes/empty"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	status "google.golang.org/grpc/status"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
 	io "io"
 	math "math"
 	math_bits "math/bits"
@@ -1998,30 +1998,30 @@ const _ = grpc.SupportPackageIsVersion4
 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type GrpcStoreClient interface {
 	// / WriteIndex writes batch of indexes to the index tables.
-	WriteIndex(ctx context.Context, in *WriteIndexRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+	WriteIndex(ctx context.Context, in *WriteIndexRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
 	// / QueryIndex reads the indexes required for given query & sends back the batch of rows
 	// / in rpc streams
 	QueryIndex(ctx context.Context, in *QueryIndexRequest, opts ...grpc.CallOption) (GrpcStore_QueryIndexClient, error)
 	// / DeleteIndex deletes the batch of index entries from the index tables
-	DeleteIndex(ctx context.Context, in *DeleteIndexRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+	DeleteIndex(ctx context.Context, in *DeleteIndexRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
 	// / PutChunks saves the batch of chunks into the chunk tables.
-	PutChunks(ctx context.Context, in *PutChunksRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+	PutChunks(ctx context.Context, in *PutChunksRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
 	// / GetChunks requests for batch of chunks and the batch of chunks are sent back in rpc streams
 	// / batching needs to be performed at server level as per requirement instead of sending single chunk per stream.
 	// / In GetChunks rpc request send buf as nil
 	GetChunks(ctx context.Context, in *GetChunksRequest, opts ...grpc.CallOption) (GrpcStore_GetChunksClient, error)
 	// / DeleteChunks deletes the chunks based on chunkID.
-	DeleteChunks(ctx context.Context, in *ChunkID, opts ...grpc.CallOption) (*empty.Empty, error)
+	DeleteChunks(ctx context.Context, in *ChunkID, opts ...grpc.CallOption) (*emptypb.Empty, error)
 	// / Lists all the tables that exists in the database.
-	ListTables(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ListTablesResponse, error)
+	ListTables(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListTablesResponse, error)
 	// / Creates a table with provided name & attributes.
-	CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+	CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
 	// Deletes a table using table name provided.
-	DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+	DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
 	// Describes a table information for the provided table.
 	DescribeTable(ctx context.Context, in *DescribeTableRequest, opts ...grpc.CallOption) (*DescribeTableResponse, error)
 	// Update a table with newly provided table information.
-	UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+	UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
 }

 type grpcStoreClient struct {
@@ -2032,8 +2032,8 @@ func NewGrpcStoreClient(cc *grpc.ClientConn) GrpcStoreClient {
 	return &grpcStoreClient{cc}
 }

-func (c *grpcStoreClient) WriteIndex(ctx context.Context, in *WriteIndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
-	out := new(empty.Empty)
+func (c *grpcStoreClient) WriteIndex(ctx context.Context, in *WriteIndexRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+	out := new(emptypb.Empty)
 	err := c.cc.Invoke(ctx, "/grpc.grpc_store/WriteIndex", in, out, opts...)
 	if err != nil {
 		return nil, err
@@ -2073,8 +2073,8 @@ func (x *grpcStoreQueryIndexClient) Recv() (*QueryIndexResponse, error) {
 	return m, nil
 }

-func (c *grpcStoreClient) DeleteIndex(ctx context.Context, in *DeleteIndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
-	out := new(empty.Empty)
+func (c *grpcStoreClient) DeleteIndex(ctx context.Context, in *DeleteIndexRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+	out := new(emptypb.Empty)
 	err := c.cc.Invoke(ctx, "/grpc.grpc_store/DeleteIndex", in, out, opts...)
 	if err != nil {
 		return nil, err
@@ -2082,8 +2082,8 @@ func (c *grpcStoreClient) DeleteIndex(ctx context.Context, in *DeleteIndexReques
 	return out, nil
 }

-func (c *grpcStoreClient) PutChunks(ctx context.Context, in *PutChunksRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
-	out := new(empty.Empty)
+func (c *grpcStoreClient) PutChunks(ctx context.Context, in *PutChunksRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+	out := new(emptypb.Empty)
 	err := c.cc.Invoke(ctx, "/grpc.grpc_store/PutChunks", in, out, opts...)
 	if err != nil {
 		return nil, err
@@ -2123,8 +2123,8 @@ func (x *grpcStoreGetChunksClient) Recv() (*GetChunksResponse, error) {
 	return m, nil
 }

-func (c *grpcStoreClient) DeleteChunks(ctx context.Context, in *ChunkID, opts ...grpc.CallOption) (*empty.Empty, error) {
-	out := new(empty.Empty)
+func (c *grpcStoreClient) DeleteChunks(ctx context.Context, in *ChunkID, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+	out := new(emptypb.Empty)
 	err := c.cc.Invoke(ctx, "/grpc.grpc_store/DeleteChunks", in, out, opts...)
 	if err != nil {
 		return nil, err
@@ -2132,7 +2132,7 @@ func (c *grpcStoreClient) DeleteChunks(ctx context.Context, in *ChunkID, opts ..
 	return out, nil
 }

-func (c *grpcStoreClient) ListTables(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ListTablesResponse, error) {
+func (c *grpcStoreClient) ListTables(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListTablesResponse, error) {
 	out := new(ListTablesResponse)
 	err := c.cc.Invoke(ctx, "/grpc.grpc_store/ListTables", in, out, opts...)
 	if err != nil {
@@ -2141,8 +2141,8 @@ func (c *grpcStoreClient) ListTables(ctx context.Context, in *empty.Empty, opts
 	return out, nil
 }

-func (c *grpcStoreClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
-	out := new(empty.Empty)
+func (c *grpcStoreClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+	out := new(emptypb.Empty)
 	err := c.cc.Invoke(ctx, "/grpc.grpc_store/CreateTable", in, out, opts...)
 	if err != nil {
 		return nil, err
@@ -2150,8 +2150,8 @@ func (c *grpcStoreClient) CreateTable(ctx context.Context, in *CreateTableReques
 	return out, nil
 }

-func (c *grpcStoreClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
-	out := new(empty.Empty)
+func (c *grpcStoreClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+	out := new(emptypb.Empty)
 	err := c.cc.Invoke(ctx, "/grpc.grpc_store/DeleteTable", in, out, opts...)
 	if err != nil {
 		return nil, err
@@ -2168,8 +2168,8 @@ func (c *grpcStoreClient) DescribeTable(ctx context.Context, in *DescribeTableRe
 	return out, nil
 }

-func (c *grpcStoreClient) UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
-	out := new(empty.Empty)
+func (c *grpcStoreClient) UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+	out := new(emptypb.Empty)
 	err := c.cc.Invoke(ctx, "/grpc.grpc_store/UpdateTable", in, out, opts...)
 	if err != nil {
 		return nil, err
@@ -2180,67 +2180,67 @@ func (c *grpcStoreClient) UpdateTable(ctx context.Context, in *UpdateTableReques
 // GrpcStoreServer is the server API for GrpcStore service.
 type GrpcStoreServer interface {
 	// / WriteIndex writes batch of indexes to the index tables.
-	WriteIndex(context.Context, *WriteIndexRequest) (*empty.Empty, error)
+	WriteIndex(context.Context, *WriteIndexRequest) (*emptypb.Empty, error)
 	// / QueryIndex reads the indexes required for given query & sends back the batch of rows
 	// / in rpc streams
 	QueryIndex(*QueryIndexRequest, GrpcStore_QueryIndexServer) error
 	// / DeleteIndex deletes the batch of index entries from the index tables
-	DeleteIndex(context.Context, *DeleteIndexRequest) (*empty.Empty, error)
+	DeleteIndex(context.Context, *DeleteIndexRequest) (*emptypb.Empty, error)
 	// / PutChunks saves the batch of chunks into the chunk tables.
-	PutChunks(context.Context, *PutChunksRequest) (*empty.Empty, error)
+	PutChunks(context.Context, *PutChunksRequest) (*emptypb.Empty, error)
 	// / GetChunks requests for batch of chunks and the batch of chunks are sent back in rpc streams
 	// / batching needs to be performed at server level as per requirement instead of sending single chunk per stream.
 	// / In GetChunks rpc request send buf as nil
 	GetChunks(*GetChunksRequest, GrpcStore_GetChunksServer) error
 	// / DeleteChunks deletes the chunks based on chunkID.
-	DeleteChunks(context.Context, *ChunkID) (*empty.Empty, error)
+	DeleteChunks(context.Context, *ChunkID) (*emptypb.Empty, error)
 	// / Lists all the tables that exists in the database.
-	ListTables(context.Context, *empty.Empty) (*ListTablesResponse, error)
+	ListTables(context.Context, *emptypb.Empty) (*ListTablesResponse, error)
 	// / Creates a table with provided name & attributes.
-	CreateTable(context.Context, *CreateTableRequest) (*empty.Empty, error)
+	CreateTable(context.Context, *CreateTableRequest) (*emptypb.Empty, error)
 	// Deletes a table using table name provided.
-	DeleteTable(context.Context, *DeleteTableRequest) (*empty.Empty, error)
+	DeleteTable(context.Context, *DeleteTableRequest) (*emptypb.Empty, error)
 	// Describes a table information for the provided table.
 	DescribeTable(context.Context, *DescribeTableRequest) (*DescribeTableResponse, error)
 	// Update a table with newly provided table information.
-	UpdateTable(context.Context, *UpdateTableRequest) (*empty.Empty, error)
+	UpdateTable(context.Context, *UpdateTableRequest) (*emptypb.Empty, error)
 }

 // UnimplementedGrpcStoreServer can be embedded to have forward compatible implementations.
 type UnimplementedGrpcStoreServer struct {
 }

-func (*UnimplementedGrpcStoreServer) WriteIndex(ctx context.Context, req *WriteIndexRequest) (*empty.Empty, error) {
+func (*UnimplementedGrpcStoreServer) WriteIndex(ctx context.Context, req *WriteIndexRequest) (*emptypb.Empty, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method WriteIndex not implemented")
 }
 func (*UnimplementedGrpcStoreServer) QueryIndex(req *QueryIndexRequest, srv GrpcStore_QueryIndexServer) error {
 	return status.Errorf(codes.Unimplemented, "method QueryIndex not implemented")
 }
-func (*UnimplementedGrpcStoreServer) DeleteIndex(ctx context.Context, req *DeleteIndexRequest) (*empty.Empty, error) {
+func (*UnimplementedGrpcStoreServer) DeleteIndex(ctx context.Context, req *DeleteIndexRequest) (*emptypb.Empty, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method DeleteIndex not implemented")
 }
-func (*UnimplementedGrpcStoreServer) PutChunks(ctx context.Context, req *PutChunksRequest) (*empty.Empty, error) {
+func (*UnimplementedGrpcStoreServer) PutChunks(ctx context.Context, req *PutChunksRequest) (*emptypb.Empty, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method PutChunks not implemented")
 }
 func (*UnimplementedGrpcStoreServer) GetChunks(req *GetChunksRequest, srv GrpcStore_GetChunksServer) error {
 	return status.Errorf(codes.Unimplemented, "method GetChunks not implemented")
 }
-func (*UnimplementedGrpcStoreServer) DeleteChunks(ctx context.Context, req *ChunkID) (*empty.Empty, error) {
+func (*UnimplementedGrpcStoreServer) DeleteChunks(ctx context.Context, req *ChunkID) (*emptypb.Empty, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method DeleteChunks not implemented")
 }
-func (*UnimplementedGrpcStoreServer) ListTables(ctx context.Context, req *empty.Empty) (*ListTablesResponse, error) {
+func (*UnimplementedGrpcStoreServer) ListTables(ctx context.Context, req *emptypb.Empty) (*ListTablesResponse, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method ListTables not implemented")
 }
-func (*UnimplementedGrpcStoreServer) CreateTable(ctx context.Context, req *CreateTableRequest) (*empty.Empty, error) {
+func (*UnimplementedGrpcStoreServer) CreateTable(ctx context.Context, req *CreateTableRequest) (*emptypb.Empty, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method CreateTable not implemented")
 }
-func (*UnimplementedGrpcStoreServer) DeleteTable(ctx context.Context, req *DeleteTableRequest) (*empty.Empty, error) {
+func (*UnimplementedGrpcStoreServer) DeleteTable(ctx context.Context, req *DeleteTableRequest) (*emptypb.Empty, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method DeleteTable not implemented")
 }
 func (*UnimplementedGrpcStoreServer) DescribeTable(ctx context.Context, req *DescribeTableRequest) (*DescribeTableResponse, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method DescribeTable not implemented")
 }
-func (*UnimplementedGrpcStoreServer) UpdateTable(ctx context.Context, req *UpdateTableRequest) (*empty.Empty, error) {
+func (*UnimplementedGrpcStoreServer) UpdateTable(ctx context.Context, req *UpdateTableRequest) (*emptypb.Empty, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method UpdateTable not implemented")
 }

@@ -2363,7 +2363,7 @@ func _GrpcStore_DeleteChunks_Handler(srv interface{}, ctx context.Context, dec f
 }

 func _GrpcStore_ListTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(empty.Empty)
+	in := new(emptypb.Empty)
 	if err := dec(in); err != nil {
 		return nil, err
 	}
@@ -2375,7 +2375,7 @@ func _GrpcStore_ListTables_Handler(srv interface{}, ctx context.Context, dec fun
 		FullMethod: "/grpc.grpc_store/ListTables",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(GrpcStoreServer).ListTables(ctx, req.(*empty.Empty))
+		return srv.(GrpcStoreServer).ListTables(ctx, req.(*emptypb.Empty))
 	}
 	return interceptor(ctx, in, info, handler)
 }

diff --git a/tools/ensure-buildx-builder.sh b/tools/ensure-buildx-builder.sh
new file mode 100755
index 000000000000..c9080280869c
--- /dev/null
+++ b/tools/ensure-buildx-builder.sh
@@ -0,0 +1,9 @@
+#! /usr/bin/env bash
+
+set -euo pipefail
+
+if ! docker buildx inspect | grep -E 'Driver:\s+docker-container' >/dev/null; then
+  echo "Active buildx builder does not use the docker-container driver, which is required for multi-architecture image builds. Creating a new buildx builder..."
+  docker buildx create --use
+fi
+
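For context on the new helper: it only guarantees that the currently *active* buildx builder uses the `docker-container` driver, which `docker buildx build` needs for multi-architecture output. As a minimal usage sketch (the invocation below is assumed for illustration — this excerpt does not show where the Makefile calls the script, and the `--platform` list is a hypothetical example; only the `0.33.3` tag comes from the Makefile change above):

```bash
# Hypothetical usage, not taken verbatim from this patch:
# ensure a docker-container builder is active, then run the
# multi-arch build that depends on it.
./tools/ensure-buildx-builder.sh

# Assumed platform list; the Makefile passes it via $(OCI_PLATFORMS).
DOCKER_BUILDKIT=1 docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -o type=registry \
  -t grafana/loki-build-image:0.33.3 \
  ./loki-build-image
```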