From 45e5b427b1d5a794de7034ddcf249d09c2730ca8 Mon Sep 17 00:00:00 2001
From: Meng Ye <4025839+jk2K@users.noreply.github.com>
Date: Thu, 1 Feb 2024 01:16:19 +0800
Subject: [PATCH 001/130] docs: fix row_shards doc (#11795)
For schema v10 or greater, the default is 16; see
https://github.com/grafana/loki/blob/1002ba00dff58ed588987169c0d3a0ddac2d022b/pkg/storage/config/schema_config.go#L336
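To make the rule concrete, here is a minimal sketch of the defaulting behavior described above. The helper name and version parsing are illustrative only; the authoritative logic lives at the `schema_config.go` line linked above.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// rowShardsFor sketches the documented rule: schema v10 and newer default
// to 16 row shards when none are configured explicitly. Simplified
// illustration only; see schema_config.go for the real implementation.
func rowShardsFor(schemaVersion string, configured uint32) uint32 {
	if configured != 0 {
		return configured // an explicit value always wins
	}
	v, err := strconv.Atoi(strings.TrimPrefix(schemaVersion, "v"))
	if err == nil && v >= 10 {
		return 16
	}
	return 0
}

func main() {
	fmt.Println(rowShardsFor("v11", 0)) // 16
	fmt.Println(rowShardsFor("v9", 0))  // 0
}
```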
---------
Co-authored-by: J Stickler
---
docs/sources/configure/_index.md | 2 +-
pkg/storage/config/schema_config.go | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md
index 283a2c9dd59a..25e4f70f987c 100644
--- a/docs/sources/configure/_index.md
+++ b/docs/sources/configure/_index.md
@@ -4577,7 +4577,7 @@ chunks:
[tags:
-ManagedAuthEnv enabled when the operator installation is on OpenShift STS clusters.
+ManagedAuthEnv is true when OpenShift-functions are enabled and the operator has detected
+that it is running with some kind of "workload identity" (AWS STS, Azure WIF) enabled.
diff --git a/operator/internal/config/managed_auth.go b/operator/internal/config/managed_auth.go
new file mode 100644
index 000000000000..73598e7032f8
--- /dev/null
+++ b/operator/internal/config/managed_auth.go
@@ -0,0 +1,57 @@
+package config
+
+import "os"
+
+// AWSEnvironment holds the discovered settings for AWS STS workload
+// identity.
+type AWSEnvironment struct {
+ RoleARN string
+}
+
+// AzureEnvironment holds the discovered settings for Azure Workload
+// Identity Federation.
+type AzureEnvironment struct {
+ ClientID string
+ SubscriptionID string
+ TenantID string
+ Region string
+}
+
+// ManagedAuthConfig collects the managed-auth settings discovered from the
+// operator's environment; at most one field is non-nil.
+type ManagedAuthConfig struct {
+ AWS *AWSEnvironment
+ Azure *AzureEnvironment
+}
+
+// discoverManagedAuthConfig inspects the well-known environment variables
+// (ROLEARN, CLIENTID, TENANTID, SUBSCRIPTIONID) and returns the matching
+// configuration, or nil when none is set.
+func discoverManagedAuthConfig() *ManagedAuthConfig {
+ // AWS
+ roleARN := os.Getenv("ROLEARN")
+
+ // Azure
+ clientID := os.Getenv("CLIENTID")
+ tenantID := os.Getenv("TENANTID")
+ subscriptionID := os.Getenv("SUBSCRIPTIONID")
+
+ switch {
+ case roleARN != "":
+ return &ManagedAuthConfig{
+ AWS: &AWSEnvironment{
+ RoleARN: roleARN,
+ },
+ }
+ case clientID != "" && tenantID != "" && subscriptionID != "":
+ return &ManagedAuthConfig{
+ Azure: &AzureEnvironment{
+ ClientID: clientID,
+ SubscriptionID: subscriptionID,
+ TenantID: tenantID,
+ },
+ }
+ }
+
+ return nil
+}
diff --git a/operator/internal/config/options.go b/operator/internal/config/options.go
index 7ed9abb526a7..dc54404f2245 100644
--- a/operator/internal/config/options.go
+++ b/operator/internal/config/options.go
@@ -17,19 +17,24 @@ import (
// LoadConfig initializes the controller configuration, optionally overriding the defaults
// from a provided configuration file.
-func LoadConfig(scheme *runtime.Scheme, configFile string) (*configv1.ProjectConfig, ctrl.Options, error) {
+func LoadConfig(scheme *runtime.Scheme, configFile string) (*configv1.ProjectConfig, *ManagedAuthConfig, ctrl.Options, error) {
options := ctrl.Options{Scheme: scheme}
if configFile == "" {
- return &configv1.ProjectConfig{}, options, nil
+ return &configv1.ProjectConfig{}, nil, options, nil
}
ctrlCfg, err := loadConfigFile(scheme, configFile)
if err != nil {
- return nil, options, fmt.Errorf("failed to parse controller manager config file: %w", err)
+ return nil, nil, options, fmt.Errorf("failed to parse controller manager config file: %w", err)
+ }
+
+ managedAuth := discoverManagedAuthConfig()
+ if ctrlCfg.Gates.OpenShift.Enabled && managedAuth != nil {
+ ctrlCfg.Gates.OpenShift.ManagedAuthEnv = true
}
options = mergeOptionsFromFile(options, ctrlCfg)
- return ctrlCfg, options, nil
+ return ctrlCfg, managedAuth, options, nil
}
func mergeOptionsFromFile(o manager.Options, cfg *configv1.ProjectConfig) manager.Options {
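The new four-value signature changes the operator entrypoint's call site. A minimal sketch of a caller, assuming an entrypoint shaped like the operator's main.go (which is not part of this patch):

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/apimachinery/pkg/runtime"

	"github.com/grafana/loki/operator/internal/config"
)

func main() {
	scheme := runtime.NewScheme()

	// LoadConfig now also returns the discovered managed-auth settings,
	// or nil when the operator runs without workload identity.
	ctrlCfg, managedAuth, options, err := config.LoadConfig(scheme, os.Getenv("CONFIG_FILE"))
	if err != nil {
		fmt.Fprintln(os.Stderr, "failed to load configuration:", err)
		os.Exit(1)
	}

	_, _ = ctrlCfg, options // wired into the controller manager in the real entrypoint
	if managedAuth != nil {
		fmt.Println("managed auth detected; CredentialsRequest reconciliation enabled")
	}
}
```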
diff --git a/operator/internal/handlers/credentialsrequest_create.go b/operator/internal/handlers/credentialsrequest_create.go
index 6074e10b2d5a..50e06375ffd8 100644
--- a/operator/internal/handlers/credentialsrequest_create.go
+++ b/operator/internal/handlers/credentialsrequest_create.go
@@ -3,64 +3,102 @@ package handlers
import (
"context"
"errors"
+ "fmt"
"github.com/ViaQ/logerr/v2/kverrors"
+ "github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+ ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/config"
"github.com/grafana/loki/operator/internal/external/k8s"
+ "github.com/grafana/loki/operator/internal/manifests"
"github.com/grafana/loki/operator/internal/manifests/openshift"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
-var (
- errAzureNoSecretFound = errors.New("can not create CredentialsRequest: no azure secret found")
- errAzureNoRegion = errors.New("can not create CredentialsRequest: missing secret field: region")
-)
+var errAzureNoRegion = errors.New("can not create CredentialsRequest: missing secret field: region")
// CreateCredentialsRequest creates a new CredentialsRequest resource for a Lokistack
// to request a cloud credentials Secret resource from the OpenShift cloud-credentials-operator.
-func CreateCredentialsRequest(ctx context.Context, k k8s.Client, stack client.ObjectKey, secret *corev1.Secret) (string, error) {
- managedAuthEnv := openshift.DiscoverManagedAuthEnv()
- if managedAuthEnv == nil {
- return "", nil
+func CreateCredentialsRequest(ctx context.Context, log logr.Logger, scheme *runtime.Scheme, managedAuth *config.ManagedAuthConfig, k k8s.Client, req ctrl.Request) error {
+ ll := log.WithValues("lokistack", req.NamespacedName, "event", "createCredentialsRequest")
+
+ var stack lokiv1.LokiStack
+ if err := k.Get(ctx, req.NamespacedName, &stack); err != nil {
+ if apierrors.IsNotFound(err) {
+ // maybe the user deleted it before we could react? Either way this isn't an issue
+ ll.Error(err, "could not find the requested LokiStack", "name", req.String())
+ return nil
+ }
+ return kverrors.Wrap(err, "failed to lookup LokiStack", "name", req.String())
}
- if managedAuthEnv.Azure != nil && managedAuthEnv.Azure.Region == "" {
+ if managedAuth.Azure != nil && managedAuth.Azure.Region == "" {
// Managed environment for Azure does not provide Region, but we need this for the CredentialsRequest.
// This looks like an oversight when creating the UI in OpenShift, but for now we need to pull this data
// from somewhere else -> the Azure Storage Secret
- if secret == nil {
- return "", errAzureNoSecretFound
+ storageSecretName := client.ObjectKey{
+ Namespace: stack.Namespace,
+ Name: stack.Spec.Storage.Secret.Name,
+ }
+ storageSecret := &corev1.Secret{}
+ if err := k.Get(ctx, storageSecretName, storageSecret); err != nil {
+ if apierrors.IsNotFound(err) {
+ // Skip this error here as it will be picked up by the LokiStack handler instead
+ ll.Error(err, "could not find secret for LokiStack", "name", req.String())
+ return nil
+ }
+ return err
}
- region := secret.Data[storage.KeyAzureRegion]
+ region := storageSecret.Data[storage.KeyAzureRegion]
if len(region) == 0 {
- return "", errAzureNoRegion
+ return errAzureNoRegion
}
- managedAuthEnv.Azure.Region = string(region)
+ managedAuth.Azure.Region = string(region)
}
opts := openshift.Options{
BuildOpts: openshift.BuildOptions{
LokiStackName: stack.Name,
LokiStackNamespace: stack.Namespace,
+ RulerName: manifests.RulerName(stack.Name),
},
- ManagedAuthEnv: managedAuthEnv,
+ ManagedAuth: managedAuth,
}
credReq, err := openshift.BuildCredentialsRequest(opts)
if err != nil {
- return "", err
+ return err
}
- if err := k.Create(ctx, credReq); err != nil {
- if !apierrors.IsAlreadyExists(err) {
- return "", kverrors.Wrap(err, "failed to create credentialsrequest", "key", client.ObjectKeyFromObject(credReq))
- }
+ err = ctrl.SetControllerReference(&stack, credReq, scheme)
+ if err != nil {
+ return kverrors.Wrap(err, "failed to set controller owner reference to resource")
+ }
+
+ desired := credReq.DeepCopyObject().(client.Object)
+ mutateFn := manifests.MutateFuncFor(credReq, desired, map[string]string{})
+
+ op, err := ctrl.CreateOrUpdate(ctx, k, credReq, mutateFn)
+ if err != nil {
+ return kverrors.Wrap(err, "failed to configure CredentialsRequest")
+ }
+
+ msg := fmt.Sprintf("Resource has been %s", op)
+ switch op {
+ case ctrlutil.OperationResultNone:
+ ll.V(1).Info(msg)
+ default:
+ ll.Info(msg)
}
- return credReq.Spec.SecretRef.Name, nil
+ return nil
}
diff --git a/operator/internal/handlers/credentialsrequest_create_test.go b/operator/internal/handlers/credentialsrequest_create_test.go
index df903eaec662..626302a11327 100644
--- a/operator/internal/handlers/credentialsrequest_create_test.go
+++ b/operator/internal/handlers/credentialsrequest_create_test.go
@@ -8,51 +8,108 @@ import (
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/config"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
-func TestCreateCredentialsRequest_DoNothing_WhenManagedAuthEnvMissing(t *testing.T) {
+func credentialsRequestFakeClient(cr *cloudcredentialv1.CredentialsRequest, lokistack *lokiv1.LokiStack, secret *corev1.Secret) *k8sfakes.FakeClient {
k := &k8sfakes.FakeClient{}
- key := client.ObjectKey{Name: "my-stack", Namespace: "ns"}
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ switch object.(type) {
+ case *cloudcredentialv1.CredentialsRequest:
+ if cr == nil {
+ return errors.NewNotFound(schema.GroupResource{}, name.Name)
+ }
+ k.SetClientObject(object, cr)
+ case *lokiv1.LokiStack:
+ if lokistack == nil {
+ return errors.NewNotFound(schema.GroupResource{}, name.Name)
+ }
+ k.SetClientObject(object, lokistack)
+ case *corev1.Secret:
+ if secret == nil {
+ return errors.NewNotFound(schema.GroupResource{}, name.Name)
+ }
+ k.SetClientObject(object, secret)
+ }
+ return nil
+ }
- secretRef, err := CreateCredentialsRequest(context.Background(), k, key, nil)
- require.NoError(t, err)
- require.Empty(t, secretRef)
+ return k
}
func TestCreateCredentialsRequest_CreateNewResource(t *testing.T) {
- k := &k8sfakes.FakeClient{}
- key := client.ObjectKey{Name: "my-stack", Namespace: "ns"}
+ wantServiceAccountNames := []string{
+ "my-stack",
+ "my-stack-ruler",
+ }
+
+ lokistack := &lokiv1.LokiStack{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "ns",
+ },
+ }
- t.Setenv("ROLEARN", "a-role-arn")
+ k := credentialsRequestFakeClient(nil, lokistack, nil)
+ req := ctrl.Request{
+ NamespacedName: client.ObjectKey{Name: "my-stack", Namespace: "ns"},
+ }
+
+ managedAuth := &config.ManagedAuthConfig{
+ AWS: &config.AWSEnvironment{
+ RoleARN: "a-role-arn",
+ },
+ }
- secretRef, err := CreateCredentialsRequest(context.Background(), k, key, nil)
+ err := CreateCredentialsRequest(context.Background(), logger, scheme, managedAuth, k, req)
require.NoError(t, err)
- require.NotEmpty(t, secretRef)
require.Equal(t, 1, k.CreateCallCount())
+
+ _, obj, _ := k.CreateArgsForCall(0)
+ credReq, ok := obj.(*cloudcredentialv1.CredentialsRequest)
+ require.True(t, ok)
+
+ require.Equal(t, wantServiceAccountNames, credReq.Spec.ServiceAccountNames)
}
func TestCreateCredentialsRequest_CreateNewResourceAzure(t *testing.T) {
wantRegion := "test-region"
- k := &k8sfakes.FakeClient{}
- key := client.ObjectKey{Name: "my-stack", Namespace: "ns"}
+ lokistack := &lokiv1.LokiStack{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "ns",
+ },
+ }
secret := &corev1.Secret{
Data: map[string][]byte{
"region": []byte(wantRegion),
},
}
- t.Setenv("CLIENTID", "test-client-id")
- t.Setenv("TENANTID", "test-tenant-id")
- t.Setenv("SUBSCRIPTIONID", "test-subscription-id")
+ k := credentialsRequestFakeClient(nil, lokistack, secret)
+ req := ctrl.Request{
+ NamespacedName: client.ObjectKey{Name: "my-stack", Namespace: "ns"},
+ }
- secretRef, err := CreateCredentialsRequest(context.Background(), k, key, secret)
+ managedAuth := &config.ManagedAuthConfig{
+ Azure: &config.AzureEnvironment{
+ ClientID: "test-client-id",
+ SubscriptionID: "test-subscription-id",
+ TenantID: "test-tenant-id",
+ },
+ }
+
+ err := CreateCredentialsRequest(context.Background(), logger, scheme, managedAuth, k, req)
require.NoError(t, err)
- require.NotEmpty(t, secretRef)
require.Equal(t, 1, k.CreateCallCount())
_, obj, _ := k.CreateArgsForCall(0)
@@ -66,17 +123,20 @@ func TestCreateCredentialsRequest_CreateNewResourceAzure(t *testing.T) {
}
func TestCreateCredentialsRequest_CreateNewResourceAzure_Errors(t *testing.T) {
- k := &k8sfakes.FakeClient{}
- key := client.ObjectKey{Name: "my-stack", Namespace: "ns"}
+ lokistack := &lokiv1.LokiStack{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "ns",
+ },
+ }
+ req := ctrl.Request{
+ NamespacedName: client.ObjectKey{Name: "my-stack", Namespace: "ns"},
+ }
tt := []struct {
secret *corev1.Secret
wantError string
}{
- {
- secret: nil,
- wantError: errAzureNoSecretFound.Error(),
- },
{
secret: &corev1.Secret{},
wantError: errAzureNoRegion.Error(),
@@ -86,29 +146,52 @@ func TestCreateCredentialsRequest_CreateNewResourceAzure_Errors(t *testing.T) {
for _, tc := range tt {
tc := tc
t.Run(tc.wantError, func(t *testing.T) {
- // Not parallel (environment variables)
- t.Setenv("CLIENTID", "test-client-id")
- t.Setenv("TENANTID", "test-tenant-id")
- t.Setenv("SUBSCRIPTIONID", "test-subscription-id")
-
- _, err := CreateCredentialsRequest(context.Background(), k, key, tc.secret)
+ t.Parallel()
+
+ managedAuth := &config.ManagedAuthConfig{
+ Azure: &config.AzureEnvironment{
+ ClientID: "test-client-id",
+ SubscriptionID: "test-subscription-id",
+ TenantID: "test-tenant-id",
+ },
+ }
+ k := credentialsRequestFakeClient(nil, lokistack, tc.secret)
+
+ err := CreateCredentialsRequest(context.Background(), logger, scheme, managedAuth, k, req)
require.EqualError(t, err, tc.wantError)
})
}
}
func TestCreateCredentialsRequest_DoNothing_WhenCredentialsRequestExist(t *testing.T) {
- k := &k8sfakes.FakeClient{}
- key := client.ObjectKey{Name: "my-stack", Namespace: "ns"}
+ req := ctrl.Request{
+ NamespacedName: client.ObjectKey{Name: "my-stack", Namespace: "ns"},
+ }
- t.Setenv("ROLEARN", "a-role-arn")
+ managedAuth := &config.ManagedAuthConfig{
+ AWS: &config.AWSEnvironment{
+ RoleARN: "a-role-arn",
+ },
+ }
- k.CreateStub = func(_ context.Context, _ client.Object, _ ...client.CreateOption) error {
- return errors.NewAlreadyExists(schema.GroupResource{}, "credentialsrequest exists")
+ cr := &cloudcredentialv1.CredentialsRequest{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "ns",
+ },
+ }
+ lokistack := &lokiv1.LokiStack{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "ns",
+ },
}
- secretRef, err := CreateCredentialsRequest(context.Background(), k, key, nil)
+ k := credentialsRequestFakeClient(cr, lokistack, nil)
+
+ err := CreateCredentialsRequest(context.Background(), logger, scheme, managedAuth, k, req)
require.NoError(t, err)
- require.NotEmpty(t, secretRef)
- require.Equal(t, 1, k.CreateCallCount())
+ require.Equal(t, 2, k.GetCallCount())
+ require.Equal(t, 0, k.CreateCallCount())
+ require.Equal(t, 1, k.UpdateCallCount())
}
diff --git a/operator/internal/handlers/credentialsrequest_delete.go b/operator/internal/handlers/credentialsrequest_delete.go
deleted file mode 100644
index edf05fcb205d..000000000000
--- a/operator/internal/handlers/credentialsrequest_delete.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package handlers
-
-import (
- "context"
-
- "github.com/ViaQ/logerr/v2/kverrors"
- "k8s.io/apimachinery/pkg/api/errors"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- "github.com/grafana/loki/operator/internal/external/k8s"
- "github.com/grafana/loki/operator/internal/manifests/openshift"
-)
-
-// DeleteCredentialsRequest deletes a LokiStack's accompanying CredentialsRequest resource
-// to trigger the OpenShift cloud-credentials-operator to wipe out any credentials related
-// Secret resource on the LokiStack namespace.
-func DeleteCredentialsRequest(ctx context.Context, k k8s.Client, stack client.ObjectKey) error {
- managedAuthEnv := openshift.DiscoverManagedAuthEnv()
- if managedAuthEnv == nil {
- return nil
- }
-
- opts := openshift.Options{
- BuildOpts: openshift.BuildOptions{
- LokiStackName: stack.Name,
- LokiStackNamespace: stack.Namespace,
- },
- ManagedAuthEnv: managedAuthEnv,
- }
-
- credReq, err := openshift.BuildCredentialsRequest(opts)
- if err != nil {
- return kverrors.Wrap(err, "failed to build credentialsrequest", "key", stack)
- }
-
- if err := k.Delete(ctx, credReq); err != nil {
- if !errors.IsNotFound(err) {
- return kverrors.Wrap(err, "failed to delete credentialsrequest", "key", client.ObjectKeyFromObject(credReq))
- }
- }
-
- return nil
-}
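The explicit delete handler above can be removed because the create path now sets the LokiStack as controller owner of the CredentialsRequest (via `ctrl.SetControllerReference`) and creates the resource in the stack's own namespace instead of `openshift-cloud-credential-operator`. Owner references only take effect within a namespace, so with both changes in place Kubernetes garbage collection cascades the deletion when the LokiStack is removed, and no explicit cleanup is needed.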
diff --git a/operator/internal/handlers/credentialsrequest_delete_test.go b/operator/internal/handlers/credentialsrequest_delete_test.go
deleted file mode 100644
index 57f1c005ee70..000000000000
--- a/operator/internal/handlers/credentialsrequest_delete_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package handlers
-
-import (
- "context"
- "testing"
-
- "github.com/stretchr/testify/require"
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
-)
-
-func TestDeleteCredentialsRequest_DoNothing_WhenManagedAuthEnvMissing(t *testing.T) {
- k := &k8sfakes.FakeClient{}
- key := client.ObjectKey{Name: "my-stack", Namespace: "ns"}
-
- err := DeleteCredentialsRequest(context.Background(), k, key)
- require.NoError(t, err)
-}
-
-func TestDeleteCredentialsRequest_DeleteExistingResource(t *testing.T) {
- k := &k8sfakes.FakeClient{}
- key := client.ObjectKey{Name: "my-stack", Namespace: "ns"}
-
- t.Setenv("ROLEARN", "a-role-arn")
-
- err := DeleteCredentialsRequest(context.Background(), k, key)
- require.NoError(t, err)
- require.Equal(t, 1, k.DeleteCallCount())
-}
-
-func TestDeleteCredentialsRequest_DoNothing_WhenCredentialsRequestNotExists(t *testing.T) {
- k := &k8sfakes.FakeClient{}
- key := client.ObjectKey{Name: "my-stack", Namespace: "ns"}
-
- t.Setenv("ROLEARN", "a-role-arn")
-
- k.DeleteStub = func(_ context.Context, _ client.Object, _ ...client.DeleteOption) error {
- return errors.NewNotFound(schema.GroupResource{}, "credentials request not found")
- }
-
- err := DeleteCredentialsRequest(context.Background(), k, key)
- require.NoError(t, err)
- require.Equal(t, 1, k.DeleteCallCount())
-}
diff --git a/operator/internal/handlers/internal/storage/secrets.go b/operator/internal/handlers/internal/storage/secrets.go
index 21cd58b7c3c2..99bafb911ec2 100644
--- a/operator/internal/handlers/internal/storage/secrets.go
+++ b/operator/internal/handlers/internal/storage/secrets.go
@@ -59,15 +59,7 @@ func getSecrets(ctx context.Context, k k8s.Client, stack *lokiv1.LokiStack, fg c
}
if fg.OpenShift.ManagedAuthEnv {
- secretName, ok := stack.Annotations[storage.AnnotationCredentialsRequestsSecretRef]
- if !ok {
- return nil, nil, &status.DegradedError{
- Message: "Missing OpenShift cloud credentials request",
- Reason: lokiv1.ReasonMissingCredentialsRequest,
- Requeue: true,
- }
- }
-
+ secretName := storage.ManagedCredentialsSecretName(stack.Name)
managedAuthCredsKey := client.ObjectKey{Name: secretName, Namespace: stack.Namespace}
if err := k.Get(ctx, managedAuthCredsKey, &managedAuthSecret); err != nil {
if apierrors.IsNotFound(err) {
@@ -100,7 +92,7 @@ func extractSecrets(secretType lokiv1.ObjectStorageSecretType, objStore, managed
SharedStore: secretType,
}
- if fg.OpenShift.ManagedAuthEnabled() {
+ if fg.OpenShift.ManagedAuthEnv {
var managedAuthHash string
managedAuthHash, err = hashSecretData(managedAuth)
if err != nil {
@@ -190,11 +182,18 @@ func extractAzureConfigSecret(s *corev1.Secret, fg configv1.FeatureGates) (*stor
// Extract and validate optional fields
endpointSuffix := s.Data[storage.KeyAzureStorageEndpointSuffix]
audience := s.Data[storage.KeyAzureAudience]
+ region := s.Data[storage.KeyAzureRegion]
if !workloadIdentity && len(audience) > 0 {
return nil, fmt.Errorf("%w: %s", errSecretFieldNotAllowed, storage.KeyAzureAudience)
}
+ if fg.OpenShift.ManagedAuthEnv {
+ if len(region) == 0 {
+ return nil, fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAzureRegion)
+ }
+ }
+
return &storage.AzureStorageConfig{
Env: string(env),
Container: string(container),
@@ -210,12 +209,7 @@ func validateAzureCredentials(s *corev1.Secret, fg configv1.FeatureGates) (workl
tenantID := s.Data[storage.KeyAzureStorageTenantID]
subscriptionID := s.Data[storage.KeyAzureStorageSubscriptionID]
- if fg.OpenShift.ManagedAuthEnabled() {
- region := s.Data[storage.KeyAzureRegion]
- if len(region) == 0 {
- return false, fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAzureRegion)
- }
-
+ if fg.OpenShift.ManagedAuthEnv {
if len(accountKey) > 0 || len(clientID) > 0 || len(tenantID) > 0 || len(subscriptionID) > 0 {
return false, errAzureManagedIdentityNoOverride
}
@@ -282,8 +276,8 @@ func extractGCSConfigSecret(s *corev1.Secret) (*storage.GCSStorageConfig, error)
return nil, fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyGCPWorkloadIdentityProviderAudience)
}
- if credentialsFile.CredentialsSource.File != storage.GCPDefautCredentialsFile {
- return nil, fmt.Errorf("%w: %s", errGCPWrongCredentialSourceFile, storage.GCPDefautCredentialsFile)
+ if credentialsFile.CredentialsSource.File != storage.ServiceAccountTokenFilePath {
+ return nil, fmt.Errorf("%w: %s", errGCPWrongCredentialSourceFile, storage.ServiceAccountTokenFilePath)
}
}
@@ -330,7 +324,7 @@ func extractS3ConfigSecret(s *corev1.Secret, fg configv1.FeatureGates) (*storage
)
switch {
- case fg.OpenShift.ManagedAuthEnabled():
+ case fg.OpenShift.ManagedAuthEnv:
cfg.STS = true
cfg.Audience = string(audience)
// Do not allow users overriding the role arn provided on Loki Operator installation
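The tests that follow assert a `CredentialMode()` accessor on the extracted storage options, whose definition is not part of this patch. A sketch of the mapping inferred from the test expectations (static keys map to Static, STS and workload identity to Token, OpenShift managed auth to Managed); the helper below is hypothetical:

```go
package storage

import (
	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)

// credentialMode sketches how the mode could be derived from what
// extractSecrets discovered. Hypothetical simplification; the real method
// is defined on the storage options outside this patch.
func credentialMode(openShiftManagedAuth, tokenAuth bool) lokiv1.CredentialMode {
	switch {
	case openShiftManagedAuth:
		return lokiv1.CredentialModeManaged
	case tokenAuth: // AWS STS or Azure/GCP workload identity
		return lokiv1.CredentialModeToken
	default:
		return lokiv1.CredentialModeStatic
	}
}
```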
diff --git a/operator/internal/handlers/internal/storage/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go
index cc1836023231..1363cd4a660a 100644
--- a/operator/internal/handlers/internal/storage/secrets_test.go
+++ b/operator/internal/handlers/internal/storage/secrets_test.go
@@ -71,11 +71,12 @@ func TestUnknownType(t *testing.T) {
func TestAzureExtract(t *testing.T) {
type test struct {
- name string
- secret *corev1.Secret
- managedSecret *corev1.Secret
- featureGates configv1.FeatureGates
- wantError string
+ name string
+ secret *corev1.Secret
+ managedSecret *corev1.Secret
+ featureGates configv1.FeatureGates
+ wantError string
+ wantCredentialMode lokiv1.CredentialMode
}
table := []test{
{
@@ -224,6 +225,7 @@ func TestAzureExtract(t *testing.T) {
"account_key": []byte("secret"),
},
},
+ wantCredentialMode: lokiv1.CredentialModeStatic,
},
{
name: "mandatory for workload-identity set",
@@ -239,6 +241,7 @@ func TestAzureExtract(t *testing.T) {
"region": []byte("test-region"),
},
},
+ wantCredentialMode: lokiv1.CredentialModeToken,
},
{
name: "mandatory for managed workload-identity set",
@@ -252,7 +255,14 @@ func TestAzureExtract(t *testing.T) {
},
},
managedSecret: &corev1.Secret{
- Data: map[string][]byte{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "managed-secret",
+ },
+ Data: map[string][]byte{
+ "azure_client_id": []byte("test-client-id"),
+ "azure_tenant_id": []byte("test-tenant-id"),
+ "azure_subscription_id": []byte("test-subscription-id"),
+ },
},
featureGates: configv1.FeatureGates{
OpenShift: configv1.OpenShiftFeatureGates{
@@ -260,6 +270,7 @@ func TestAzureExtract(t *testing.T) {
ManagedAuthEnv: true,
},
},
+ wantCredentialMode: lokiv1.CredentialModeManaged,
},
{
name: "all set including optional",
@@ -273,6 +284,7 @@ func TestAzureExtract(t *testing.T) {
"endpoint_suffix": []byte("suffix"),
},
},
+ wantCredentialMode: lokiv1.CredentialModeStatic,
},
}
for _, tst := range table {
@@ -285,7 +297,8 @@ func TestAzureExtract(t *testing.T) {
require.NoError(t, err)
require.NotEmpty(t, opts.SecretName)
require.NotEmpty(t, opts.SecretSHA1)
- require.Equal(t, opts.SharedStore, lokiv1.ObjectStorageSecretAzure)
+ require.Equal(t, lokiv1.ObjectStorageSecretAzure, opts.SharedStore)
+ require.Equal(t, tst.wantCredentialMode, opts.CredentialMode())
} else {
require.EqualError(t, err, tst.wantError)
}
@@ -295,9 +308,10 @@ func TestAzureExtract(t *testing.T) {
func TestGCSExtract(t *testing.T) {
type test struct {
- name string
- secret *corev1.Secret
- wantError string
+ name string
+ secret *corev1.Secret
+ wantError string
+ wantCredentialMode lokiv1.CredentialMode
}
table := []test{
{
@@ -332,10 +346,10 @@ func TestGCSExtract(t *testing.T) {
Data: map[string][]byte{
"bucketname": []byte("here"),
"audience": []byte("test"),
- "key.json": []byte("{\"type\": \"external_account\", \"credential_source\": {\"file\": \"/custom/path/to/secret/gcp/serviceaccount/token\"}}"),
+ "key.json": []byte("{\"type\": \"external_account\", \"credential_source\": {\"file\": \"/custom/path/to/secret/storage/serviceaccount/token\"}}"),
},
},
- wantError: "credential source in secret needs to point to token file: /var/run/secrets/gcp/serviceaccount/token",
+ wantError: "credential source in secret needs to point to token file: /var/run/secrets/storage/serviceaccount/token",
},
{
name: "all set",
@@ -346,6 +360,7 @@ func TestGCSExtract(t *testing.T) {
"key.json": []byte("{\"type\": \"service_account\"}"),
},
},
+ wantCredentialMode: lokiv1.CredentialModeStatic,
},
{
name: "mandatory for workload-identity set",
@@ -354,9 +369,10 @@ func TestGCSExtract(t *testing.T) {
Data: map[string][]byte{
"bucketname": []byte("here"),
"audience": []byte("test"),
- "key.json": []byte("{\"type\": \"external_account\", \"credential_source\": {\"file\": \"/var/run/secrets/gcp/serviceaccount/token\"}}"),
+ "key.json": []byte("{\"type\": \"external_account\", \"credential_source\": {\"file\": \"/var/run/secrets/storage/serviceaccount/token\"}}"),
},
},
+ wantCredentialMode: lokiv1.CredentialModeToken,
},
}
for _, tst := range table {
@@ -364,9 +380,10 @@ func TestGCSExtract(t *testing.T) {
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- _, err := extractSecrets(lokiv1.ObjectStorageSecretGCS, tst.secret, nil, configv1.FeatureGates{})
+ opts, err := extractSecrets(lokiv1.ObjectStorageSecretGCS, tst.secret, nil, configv1.FeatureGates{})
if tst.wantError == "" {
require.NoError(t, err)
+ require.Equal(t, tst.wantCredentialMode, opts.CredentialMode())
} else {
require.EqualError(t, err, tst.wantError)
}
@@ -376,9 +393,10 @@ func TestGCSExtract(t *testing.T) {
func TestS3Extract(t *testing.T) {
type test struct {
- name string
- secret *corev1.Secret
- wantError string
+ name string
+ secret *corev1.Secret
+ wantError string
+ wantCredentialMode lokiv1.CredentialMode
}
table := []test{
{
@@ -456,6 +474,7 @@ func TestS3Extract(t *testing.T) {
"sse_kms_key_id": []byte("kms-key-id"),
},
},
+ wantCredentialMode: lokiv1.CredentialModeStatic,
},
{
name: "all set with SSE-KMS with encryption context",
@@ -471,6 +490,7 @@ func TestS3Extract(t *testing.T) {
"sse_kms_encryption_context": []byte("kms-encryption-ctx"),
},
},
+ wantCredentialMode: lokiv1.CredentialModeStatic,
},
{
name: "all set with SSE-S3",
@@ -484,6 +504,7 @@ func TestS3Extract(t *testing.T) {
"sse_type": []byte("SSE-S3"),
},
},
+ wantCredentialMode: lokiv1.CredentialModeStatic,
},
{
name: "all set without SSE",
@@ -496,6 +517,7 @@ func TestS3Extract(t *testing.T) {
"access_key_secret": []byte("secret"),
},
},
+ wantCredentialMode: lokiv1.CredentialModeStatic,
},
{
name: "STS missing region",
@@ -518,6 +540,7 @@ func TestS3Extract(t *testing.T) {
"region": []byte("here"),
},
},
+ wantCredentialMode: lokiv1.CredentialModeToken,
},
{
name: "STS all set",
@@ -530,6 +553,7 @@ func TestS3Extract(t *testing.T) {
"audience": []byte("audience"),
},
},
+ wantCredentialMode: lokiv1.CredentialModeToken,
},
}
for _, tst := range table {
@@ -542,7 +566,8 @@ func TestS3Extract(t *testing.T) {
require.NoError(t, err)
require.NotEmpty(t, opts.SecretName)
require.NotEmpty(t, opts.SecretSHA1)
- require.Equal(t, opts.SharedStore, lokiv1.ObjectStorageSecretS3)
+ require.Equal(t, lokiv1.ObjectStorageSecretS3, opts.SharedStore)
+ require.Equal(t, tst.wantCredentialMode, opts.CredentialMode())
} else {
require.EqualError(t, err, tst.wantError)
}
@@ -616,10 +641,11 @@ func TestS3Extract_WithOpenShiftManagedAuth(t *testing.T) {
require.NoError(t, err)
require.NotEmpty(t, opts.SecretName)
require.NotEmpty(t, opts.SecretSHA1)
- require.Equal(t, opts.SharedStore, lokiv1.ObjectStorageSecretS3)
+ require.Equal(t, lokiv1.ObjectStorageSecretS3, opts.SharedStore)
require.True(t, opts.S3.STS)
- require.Equal(t, opts.OpenShift.CloudCredentials.SecretName, tst.managedAuthSecret.Name)
+ require.Equal(t, tst.managedAuthSecret.Name, opts.OpenShift.CloudCredentials.SecretName)
require.NotEmpty(t, opts.OpenShift.CloudCredentials.SHA1)
+ require.Equal(t, lokiv1.CredentialModeManaged, opts.CredentialMode())
} else {
require.EqualError(t, err, tst.wantError)
}
@@ -767,7 +793,8 @@ func TestSwiftExtract(t *testing.T) {
require.NoError(t, err)
require.NotEmpty(t, opts.SecretName)
require.NotEmpty(t, opts.SecretSHA1)
- require.Equal(t, opts.SharedStore, lokiv1.ObjectStorageSecretSwift)
+ require.Equal(t, lokiv1.ObjectStorageSecretSwift, opts.SharedStore)
+ require.Equal(t, lokiv1.CredentialModeStatic, opts.CredentialMode())
} else {
require.EqualError(t, err, tst.wantError)
}
@@ -840,7 +867,8 @@ func TestAlibabaCloudExtract(t *testing.T) {
require.NoError(t, err)
require.NotEmpty(t, opts.SecretName)
require.NotEmpty(t, opts.SecretSHA1)
- require.Equal(t, opts.SharedStore, lokiv1.ObjectStorageSecretAlibabaCloud)
+ require.Equal(t, lokiv1.ObjectStorageSecretAlibabaCloud, opts.SharedStore)
+ require.Equal(t, lokiv1.CredentialModeStatic, opts.CredentialMode())
} else {
require.EqualError(t, err, tst.wantError)
}
diff --git a/operator/internal/handlers/internal/storage/storage_test.go b/operator/internal/handlers/internal/storage/storage_test.go
index 9e041bf99a23..45f5b0f2865b 100644
--- a/operator/internal/handlers/internal/storage/storage_test.go
+++ b/operator/internal/handlers/internal/storage/storage_test.go
@@ -17,7 +17,6 @@ import (
configv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
- "github.com/grafana/loki/operator/internal/manifests/storage"
"github.com/grafana/loki/operator/internal/status"
)
@@ -135,77 +134,6 @@ func TestBuildOptions_WhenMissingSecret_SetDegraded(t *testing.T) {
require.Equal(t, degradedErr, err)
}
-func TestBuildOptions_WhenMissingCloudCredentialsRequest_SetDegraded(t *testing.T) {
- sw := &k8sfakes.FakeStatusWriter{}
- k := &k8sfakes.FakeClient{}
- r := ctrl.Request{
- NamespacedName: types.NamespacedName{
- Name: "my-stack",
- Namespace: "some-ns",
- },
- }
-
- fg := configv1.FeatureGates{
- OpenShift: configv1.OpenShiftFeatureGates{
- ManagedAuthEnv: true,
- },
- }
-
- degradedErr := &status.DegradedError{
- Message: "Missing OpenShift cloud credentials request",
- Reason: lokiv1.ReasonMissingCredentialsRequest,
- Requeue: true,
- }
-
- stack := &lokiv1.LokiStack{
- TypeMeta: metav1.TypeMeta{
- Kind: "LokiStack",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "my-stack",
- Namespace: "some-ns",
- UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
- Annotations: map[string]string{},
- },
- Spec: lokiv1.LokiStackSpec{
- Size: lokiv1.SizeOneXExtraSmall,
- Storage: lokiv1.ObjectStorageSpec{
- Schemas: []lokiv1.ObjectStorageSchema{
- {
- Version: lokiv1.ObjectStorageSchemaV11,
- EffectiveDate: "2020-10-11",
- },
- },
- Secret: lokiv1.ObjectStorageSecretSpec{
- Name: defaultManagedAuthSecret.Name,
- Type: lokiv1.ObjectStorageSecretS3,
- },
- },
- },
- }
-
- k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
- _, isLokiStack := object.(*lokiv1.LokiStack)
- if r.Name == name.Name && r.Namespace == name.Namespace && isLokiStack {
- k.SetClientObject(object, stack)
- return nil
- }
- if name.Name == defaultManagedAuthSecret.Name {
- k.SetClientObject(object, &defaultManagedAuthSecret)
- return nil
- }
- return apierrors.NewNotFound(schema.GroupResource{}, "something is not found")
- }
-
- k.StatusStub = func() client.StatusWriter { return sw }
-
- _, err := BuildOptions(context.TODO(), k, stack, fg)
-
- // make sure error is returned
- require.Error(t, err)
- require.Equal(t, degradedErr, err)
-}
-
func TestBuildOptions_WhenMissingCloudCredentialsSecret_SetDegraded(t *testing.T) {
sw := &k8sfakes.FakeStatusWriter{}
k := &k8sfakes.FakeClient{}
@@ -236,9 +164,6 @@ func TestBuildOptions_WhenMissingCloudCredentialsSecret_SetDegraded(t *testing.T
Name: "my-stack",
Namespace: "some-ns",
UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
- Annotations: map[string]string{
- storage.AnnotationCredentialsRequestsSecretRef: "my-stack-aws-creds",
- },
},
Spec: lokiv1.LokiStackSpec{
Size: lokiv1.SizeOneXExtraSmall,
diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go
index 2f78f75d02c5..47e7a309bf8b 100644
--- a/operator/internal/handlers/lokistack_create_or_update.go
+++ b/operator/internal/handlers/lokistack_create_or_update.go
@@ -36,7 +36,7 @@ func CreateOrUpdateLokiStack(
k k8s.Client,
s *runtime.Scheme,
fg configv1.FeatureGates,
-) error {
+) (lokiv1.CredentialMode, error) {
ll := log.WithValues("lokistack", req.NamespacedName, "event", "createOrUpdate")
var stack lokiv1.LokiStack
@@ -44,9 +44,9 @@ func CreateOrUpdateLokiStack(
if apierrors.IsNotFound(err) {
// maybe the user deleted it before we could react? Either way this isn't an issue
ll.Error(err, "could not find the requested loki stack", "name", req.NamespacedName)
- return nil
+ return "", nil
}
- return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName)
+ return "", kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName)
}
img := os.Getenv(manifests.EnvRelatedImageLoki)
@@ -61,21 +61,21 @@ func CreateOrUpdateLokiStack(
objStore, err := storage.BuildOptions(ctx, k, &stack, fg)
if err != nil {
- return err
+ return "", err
}
baseDomain, tenants, err := gateway.BuildOptions(ctx, ll, k, &stack, fg)
if err != nil {
- return err
+ return "", err
}
if err = rules.Cleanup(ctx, ll, k, &stack); err != nil {
- return err
+ return "", err
}
alertingRules, recordingRules, ruler, ocpOptions, err := rules.BuildOptions(ctx, ll, k, &stack)
if err != nil {
- return err
+ return "", err
}
certRotationRequiredAt := ""
@@ -86,7 +86,7 @@ func CreateOrUpdateLokiStack(
timeoutConfig, err := manifests.NewTimeoutConfig(stack.Spec.Limits)
if err != nil {
ll.Error(err, "failed to parse query timeout")
- return &status.DegradedError{
+ return "", &status.DegradedError{
Message: fmt.Sprintf("Error parsing query timeout: %s", err),
Reason: lokiv1.ReasonQueryTimeoutInvalid,
Requeue: false,
@@ -116,13 +116,13 @@ func CreateOrUpdateLokiStack(
if optErr := manifests.ApplyDefaultSettings(&opts); optErr != nil {
ll.Error(optErr, "failed to conform options to build settings")
- return optErr
+ return "", optErr
}
if fg.LokiStackGateway {
if optErr := manifests.ApplyGatewayDefaultOptions(&opts); optErr != nil {
ll.Error(optErr, "failed to apply defaults options to gateway settings")
- return optErr
+ return "", optErr
}
}
@@ -140,13 +140,13 @@ func CreateOrUpdateLokiStack(
if optErr := manifests.ApplyTLSSettings(&opts, tlsProfile); optErr != nil {
ll.Error(optErr, "failed to conform options to tls profile settings")
- return optErr
+ return "", optErr
}
objects, err := manifests.BuildAll(opts)
if err != nil {
ll.Error(err, "failed to build manifests")
- return err
+ return "", err
}
ll.Info("manifests built", "count", len(objects))
@@ -158,7 +158,7 @@ func CreateOrUpdateLokiStack(
// a user possibly being unable to read logs.
if err := status.SetStorageSchemaStatus(ctx, k, req, objStore.Schemas); err != nil {
ll.Error(err, "failed to set storage schema status")
- return err
+ return "", err
}
var errCount int32
@@ -182,7 +182,7 @@ func CreateOrUpdateLokiStack(
depAnnotations, err := dependentAnnotations(ctx, k, obj)
if err != nil {
l.Error(err, "failed to set dependent annotations")
- return err
+ return "", err
}
desired := obj.DeepCopyObject().(client.Object)
@@ -205,7 +205,7 @@ func CreateOrUpdateLokiStack(
}
if errCount > 0 {
- return kverrors.New("failed to configure lokistack resources", "name", req.NamespacedName)
+ return "", kverrors.New("failed to configure lokistack resources", "name", req.NamespacedName)
}
// 1x.demo is used only for development, so the metrics will not
@@ -214,7 +214,7 @@ func CreateOrUpdateLokiStack(
metrics.Collect(&opts.Stack, opts.Name)
}
- return nil
+ return objStore.CredentialMode(), nil
}
func dependentAnnotations(ctx context.Context, k k8s.Client, obj client.Object) (map[string]string, error) {
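Returning the credential mode from the handler gives the reconciler a value to publish: after a successful reconcile, the controller can record which storage-credential mode (static, token, or managed) the stack is actually using, for example in the LokiStack status. That is the motivation for threading the extra return value through every early-return path above.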
diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go
index 4ba9a9affc36..bef5ffc9efb7 100644
--- a/operator/internal/handlers/lokistack_create_or_update_test.go
+++ b/operator/internal/handlers/lokistack_create_or_update_test.go
@@ -108,7 +108,7 @@ func TestCreateOrUpdateLokiStack_WhenGetReturnsNotFound_DoesNotError(t *testing.
k.StatusStub = func() client.StatusWriter { return sw }
- err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
+ _, err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
require.NoError(t, err)
// make sure create was NOT called because the Get failed
@@ -132,7 +132,7 @@ func TestCreateOrUpdateLokiStack_WhenGetReturnsAnErrorOtherThanNotFound_ReturnsT
k.StatusStub = func() client.StatusWriter { return sw }
- err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
+ _, err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
require.Equal(t, badRequestErr, errors.Unwrap(err))
@@ -219,7 +219,7 @@ func TestCreateOrUpdateLokiStack_SetsNamespaceOnAllObjects(t *testing.T) {
k.StatusStub = func() client.StatusWriter { return sw }
- err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
+ _, err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
require.NoError(t, err)
// make sure create was called
@@ -327,7 +327,7 @@ func TestCreateOrUpdateLokiStack_SetsOwnerRefOnAllObjects(t *testing.T) {
k.StatusStub = func() client.StatusWriter { return sw }
- err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
+ _, err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
require.NoError(t, err)
// make sure create was called
@@ -387,7 +387,7 @@ func TestCreateOrUpdateLokiStack_WhenSetControllerRefInvalid_ContinueWithOtherOb
k.StatusStub = func() client.StatusWriter { return sw }
- err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
+ _, err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
// make sure error is returned to re-trigger reconciliation
require.Error(t, err)
@@ -490,7 +490,7 @@ func TestCreateOrUpdateLokiStack_WhenGetReturnsNoError_UpdateObjects(t *testing.
k.StatusStub = func() client.StatusWriter { return sw }
- err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
+ _, err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
require.NoError(t, err)
// make sure create not called
@@ -556,7 +556,7 @@ func TestCreateOrUpdateLokiStack_WhenCreateReturnsError_ContinueWithOtherObjects
k.StatusStub = func() client.StatusWriter { return sw }
- err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
+ _, err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
// make sure error is returned to re-trigger reconciliation
require.Error(t, err)
@@ -663,7 +663,7 @@ func TestCreateOrUpdateLokiStack_WhenUpdateReturnsError_ContinueWithOtherObjects
k.StatusStub = func() client.StatusWriter { return sw }
- err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
+ _, err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
// make sure error is returned to re-trigger reconciliation
require.Error(t, err)
@@ -734,7 +734,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidQueryTimeout_SetDegraded(t *testing.
k.StatusStub = func() client.StatusWriter { return sw }
- err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
+ _, err := CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
// make sure error is returned
require.Error(t, err)
diff --git a/operator/internal/manifests/mutate.go b/operator/internal/manifests/mutate.go
index 27421750bf2c..63308bb9ceb6 100644
--- a/operator/internal/manifests/mutate.go
+++ b/operator/internal/manifests/mutate.go
@@ -6,6 +6,7 @@ import (
"github.com/ViaQ/logerr/v2/kverrors"
"github.com/imdario/mergo"
routev1 "github.com/openshift/api/route/v1"
+ cloudcredentialv1 "github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -123,6 +124,11 @@ func MutateFuncFor(existing, desired client.Object, depAnnotations map[string]st
wantRt := desired.(*routev1.Route)
mutateRoute(rt, wantRt)
+ case *cloudcredentialv1.CredentialsRequest:
+ cr := existing.(*cloudcredentialv1.CredentialsRequest)
+ wantCr := desired.(*cloudcredentialv1.CredentialsRequest)
+ mutateCredentialRequest(cr, wantCr)
+
case *monitoringv1.PrometheusRule:
pr := existing.(*monitoringv1.PrometheusRule)
wantPr := desired.(*monitoringv1.PrometheusRule)
@@ -213,6 +219,10 @@ func mutateRoute(existing, desired *routev1.Route) {
existing.Spec = desired.Spec
}
+func mutateCredentialRequest(existing, desired *cloudcredentialv1.CredentialsRequest) {
+ existing.Spec = desired.Spec
+}
+
func mutatePrometheusRule(existing, desired *monitoringv1.PrometheusRule) {
existing.Annotations = desired.Annotations
existing.Labels = desired.Labels
diff --git a/operator/internal/manifests/openshift/credentialsrequest.go b/operator/internal/manifests/openshift/credentialsrequest.go
index 2962b61d0d1e..0e97dd97c2b1 100644
--- a/operator/internal/manifests/openshift/credentialsrequest.go
+++ b/operator/internal/manifests/openshift/credentialsrequest.go
@@ -1,10 +1,6 @@
package openshift
import (
- "fmt"
- "os"
- "path"
-
"github.com/ViaQ/logerr/v2/kverrors"
cloudcredentialv1 "github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1"
corev1 "k8s.io/api/core/v1"
@@ -12,32 +8,26 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "github.com/grafana/loki/operator/internal/config"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
-const (
- ccoNamespace = "openshift-cloud-credential-operator"
-)
-
func BuildCredentialsRequest(opts Options) (*cloudcredentialv1.CredentialsRequest, error) {
stack := client.ObjectKey{Name: opts.BuildOpts.LokiStackName, Namespace: opts.BuildOpts.LokiStackNamespace}
- providerSpec, secretName, err := encodeProviderSpec(opts.BuildOpts.LokiStackName, opts.ManagedAuthEnv)
+ providerSpec, err := encodeProviderSpec(opts.ManagedAuth)
if err != nil {
return nil, kverrors.Wrap(err, "failed encoding credentialsrequest provider spec")
}
return &cloudcredentialv1.CredentialsRequest{
ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("%s-%s", stack.Namespace, secretName),
- Namespace: ccoNamespace,
- Annotations: map[string]string{
- AnnotationCredentialsRequestOwner: stack.String(),
- },
+ Name: stack.Name,
+ Namespace: stack.Namespace,
},
Spec: cloudcredentialv1.CredentialsRequestSpec{
SecretRef: corev1.ObjectReference{
- Name: secretName,
+ Name: storage.ManagedCredentialsSecretName(stack.Name),
Namespace: stack.Namespace,
},
ProviderSpec: providerSpec,
@@ -45,16 +35,13 @@ func BuildCredentialsRequest(opts Options) (*cloudcredentialv1.CredentialsReques
stack.Name,
rulerServiceAccountName(opts),
},
- CloudTokenPath: path.Join(storage.AWSTokenVolumeDirectory, "token"),
+ CloudTokenPath: storage.ServiceAccountTokenFilePath,
},
}, nil
}
-func encodeProviderSpec(stackName string, env *ManagedAuthEnv) (*runtime.RawExtension, string, error) {
- var (
- spec runtime.Object
- secretName string
- )
+func encodeProviderSpec(env *config.ManagedAuthConfig) (*runtime.RawExtension, error) {
+ var spec runtime.Object
switch {
case env.AWS != nil:
@@ -73,7 +60,6 @@ func encodeProviderSpec(stackName string, env *ManagedAuthEnv) (*runtime.RawExte
},
STSIAMRoleARN: env.AWS.RoleARN,
}
- secretName = fmt.Sprintf("%s-aws-creds", stackName)
case env.Azure != nil:
azure := env.Azure
@@ -101,38 +87,8 @@ func encodeProviderSpec(stackName string, env *ManagedAuthEnv) (*runtime.RawExte
AzureSubscriptionID: azure.SubscriptionID,
AzureTenantID: azure.TenantID,
}
- secretName = fmt.Sprintf("%s-azure-creds", stackName)
}
encodedSpec, err := cloudcredentialv1.Codec.EncodeProviderSpec(spec.DeepCopyObject())
- return encodedSpec, secretName, err
-}
-
-func DiscoverManagedAuthEnv() *ManagedAuthEnv {
- // AWS
- roleARN := os.Getenv("ROLEARN")
-
- // Azure
- clientID := os.Getenv("CLIENTID")
- tenantID := os.Getenv("TENANTID")
- subscriptionID := os.Getenv("SUBSCRIPTIONID")
-
- switch {
- case roleARN != "":
- return &ManagedAuthEnv{
- AWS: &AWSSTSEnv{
- RoleARN: roleARN,
- },
- }
- case clientID != "" && tenantID != "" && subscriptionID != "":
- return &ManagedAuthEnv{
- Azure: &AzureWIFEnvironment{
- ClientID: clientID,
- SubscriptionID: subscriptionID,
- TenantID: tenantID,
- },
- }
- }
-
- return nil
+ return encodedSpec, err
}
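`storage.ManagedCredentialsSecretName` replaces the per-provider `fmt.Sprintf` naming removed above; its definition is outside this patch. A sketch consistent with the test expectation below (`a-stack` becomes `a-stack-managed-credentials`):

```go
package storage

import "fmt"

// ManagedCredentialsSecretName sketches the helper referenced above: one
// stable secret name per LokiStack, independent of the cloud provider.
// Inferred from the updated test expectations; not the verbatim source.
func ManagedCredentialsSecretName(stackName string) string {
	return fmt.Sprintf("%s-managed-credentials", stackName)
}
```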
diff --git a/operator/internal/manifests/openshift/credentialsrequest_test.go b/operator/internal/manifests/openshift/credentialsrequest_test.go
index 21b193c8c7d7..36c6e2331f7e 100644
--- a/operator/internal/manifests/openshift/credentialsrequest_test.go
+++ b/operator/internal/manifests/openshift/credentialsrequest_test.go
@@ -1,40 +1,22 @@
package openshift
import (
- "strings"
"testing"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/operator/internal/config"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
-func TestBuildCredentialsRequest_HasOwnerAnnotation(t *testing.T) {
- opts := Options{
- BuildOpts: BuildOptions{
- LokiStackName: "a-stack",
- LokiStackNamespace: "ns",
- },
- ManagedAuthEnv: &ManagedAuthEnv{
- AWS: &AWSSTSEnv{
- RoleARN: "role-arn",
- },
- },
- }
-
- credReq, err := BuildCredentialsRequest(opts)
- require.NoError(t, err)
- require.Contains(t, credReq.Annotations, AnnotationCredentialsRequestOwner)
-}
-
func TestBuildCredentialsRequest_HasSecretRef_MatchingLokiStackNamespace(t *testing.T) {
opts := Options{
BuildOpts: BuildOptions{
LokiStackName: "a-stack",
LokiStackNamespace: "ns",
},
- ManagedAuthEnv: &ManagedAuthEnv{
- AWS: &AWSSTSEnv{
+ ManagedAuth: &config.ManagedAuthConfig{
+ AWS: &config.AWSEnvironment{
RoleARN: "role-arn",
},
},
@@ -51,8 +33,8 @@ func TestBuildCredentialsRequest_HasServiceAccountNames_ContainsAllLokiStackServ
LokiStackName: "a-stack",
LokiStackNamespace: "ns",
},
- ManagedAuthEnv: &ManagedAuthEnv{
- AWS: &AWSSTSEnv{
+ ManagedAuth: &config.ManagedAuthConfig{
+ AWS: &config.AWSEnvironment{
RoleARN: "role-arn",
},
},
@@ -70,8 +52,8 @@ func TestBuildCredentialsRequest_CloudTokenPath_MatchinOpenShiftSADirectory(t *t
LokiStackName: "a-stack",
LokiStackNamespace: "ns",
},
- ManagedAuthEnv: &ManagedAuthEnv{
- AWS: &AWSSTSEnv{
+ ManagedAuth: &config.ManagedAuthConfig{
+ AWS: &config.AWSEnvironment{
RoleARN: "role-arn",
},
},
@@ -79,7 +61,7 @@ func TestBuildCredentialsRequest_CloudTokenPath_MatchinOpenShiftSADirectory(t *t
credReq, err := BuildCredentialsRequest(opts)
require.NoError(t, err)
- require.True(t, strings.HasPrefix(credReq.Spec.CloudTokenPath, storage.AWSTokenVolumeDirectory))
+ require.Equal(t, storage.ServiceAccountTokenFilePath, credReq.Spec.CloudTokenPath)
}
func TestBuildCredentialsRequest_FollowsNamingConventions(t *testing.T) {
@@ -96,14 +78,14 @@ func TestBuildCredentialsRequest_FollowsNamingConventions(t *testing.T) {
LokiStackName: "a-stack",
LokiStackNamespace: "ns",
},
- ManagedAuthEnv: &ManagedAuthEnv{
- AWS: &AWSSTSEnv{
+ ManagedAuth: &config.ManagedAuthConfig{
+ AWS: &config.AWSEnvironment{
RoleARN: "role-arn",
},
},
},
- wantName: "ns-a-stack-aws-creds",
- wantSecretName: "a-stack-aws-creds",
+ wantName: "a-stack",
+ wantSecretName: "a-stack-managed-credentials",
},
}
for _, test := range tests {
diff --git a/operator/internal/manifests/openshift/options.go b/operator/internal/manifests/openshift/options.go
index 9bc2e4faae36..572db7fe6445 100644
--- a/operator/internal/manifests/openshift/options.go
+++ b/operator/internal/manifests/openshift/options.go
@@ -6,6 +6,7 @@ import (
"time"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/config"
)
// Options is the set of internal template options for rendering
@@ -14,7 +15,7 @@ type Options struct {
BuildOpts BuildOptions
Authentication []AuthenticationSpec
Authorization AuthorizationSpec
- ManagedAuthEnv *ManagedAuthEnv
+ ManagedAuth *config.ManagedAuthConfig
}
// AuthenticationSpec describes the authentication specification
@@ -55,22 +56,6 @@ type TenantData struct {
CookieSecret string
}
-type AWSSTSEnv struct {
- RoleARN string
-}
-
-type AzureWIFEnvironment struct {
- ClientID string
- SubscriptionID string
- TenantID string
- Region string
-}
-
-type ManagedAuthEnv struct {
- AWS *AWSSTSEnv
- Azure *AzureWIFEnvironment
-}
-
// NewOptions returns an openshift options struct.
func NewOptions(
stackName, stackNamespace string,
diff --git a/operator/internal/manifests/openshift/var.go b/operator/internal/manifests/openshift/var.go
index 84928c48d7e2..5e3ac6300e3e 100644
--- a/operator/internal/manifests/openshift/var.go
+++ b/operator/internal/manifests/openshift/var.go
@@ -48,8 +48,6 @@ var (
MonitoringSVCUserWorkload = "alertmanager-user-workload"
MonitoringUserWorkloadNS = "openshift-user-workload-monitoring"
-
- AnnotationCredentialsRequestOwner = "loki.grafana.com/credentialsrequest-owner"
)
func authorizerRbacName(componentName string) string {
diff --git a/operator/internal/manifests/storage/configure.go b/operator/internal/manifests/storage/configure.go
index 49958ebec7b9..ede098425323 100644
--- a/operator/internal/manifests/storage/configure.go
+++ b/operator/internal/manifests/storage/configure.go
@@ -13,6 +13,18 @@ import (
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
+var (
+ managedAuthConfigVolumeMount = corev1.VolumeMount{
+ Name: managedAuthConfigVolumeName,
+ MountPath: managedAuthConfigDirectory,
+ }
+
+ saTokenVolumeMount = corev1.VolumeMount{
+ Name: saTokenVolumeName,
+ MountPath: saTokenVolumeMountPath,
+ }
+)
+
// ConfigureDeployment appends additional pod volumes and container env vars, args, volume mounts
// based on the object storage type. Currently supported amendments:
// - All: Ensure object storage secret mounted and auth projected as env vars.
@@ -127,11 +139,11 @@ func ensureObjectStoreCredentials(p *corev1.PodSpec, opts Options) corev1.PodSpe
if managedAuthEnabled(opts) {
container.Env = append(container.Env, managedAuthCredentials(opts)...)
volumes = append(volumes, saTokenVolume(opts))
- container.VolumeMounts = append(container.VolumeMounts, saTokenVolumeMount(opts))
+ container.VolumeMounts = append(container.VolumeMounts, saTokenVolumeMount)
- if opts.OpenShift.ManagedAuthEnabled() {
- volumes = append(volumes, managedAuthVolume(opts))
- container.VolumeMounts = append(container.VolumeMounts, managedAuthVolumeMount(opts))
+ if opts.OpenShift.ManagedAuthEnabled() && opts.S3 != nil && opts.S3.STS {
+ volumes = append(volumes, managedAuthConfigVolume(opts))
+ container.VolumeMounts = append(container.VolumeMounts, managedAuthConfigVolumeMount)
}
} else {
container.Env = append(container.Env, staticAuthCredentials(opts)...)
@@ -183,13 +195,13 @@ func managedAuthCredentials(opts Options) []corev1.EnvVar {
case lokiv1.ObjectStorageSecretS3:
if opts.OpenShift.ManagedAuthEnabled() {
return []corev1.EnvVar{
- envVarFromValue(EnvAWSCredentialsFile, path.Join(managedAuthSecretDirectory, KeyAWSCredentialsFilename)),
+ envVarFromValue(EnvAWSCredentialsFile, path.Join(managedAuthConfigDirectory, KeyAWSCredentialsFilename)),
envVarFromValue(EnvAWSSdkLoadConfig, "true"),
}
} else {
return []corev1.EnvVar{
envVarFromSecret(EnvAWSRoleArn, opts.SecretName, KeyAWSRoleArn),
- envVarFromValue(EnvAWSWebIdentityTokenFile, path.Join(AWSTokenVolumeDirectory, "token")),
+ envVarFromValue(EnvAWSWebIdentityTokenFile, ServiceAccountTokenFilePath),
}
}
case lokiv1.ObjectStorageSecretAzure:
@@ -199,7 +211,7 @@ func managedAuthCredentials(opts Options) []corev1.EnvVar {
envVarFromSecret(EnvAzureClientID, opts.OpenShift.CloudCredentials.SecretName, azureManagedCredentialKeyClientID),
envVarFromSecret(EnvAzureTenantID, opts.OpenShift.CloudCredentials.SecretName, azureManagedCredentialKeyTenantID),
envVarFromSecret(EnvAzureSubscriptionID, opts.OpenShift.CloudCredentials.SecretName, azureManagedCredentialKeySubscriptionID),
- envVarFromValue(EnvAzureFederatedTokenFile, path.Join(azureTokenVolumeDirectory, "token")),
+ envVarFromValue(EnvAzureFederatedTokenFile, ServiceAccountTokenFilePath),
}
}
@@ -208,7 +220,7 @@ func managedAuthCredentials(opts Options) []corev1.EnvVar {
envVarFromSecret(EnvAzureClientID, opts.SecretName, KeyAzureStorageClientID),
envVarFromSecret(EnvAzureTenantID, opts.SecretName, KeyAzureStorageTenantID),
envVarFromSecret(EnvAzureSubscriptionID, opts.SecretName, KeyAzureStorageSubscriptionID),
- envVarFromValue(EnvAzureFederatedTokenFile, path.Join(azureTokenVolumeDirectory, "token")),
+ envVarFromValue(EnvAzureFederatedTokenFile, ServiceAccountTokenFilePath),
}
case lokiv1.ObjectStorageSecretGCS:
return []corev1.EnvVar{
@@ -301,22 +313,6 @@ func managedAuthEnabled(opts Options) bool {
}
}
-func saTokenVolumeMount(opts Options) corev1.VolumeMount {
- var tokenPath string
- switch opts.SharedStore {
- case lokiv1.ObjectStorageSecretS3:
- tokenPath = AWSTokenVolumeDirectory
- case lokiv1.ObjectStorageSecretAzure:
- tokenPath = azureTokenVolumeDirectory
- case lokiv1.ObjectStorageSecretGCS:
- tokenPath = gcpTokenVolumeDirectory
- }
- return corev1.VolumeMount{
- Name: saTokenVolumeName,
- MountPath: tokenPath,
- }
-}
-
func saTokenVolume(opts Options) corev1.Volume {
var audience string
storeType := opts.SharedStore
@@ -352,16 +348,9 @@ func saTokenVolume(opts Options) corev1.Volume {
}
}
-func managedAuthVolumeMount(opts Options) corev1.VolumeMount {
- return corev1.VolumeMount{
- Name: opts.OpenShift.CloudCredentials.SecretName,
- MountPath: managedAuthSecretDirectory,
- }
-}
-
-func managedAuthVolume(opts Options) corev1.Volume {
+func managedAuthConfigVolume(opts Options) corev1.Volume {
return corev1.Volume{
- Name: opts.OpenShift.CloudCredentials.SecretName,
+ Name: managedAuthConfigVolumeName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: opts.OpenShift.CloudCredentials.SecretName,
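The refactor above leans on constants from `var.go` that this patch does not show. A sketch of the assumed definitions; the two token paths are inferred from the updated tests, while the other values are assumptions:

```go
package storage

// Assumed constants backing the volume-mount refactor above. The two paths
// containing "storage/serviceaccount" are taken from the updated test
// expectations; the remaining values are illustrative, not verbatim source.
const (
	saTokenVolumeName      = "bound-sa-token"                          // assumption
	saTokenVolumeMountPath = "/var/run/secrets/storage/serviceaccount" // from tests

	// ServiceAccountTokenFilePath replaces the per-provider token paths
	// (aws/, azure/, gcp/) with a single shared location.
	ServiceAccountTokenFilePath = saTokenVolumeMountPath + "/token" // from tests

	managedAuthConfigVolumeName = "managed-auth-config"      // assumption
	managedAuthConfigDirectory  = "/etc/storage/managed-auth" // assumption
)
```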
diff --git a/operator/internal/manifests/storage/configure_test.go b/operator/internal/manifests/storage/configure_test.go
index f17a9af6c352..2cd7b079a4b4 100644
--- a/operator/internal/manifests/storage/configure_test.go
+++ b/operator/internal/manifests/storage/configure_test.go
@@ -206,7 +206,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
{
Name: saTokenVolumeName,
ReadOnly: false,
- MountPath: "/var/run/secrets/azure/serviceaccount",
+ MountPath: saTokenVolumeMountPath,
},
},
Env: []corev1.EnvVar{
@@ -256,7 +256,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
{
Name: EnvAzureFederatedTokenFile,
- Value: "/var/run/secrets/azure/serviceaccount/token",
+ Value: "/var/run/secrets/storage/serviceaccount/token",
},
},
},
@@ -331,7 +331,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
{
Name: saTokenVolumeName,
ReadOnly: false,
- MountPath: "/var/run/secrets/azure/serviceaccount",
+ MountPath: saTokenVolumeMountPath,
},
},
Env: []corev1.EnvVar{
@@ -381,7 +381,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
{
Name: EnvAzureFederatedTokenFile,
- Value: "/var/run/secrets/azure/serviceaccount/token",
+ Value: "/var/run/secrets/storage/serviceaccount/token",
},
},
},
@@ -462,11 +462,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
{
Name: saTokenVolumeName,
ReadOnly: false,
- MountPath: "/var/run/secrets/azure/serviceaccount",
- },
- {
- Name: "cloud-credentials",
- MountPath: managedAuthSecretDirectory,
+ MountPath: saTokenVolumeMountPath,
},
},
Env: []corev1.EnvVar{
@@ -516,7 +512,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
{
Name: EnvAzureFederatedTokenFile,
- Value: "/var/run/secrets/azure/serviceaccount/token",
+ Value: "/var/run/secrets/storage/serviceaccount/token",
},
},
},
@@ -546,14 +542,6 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
},
},
- {
- Name: "cloud-credentials",
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: "cloud-credentials",
- },
- },
- },
},
},
},
@@ -655,7 +643,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
{
Name: saTokenVolumeName,
ReadOnly: false,
- MountPath: "/var/run/secrets/gcp/serviceaccount",
+ MountPath: saTokenVolumeMountPath,
},
},
Env: []corev1.EnvVar{
@@ -810,7 +798,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
{
Name: saTokenVolumeName,
ReadOnly: false,
- MountPath: "/var/run/secrets/aws/serviceaccount",
+ MountPath: saTokenVolumeMountPath,
},
},
Env: []corev1.EnvVar{
@@ -827,7 +815,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
{
Name: "AWS_WEB_IDENTITY_TOKEN_FILE",
- Value: "/var/run/secrets/aws/serviceaccount/token",
+ Value: "/var/run/secrets/storage/serviceaccount/token",
},
},
},
@@ -908,13 +896,9 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
{
Name: saTokenVolumeName,
ReadOnly: false,
- MountPath: "/var/run/secrets/aws/serviceaccount",
- },
- {
- Name: "cloud-credentials",
- ReadOnly: false,
- MountPath: "/etc/storage/managed-auth",
+ MountPath: saTokenVolumeMountPath,
},
+ managedAuthConfigVolumeMount,
},
Env: []corev1.EnvVar{
{
@@ -954,7 +938,7 @@ func TestConfigureDeploymentForStorageType(t *testing.T) {
},
},
{
- Name: "cloud-credentials",
+ Name: managedAuthConfigVolumeName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "cloud-credentials",
@@ -1340,7 +1324,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
{
Name: saTokenVolumeName,
ReadOnly: false,
- MountPath: "/var/run/secrets/azure/serviceaccount",
+ MountPath: saTokenVolumeMountPath,
},
},
Env: []corev1.EnvVar{
@@ -1390,7 +1374,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
},
{
Name: EnvAzureFederatedTokenFile,
- Value: "/var/run/secrets/azure/serviceaccount/token",
+ Value: "/var/run/secrets/storage/serviceaccount/token",
},
},
},
@@ -1465,7 +1449,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
{
Name: saTokenVolumeName,
ReadOnly: false,
- MountPath: "/var/run/secrets/azure/serviceaccount",
+ MountPath: saTokenVolumeMountPath,
},
},
Env: []corev1.EnvVar{
@@ -1515,7 +1499,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
},
{
Name: EnvAzureFederatedTokenFile,
- Value: "/var/run/secrets/azure/serviceaccount/token",
+ Value: "/var/run/secrets/storage/serviceaccount/token",
},
},
},
@@ -1596,11 +1580,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
{
Name: saTokenVolumeName,
ReadOnly: false,
- MountPath: "/var/run/secrets/azure/serviceaccount",
- },
- {
- Name: "cloud-credentials",
- MountPath: managedAuthSecretDirectory,
+ MountPath: saTokenVolumeMountPath,
},
},
Env: []corev1.EnvVar{
@@ -1650,7 +1630,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
},
{
Name: EnvAzureFederatedTokenFile,
- Value: "/var/run/secrets/azure/serviceaccount/token",
+ Value: "/var/run/secrets/storage/serviceaccount/token",
},
},
},
@@ -1680,14 +1660,6 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
},
},
},
- {
- Name: "cloud-credentials",
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: "cloud-credentials",
- },
- },
- },
},
},
},
@@ -1789,7 +1761,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
{
Name: saTokenVolumeName,
ReadOnly: false,
- MountPath: "/var/run/secrets/gcp/serviceaccount",
+ MountPath: saTokenVolumeMountPath,
},
},
Env: []corev1.EnvVar{
@@ -1950,13 +1922,9 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
{
Name: saTokenVolumeName,
ReadOnly: false,
- MountPath: "/var/run/secrets/aws/serviceaccount",
- },
- {
- Name: "cloud-credentials",
- ReadOnly: false,
- MountPath: "/etc/storage/managed-auth",
+ MountPath: saTokenVolumeMountPath,
},
+ managedAuthConfigVolumeMount,
},
Env: []corev1.EnvVar{
{
@@ -1996,7 +1964,7 @@ func TestConfigureStatefulSetForStorageType(t *testing.T) {
},
},
{
- Name: "cloud-credentials",
+ Name: managedAuthConfigVolumeName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "cloud-credentials",
diff --git a/operator/internal/manifests/storage/options.go b/operator/internal/manifests/storage/options.go
index e525640da6c0..6693d2261e97 100644
--- a/operator/internal/manifests/storage/options.go
+++ b/operator/internal/manifests/storage/options.go
@@ -23,12 +23,47 @@ type Options struct {
OpenShift OpenShiftOptions
}
+// CredentialMode returns which mode is used by the current storage configuration.
+// This defaults to CredentialModeStatic, but can be CredentialModeToken
+// or CredentialModeManaged depending on the object storage provider, the provided
+// secret, and whether the operator is running in a managed-auth cluster.
+func (o Options) CredentialMode() lokiv1.CredentialMode {
+ if o.Azure != nil {
+ if o.OpenShift.ManagedAuthEnabled() {
+ return lokiv1.CredentialModeManaged
+ }
+
+ if o.Azure.WorkloadIdentity {
+ return lokiv1.CredentialModeToken
+ }
+ }
+
+ if o.GCS != nil {
+ if o.GCS.WorkloadIdentity {
+ return lokiv1.CredentialModeToken
+ }
+ }
+
+ if o.S3 != nil {
+ if o.OpenShift.ManagedAuthEnabled() {
+ return lokiv1.CredentialModeManaged
+ }
+
+ if o.S3.STS {
+ return lokiv1.CredentialModeToken
+ }
+ }
+
+ return lokiv1.CredentialModeStatic
+}
+
// AzureStorageConfig for Azure storage config
type AzureStorageConfig struct {
Env string
Container string
EndpointSuffix string
Audience string
+ Region string
WorkloadIdentity bool
}
diff --git a/operator/internal/manifests/storage/var.go b/operator/internal/manifests/storage/var.go
index 49ec0b0a16ae..cbd944a821c3 100644
--- a/operator/internal/manifests/storage/var.go
+++ b/operator/internal/manifests/storage/var.go
@@ -1,5 +1,7 @@
package storage
+import "fmt"
+
const (
// EnvAlibabaCloudAccessKeyID is the environment variable to specify the AlibabaCloud client id to access S3.
EnvAlibabaCloudAccessKeyID = "ALIBABA_CLOUD_ACCESS_KEY_ID"
@@ -127,27 +129,29 @@ const (
// KeySwiftUsername is the secret data key for the OpenStack Swift password.
KeySwiftUsername = "username"
- saTokenVolumeK8sDirectory = "/var/run/secrets/kubernetes.io/serviceaccount"
- saTokenVolumeName = "bound-sa-token"
- saTokenExpiration int64 = 3600
+ saTokenVolumeName = "bound-sa-token"
+ saTokenExpiration int64 = 3600
+ saTokenVolumeMountPath = "/var/run/secrets/storage/serviceaccount"
+
+ ServiceAccountTokenFilePath = saTokenVolumeMountPath + "/token"
+
+ secretDirectory = "/etc/storage/secrets"
+ storageTLSVolume = "storage-tls"
+ caDirectory = "/etc/storage/ca"
- secretDirectory = "/etc/storage/secrets"
- managedAuthSecretDirectory = "/etc/storage/managed-auth"
- storageTLSVolume = "storage-tls"
- caDirectory = "/etc/storage/ca"
+ managedAuthConfigVolumeName = "managed-auth-config"
+ managedAuthConfigDirectory = "/etc/storage/managed-auth"
- awsDefaultAudience = "sts.amazonaws.com"
- AWSTokenVolumeDirectory = "/var/run/secrets/aws/serviceaccount"
+ awsDefaultAudience = "sts.amazonaws.com"
- azureDefaultAudience = "api://AzureADTokenExchange"
- azureTokenVolumeDirectory = "/var/run/secrets/azure/serviceaccount"
+ azureDefaultAudience = "api://AzureADTokenExchange"
azureManagedCredentialKeyClientID = "azure_client_id"
azureManagedCredentialKeyTenantID = "azure_tenant_id"
azureManagedCredentialKeySubscriptionID = "azure_subscription_id"
-
- gcpTokenVolumeDirectory = "/var/run/secrets/gcp/serviceaccount"
- GCPDefautCredentialsFile = gcpTokenVolumeDirectory + "/token"
-
- AnnotationCredentialsRequestsSecretRef = "loki.grafana.com/credentials-request-secret-ref"
)
+
+// ManagedCredentialsSecretName returns the name of the secret holding the managed credentials.
+func ManagedCredentialsSecretName(stackName string) string {
+ return fmt.Sprintf("%s-managed-credentials", stackName)
+}
diff --git a/operator/internal/status/status.go b/operator/internal/status/status.go
index 281a167355c3..c544695d3d2e 100644
--- a/operator/internal/status/status.go
+++ b/operator/internal/status/status.go
@@ -17,7 +17,7 @@ import (
// Refresh executes an aggregate update of the LokiStack Status struct, i.e.
// - It recreates the Status.Components pod status map per component.
// - It sets the appropriate Status.Condition to true that matches the pod status maps.
-func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request, now time.Time, degradedErr *DegradedError) error {
+func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request, now time.Time, credentialMode lokiv1.CredentialMode, degradedErr *DegradedError) error {
var stack lokiv1.LokiStack
if err := k.Get(ctx, req.NamespacedName, &stack); err != nil {
if apierrors.IsNotFound(err) {
@@ -45,6 +45,7 @@ func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request, now time.Time,
statusUpdater := func(stack *lokiv1.LokiStack) {
stack.Status.Components = *cs
stack.Status.Conditions = mergeConditions(stack.Status.Conditions, activeConditions, metaTime)
+ stack.Status.Storage.CredentialMode = credentialMode
}
statusUpdater(&stack)
diff --git a/operator/internal/status/status_test.go b/operator/internal/status/status_test.go
index c7895cbe8020..32ef892ed1bd 100644
--- a/operator/internal/status/status_test.go
+++ b/operator/internal/status/status_test.go
@@ -54,7 +54,9 @@ func TestRefreshSuccess(t *testing.T) {
Gateway: map[corev1.PodPhase][]string{corev1.PodRunning: {"lokistack-gateway-pod-0"}},
Ruler: map[corev1.PodPhase][]string{corev1.PodRunning: {"ruler-pod-0"}},
},
- Storage: lokiv1.LokiStackStorageStatus{},
+ Storage: lokiv1.LokiStackStorageStatus{
+ CredentialMode: lokiv1.CredentialModeStatic,
+ },
Conditions: []metav1.Condition{
{
Type: string(lokiv1.ConditionReady),
@@ -68,7 +70,7 @@ func TestRefreshSuccess(t *testing.T) {
k, sw := setupListClient(t, stack, componentPods)
- err := Refresh(context.Background(), k, req, now, nil)
+ err := Refresh(context.Background(), k, req, now, lokiv1.CredentialModeStatic, nil)
require.NoError(t, err)
require.Equal(t, 1, k.GetCallCount())
@@ -130,7 +132,7 @@ func TestRefreshSuccess_ZoneAwarePendingPod(t *testing.T) {
return nil
}
- err := Refresh(context.Background(), k, req, now, nil)
+ err := Refresh(context.Background(), k, req, now, lokiv1.CredentialModeStatic, nil)
require.NoError(t, err)
require.Equal(t, 1, k.GetCallCount())
diff --git a/operator/main.go b/operator/main.go
index a88a857bcee4..e212c268cbad 100644
--- a/operator/main.go
+++ b/operator/main.go
@@ -21,7 +21,6 @@ import (
lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
lokictrl "github.com/grafana/loki/operator/controllers/loki"
"github.com/grafana/loki/operator/internal/config"
- manifestsocp "github.com/grafana/loki/operator/internal/manifests/openshift"
"github.com/grafana/loki/operator/internal/metrics"
"github.com/grafana/loki/operator/internal/operator"
"github.com/grafana/loki/operator/internal/validation"
@@ -60,12 +59,16 @@ func main() {
var err error
- ctrlCfg, options, err := config.LoadConfig(scheme, configFile)
+ ctrlCfg, managedAuth, options, err := config.LoadConfig(scheme, configFile)
if err != nil {
logger.Error(err, "failed to load operator configuration")
os.Exit(1)
}
+ if managedAuth != nil {
+ logger.Info("Discovered OpenShift Cluster within a managed authentication environment")
+ }
+
if ctrlCfg.Gates.LokiStackAlerts && !ctrlCfg.Gates.ServiceMonitors {
logger.Error(kverrors.New("LokiStackAlerts flag requires ServiceMonitors"), "")
os.Exit(1)
@@ -95,16 +98,12 @@ func main() {
os.Exit(1)
}
- if ctrlCfg.Gates.OpenShift.Enabled && manifestsocp.DiscoverManagedAuthEnv() != nil {
- logger.Info("discovered OpenShift Cluster within a managed authentication environment")
- ctrlCfg.Gates.OpenShift.ManagedAuthEnv = true
- }
-
if err = (&lokictrl.LokiStackReconciler{
Client: mgr.GetClient(),
Log: logger.WithName("controllers").WithName("lokistack"),
Scheme: mgr.GetScheme(),
FeatureGates: ctrlCfg.Gates,
+ AuthConfig: managedAuth,
}).SetupWithManager(mgr); err != nil {
logger.Error(err, "unable to create controller", "controller", "lokistack")
os.Exit(1)
@@ -129,17 +128,6 @@ func main() {
}
}
- if ctrlCfg.Gates.OpenShift.ManagedAuthEnabled() {
- if err = (&lokictrl.CredentialsRequestsReconciler{
- Client: mgr.GetClient(),
- Scheme: mgr.GetScheme(),
- Log: logger.WithName("controllers").WithName("lokistack-credentialsrequest"),
- }).SetupWithManager(mgr); err != nil {
- logger.Error(err, "unable to create controller", "controller", "lokistack-credentialsrequest")
- os.Exit(1)
- }
- }
-
if ctrlCfg.Gates.LokiStackWebhook {
v := &validation.LokiStackValidator{}
if err = v.SetupWebhookWithManager(mgr); err != nil {
From 85908fafdfc9261c2f12f630726fa0dad514f886 Mon Sep 17 00:00:00 2001
From: Zirko <64951262+QuantumEnigmaa@users.noreply.github.com>
Date: Tue, 13 Feb 2024 13:40:46 +0100
Subject: [PATCH 055/130] Helm: fix changelog entry and bump chart version
(#11925)
**What this PR does / why we need it**:
This PR fixes the changelog by moving the `bugfix` entry for the Cilium
network policies to a separate chart version: 5.42.2 had already been
released when that entry was added to it.
**Special notes for your reviewer**:
**Checklist**
- [x] Reviewed the
[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md)
guide (**required**)
- [ ] Documentation added
- [ ] Tests updated
- [x] `CHANGELOG.md` updated
- [ ] If the change is worth mentioning in the release notes, add
`add-to-release-notes` label
- [ ] Changes that require user attention or interaction to upgrade are
documented in `docs/sources/setup/upgrade/_index.md`
- [ ] For Helm chart changes bump the Helm chart version in
`production/helm/loki/Chart.yaml` and update
`production/helm/loki/CHANGELOG.md` and
`production/helm/loki/README.md`. [Example
PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)
- [ ] If the change is deprecating or removing a configuration option,
update the `deprecated-config.yaml` and `deleted-config.yaml` files
respectively in the `tools/deprecated-config-checker` directory.
[Example
PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15)
Signed-off-by: QuantumEnigmaa
---
production/helm/loki/CHANGELOG.md | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index e849918585ea..068d37a49553 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,9 +13,12 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
-## 5.42.2
+## 5.42.3
- [BUGFIX] Added condition for `egress-discovery` networkPolicies and ciliumNetworkPolicies.
+
+## 5.42.2
+
- [BUGFIX] Remove trailing tab character in statefulset templates
## 5.42.1
From 472496feb7c6b2461b6a537a7acaa0d6f6046ea4 Mon Sep 17 00:00:00 2001
From: Paul Rogers <129207811+paul1r@users.noreply.github.com>
Date: Tue, 13 Feb 2024 08:58:29 -0500
Subject: [PATCH 056/130] Parse JSON String arrays properly so string elements
can be retrieved (#11921)
**What this PR does / why we need it**:
This PR swaps in the newly forked grafana/jsonparser module for
buger/jsonparser, which has seemingly been abandoned. PR
10690 introduced a fix to the jsonparser module, and that fix has been
incorporated into the grafana fork.
The PR is designed to fix accessing string array elements from within a
JSON structure. For example, with the following JSON:
`{"log":{"message":{"content":{"misses":["a","b","c","d"]}}}}`
Before this PR, a query using `json misses="log.message.content.misses[0]"`
resulted in an "Unknown value type" error. With this PR merged, the
element `a` is assigned to the `misses` label instead.
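To make the behavior concrete, here is a minimal, self-contained sketch (not part of the PR) that exercises `EachKey`, the multi-path lookup patched in the parser.go hunk below; the `[n]` path-segment syntax follows the jsonparser README, and the `main` scaffolding is illustrative:
```go
package main

import (
	"fmt"

	"github.com/grafana/jsonparser"
)

func main() {
	data := []byte(`{"log":{"message":{"content":{"misses":["a","b","c","d"]}}}}`)

	// Array elements are addressed with a "[n]" path segment.
	paths := [][]string{
		{"log", "message", "content", "misses", "[0]"},
	}
	jsonparser.EachKey(data, func(idx int, value []byte, vt jsonparser.ValueType, err error) {
		if err != nil {
			fmt.Println("error:", err) // pre-fix: "Unknown value type error"
			return
		}
		fmt.Printf("path %d => %s (%s)\n", idx, value, vt) // post-fix: path 0 => a (string)
	}, paths...)
}
```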
**Which issue(s) this PR fixes**:
Fixes #[9179](https://github.com/grafana/loki/issues/9179)
https://github.com/grafana/loki/pull/10690
**Special notes for your reviewer**:
**Checklist**
- [x] Reviewed the
[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md)
guide (**required**)
- [ ] Documentation added
- [x] Tests updated
- [x] `CHANGELOG.md` updated
- [x] If the change is worth mentioning in the release notes, add
`add-to-release-notes` label
- [ ] Changes that require user attention or interaction to upgrade are
documented in `docs/sources/setup/upgrade/_index.md`
- [ ] For Helm chart changes bump the Helm chart version in
`production/helm/loki/Chart.yaml` and update
`production/helm/loki/CHANGELOG.md` and
`production/helm/loki/README.md`. [Example
PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)
- [ ] If the change is deprecating or removing a configuration option,
update the `deprecated-config.yaml` and `deleted-config.yaml` files
respectively in the `tools/deprecated-config-checker` directory.
[Example
PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15)
---
CHANGELOG.md | 1 +
.../pkg/promtail/targets/cloudflare/target.go | 2 +-
go.mod | 2 +-
go.sum | 4 +-
integration/client/client.go | 2 +-
operator/.bingo/go.mod | 2 +-
pkg/loghttp/entry.go | 2 +-
pkg/loghttp/labels.go | 2 +-
pkg/loghttp/query.go | 2 +-
pkg/logql/log/parser.go | 2 +-
pkg/logql/log/parser_test.go | 16 ++++-
pkg/querier/queryrange/extensions.go | 2 +-
.../github.com/buger/jsonparser/.travis.yml | 11 ----
.../{buger => grafana}/jsonparser/.gitignore | 0
.../github.com/grafana/jsonparser/.travis.yml | 12 ++++
.../{buger => grafana}/jsonparser/Dockerfile | 0
.../{buger => grafana}/jsonparser/LICENSE | 0
.../{buger => grafana}/jsonparser/Makefile | 0
.../{buger => grafana}/jsonparser/README.md | 4 --
.../{buger => grafana}/jsonparser/bytes.go | 36 ++++++------
.../jsonparser/bytes_safe.go | 0
.../jsonparser/bytes_unsafe.go | 0
.../{buger => grafana}/jsonparser/escape.go | 0
.../{buger => grafana}/jsonparser/fuzz.go | 0
.../jsonparser/oss-fuzz-build.sh | 0
.../{buger => grafana}/jsonparser/parser.go | 58 +++++++++++++++----
vendor/modules.txt | 6 +-
27 files changed, 105 insertions(+), 61 deletions(-)
delete mode 100644 vendor/github.com/buger/jsonparser/.travis.yml
rename vendor/github.com/{buger => grafana}/jsonparser/.gitignore (100%)
create mode 100644 vendor/github.com/grafana/jsonparser/.travis.yml
rename vendor/github.com/{buger => grafana}/jsonparser/Dockerfile (100%)
rename vendor/github.com/{buger => grafana}/jsonparser/LICENSE (100%)
rename vendor/github.com/{buger => grafana}/jsonparser/Makefile (100%)
rename vendor/github.com/{buger => grafana}/jsonparser/README.md (99%)
rename vendor/github.com/{buger => grafana}/jsonparser/bytes.go (50%)
rename vendor/github.com/{buger => grafana}/jsonparser/bytes_safe.go (100%)
rename vendor/github.com/{buger => grafana}/jsonparser/bytes_unsafe.go (100%)
rename vendor/github.com/{buger => grafana}/jsonparser/escape.go (100%)
rename vendor/github.com/{buger => grafana}/jsonparser/fuzz.go (100%)
rename vendor/github.com/{buger => grafana}/jsonparser/oss-fuzz-build.sh (100%)
rename vendor/github.com/{buger => grafana}/jsonparser/parser.go (96%)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5e5adea5d2a0..60b9e3dc5e2c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -66,6 +66,7 @@
* [11657](https://github.com/grafana/loki/pull/11657) **ashwanthgoli** Log results cache: compose empty response based on the request being served to avoid returning incorrect limit or direction.
* [11587](https://github.com/grafana/loki/pull/11587) **trevorwhitney** Fix semantics of label parsing logic of metrics and logs queries. Both only parse the first label if multiple extractions into the same label are requested.
* [11776](https://github.com/grafana/loki/pull/11776) **ashwanthgoli** Background Cache: Fixes a bug that is causing the background queue size to be incremented twice for each enqueued item.
+* [11921](https://github.com/grafana/loki/pull/11921) **paul1r**: Parsing: String array elements were not being parsed correctly in JSON processing
##### Changes
diff --git a/clients/pkg/promtail/targets/cloudflare/target.go b/clients/pkg/promtail/targets/cloudflare/target.go
index b64e33da4bc2..19d1f1875827 100644
--- a/clients/pkg/promtail/targets/cloudflare/target.go
+++ b/clients/pkg/promtail/targets/cloudflare/target.go
@@ -8,13 +8,13 @@ import (
"sync"
"time"
- "github.com/buger/jsonparser"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/cloudflare-go"
"github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/concurrency"
"github.com/grafana/dskit/multierror"
+ "github.com/grafana/jsonparser"
"github.com/prometheus/common/model"
"go.uber.org/atomic"
diff --git a/go.mod b/go.mod
index 87ea0fd07585..6235582406d5 100644
--- a/go.mod
+++ b/go.mod
@@ -21,7 +21,6 @@ require (
github.com/aws/aws-sdk-go v1.44.321
github.com/baidubce/bce-sdk-go v0.9.141
github.com/bmatcuk/doublestar v1.3.4
- github.com/buger/jsonparser v1.1.1
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b
github.com/cespare/xxhash v1.1.0
github.com/cespare/xxhash/v2 v2.2.0
@@ -124,6 +123,7 @@ require (
github.com/efficientgo/core v1.0.0-rc.2
github.com/fsnotify/fsnotify v1.6.0
github.com/gogo/googleapis v1.4.0
+ github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d
github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608
github.com/heroku/x v0.0.61
github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b
diff --git a/go.sum b/go.sum
index dd756d74f7c6..8ab729e92805 100644
--- a/go.sum
+++ b/go.sum
@@ -390,8 +390,6 @@ github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
-github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY=
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
github.com/caddyserver/caddy v1.0.4/go.mod h1:uruyfVsyMcDb3IOzSKsi1x0wOjy1my/PxOSTcD+24jM=
@@ -1003,6 +1001,8 @@ github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/I
github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I=
github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0 h1:aLBiDMjTtXx2800iCIp+8kdjIlvGX0MF/zICQMQO2qU=
github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU=
+github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d h1:YwbJJ/PrVWVdnR+j/EAVuazdeP+Za5qbiH1Vlr+wFXs=
+github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY=
github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU=
github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/grafana/pyroscope-go/godeltaprof v0.1.6 h1:nEdZ8louGAplSvIJi1HVp7kWvFvdiiYg3COLlTwJiFo=
diff --git a/integration/client/client.go b/integration/client/client.go
index 2e5a86aa6b3d..1ad94fd0edbb 100644
--- a/integration/client/client.go
+++ b/integration/client/client.go
@@ -14,9 +14,9 @@ import (
"strings"
"time"
- "github.com/buger/jsonparser"
"github.com/gorilla/websocket"
"github.com/grafana/dskit/user"
+ "github.com/grafana/jsonparser"
"github.com/prometheus/common/config"
"github.com/prometheus/prometheus/model/labels"
"go.opentelemetry.io/collector/pdata/pcommon"
diff --git a/operator/.bingo/go.mod b/operator/.bingo/go.mod
index 610249af0b0b..3aa5b7c946f5 100644
--- a/operator/.bingo/go.mod
+++ b/operator/.bingo/go.mod
@@ -1 +1 @@
-module _ // Fake go.mod auto-created by 'bingo' for go -moddir compatibility with non-Go projects. Commit this file, together with other .mod files.
\ No newline at end of file
+module _ // Fake go.mod auto-created by 'bingo' for go -moddir compatibility with non-Go projects. Commit this file, together with other .mod files.
diff --git a/pkg/loghttp/entry.go b/pkg/loghttp/entry.go
index 2a55ac9ecd28..0529bf536a2d 100644
--- a/pkg/loghttp/entry.go
+++ b/pkg/loghttp/entry.go
@@ -6,7 +6,7 @@ import (
"time"
"unsafe"
- "github.com/buger/jsonparser"
+ "github.com/grafana/jsonparser"
jsoniter "github.com/json-iterator/go"
"github.com/modern-go/reflect2"
"github.com/prometheus/prometheus/model/labels"
diff --git a/pkg/loghttp/labels.go b/pkg/loghttp/labels.go
index b15a94ab2341..98bad4e95786 100644
--- a/pkg/loghttp/labels.go
+++ b/pkg/loghttp/labels.go
@@ -6,8 +6,8 @@ import (
"strconv"
"strings"
- "github.com/buger/jsonparser"
"github.com/gorilla/mux"
+ "github.com/grafana/jsonparser"
"github.com/grafana/loki/pkg/logproto"
)
diff --git a/pkg/loghttp/query.go b/pkg/loghttp/query.go
index 617754393538..854ccd5ae711 100644
--- a/pkg/loghttp/query.go
+++ b/pkg/loghttp/query.go
@@ -8,7 +8,7 @@ import (
"time"
"unsafe"
- "github.com/buger/jsonparser"
+ "github.com/grafana/jsonparser"
json "github.com/json-iterator/go"
"github.com/prometheus/common/model"
diff --git a/pkg/logql/log/parser.go b/pkg/logql/log/parser.go
index c03e7c91cb96..90d4a4bebf8a 100644
--- a/pkg/logql/log/parser.go
+++ b/pkg/logql/log/parser.go
@@ -6,7 +6,7 @@ import (
"fmt"
"unicode/utf8"
- "github.com/buger/jsonparser"
+ "github.com/grafana/jsonparser"
"github.com/grafana/loki/pkg/logql/log/jsonexpr"
"github.com/grafana/loki/pkg/logql/log/logfmt"
diff --git a/pkg/logql/log/parser_test.go b/pkg/logql/log/parser_test.go
index bd57603ab808..f8cf6373a152 100644
--- a/pkg/logql/log/parser_test.go
+++ b/pkg/logql/log/parser_test.go
@@ -237,7 +237,7 @@ func (p *fakeParseHints) ShouldContinueParsingLine(_ string, _ *LabelsBuilder) b
}
func TestJSONExpressionParser(t *testing.T) {
- testLine := []byte(`{"app":"foo","field with space":"value","field with ÜFT8👌":"value","null_field":null,"bool_field":false,"namespace":"prod","pod":{"uuid":"foo","deployment":{"ref":"foobar", "params": [1,2,3]}}}`)
+ testLine := []byte(`{"app":"foo","field with space":"value","field with ÜFT8👌":"value","null_field":null,"bool_field":false,"namespace":"prod","pod":{"uuid":"foo","deployment":{"ref":"foobar", "params": [1,2,3,"string_value"]}}}`)
tests := []struct {
name string
@@ -340,6 +340,16 @@ func TestJSONExpressionParser(t *testing.T) {
labels.FromStrings("param", "1"),
NoParserHints(),
},
+ {
+ "array string element",
+ testLine,
+ []LabelExtractionExpr{
+ NewLabelExtractionExpr("param", `pod.deployment.params[3]`),
+ },
+ labels.EmptyLabels(),
+ labels.FromStrings("param", "string_value"),
+ NoParserHints(),
+ },
{
"full array",
testLine,
@@ -347,7 +357,7 @@ func TestJSONExpressionParser(t *testing.T) {
NewLabelExtractionExpr("params", `pod.deployment.params`),
},
labels.EmptyLabels(),
- labels.FromStrings("params", "[1,2,3]"),
+ labels.FromStrings("params", `[1,2,3,"string_value"]`),
NoParserHints(),
},
{
@@ -357,7 +367,7 @@ func TestJSONExpressionParser(t *testing.T) {
NewLabelExtractionExpr("deployment", `pod.deployment`),
},
labels.EmptyLabels(),
- labels.FromStrings("deployment", `{"ref":"foobar", "params": [1,2,3]}`),
+ labels.FromStrings("deployment", `{"ref":"foobar", "params": [1,2,3,"string_value"]}`),
NoParserHints(),
},
{
diff --git a/pkg/querier/queryrange/extensions.go b/pkg/querier/queryrange/extensions.go
index b8a0ca7f4193..75d4ce2cb4ed 100644
--- a/pkg/querier/queryrange/extensions.go
+++ b/pkg/querier/queryrange/extensions.go
@@ -3,7 +3,7 @@ package queryrange
import (
"fmt"
- "github.com/buger/jsonparser"
+ "github.com/grafana/jsonparser"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
diff --git a/vendor/github.com/buger/jsonparser/.travis.yml b/vendor/github.com/buger/jsonparser/.travis.yml
deleted file mode 100644
index dbfb7cf98830..000000000000
--- a/vendor/github.com/buger/jsonparser/.travis.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-language: go
-arch:
- - amd64
- - ppc64le
-go:
- - 1.7.x
- - 1.8.x
- - 1.9.x
- - 1.10.x
- - 1.11.x
-script: go test -v ./.
diff --git a/vendor/github.com/buger/jsonparser/.gitignore b/vendor/github.com/grafana/jsonparser/.gitignore
similarity index 100%
rename from vendor/github.com/buger/jsonparser/.gitignore
rename to vendor/github.com/grafana/jsonparser/.gitignore
diff --git a/vendor/github.com/grafana/jsonparser/.travis.yml b/vendor/github.com/grafana/jsonparser/.travis.yml
new file mode 100644
index 000000000000..56f9c9c42123
--- /dev/null
+++ b/vendor/github.com/grafana/jsonparser/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+arch:
+ - amd64
+ - ppc64le
+go:
+ - 1.13.x
+ - 1.14.x
+ - 1.15.x
+ - 1.16.x
+ - 1.17.x
+ - 1.18.x
+script: go test -v ./.
diff --git a/vendor/github.com/buger/jsonparser/Dockerfile b/vendor/github.com/grafana/jsonparser/Dockerfile
similarity index 100%
rename from vendor/github.com/buger/jsonparser/Dockerfile
rename to vendor/github.com/grafana/jsonparser/Dockerfile
diff --git a/vendor/github.com/buger/jsonparser/LICENSE b/vendor/github.com/grafana/jsonparser/LICENSE
similarity index 100%
rename from vendor/github.com/buger/jsonparser/LICENSE
rename to vendor/github.com/grafana/jsonparser/LICENSE
diff --git a/vendor/github.com/buger/jsonparser/Makefile b/vendor/github.com/grafana/jsonparser/Makefile
similarity index 100%
rename from vendor/github.com/buger/jsonparser/Makefile
rename to vendor/github.com/grafana/jsonparser/Makefile
diff --git a/vendor/github.com/buger/jsonparser/README.md b/vendor/github.com/grafana/jsonparser/README.md
similarity index 99%
rename from vendor/github.com/buger/jsonparser/README.md
rename to vendor/github.com/grafana/jsonparser/README.md
index d7e0ec397aff..0b2f1fb03712 100644
--- a/vendor/github.com/buger/jsonparser/README.md
+++ b/vendor/github.com/grafana/jsonparser/README.md
@@ -90,10 +90,6 @@ jsonparser.EachKey(data, func(idx int, value []byte, vt jsonparser.ValueType, er
// For more information see docs below
```
-## Need to speedup your app?
-
-I'm available for consulting and can help you push your app performance to the limits. Ping me at: leonsbox@gmail.com.
-
## Reference
Library API is really simple. You just need the `Get` method to perform any operation. The rest is just helpers around it.
diff --git a/vendor/github.com/buger/jsonparser/bytes.go b/vendor/github.com/grafana/jsonparser/bytes.go
similarity index 50%
rename from vendor/github.com/buger/jsonparser/bytes.go
rename to vendor/github.com/grafana/jsonparser/bytes.go
index 0bb0ff39562c..9d6e701f5836 100644
--- a/vendor/github.com/buger/jsonparser/bytes.go
+++ b/vendor/github.com/grafana/jsonparser/bytes.go
@@ -1,11 +1,8 @@
package jsonparser
-import (
- bio "bytes"
-)
-
-// minInt64 '-9223372036854775808' is the smallest representable number in int64
-const minInt64 = `9223372036854775808`
+const absMinInt64 = 1 << 63
+const maxInt64 = 1<<63 - 1
+const maxUint64 = 1<<64 - 1
// About 2x faster then strconv.ParseInt because it only supports base 10, which is enough for JSON
func parseInt(bytes []byte) (v int64, ok bool, overflow bool) {
@@ -19,29 +16,32 @@ func parseInt(bytes []byte) (v int64, ok bool, overflow bool) {
bytes = bytes[1:]
}
- var b int64 = 0
+ var n uint64 = 0
for _, c := range bytes {
- if c >= '0' && c <= '9' {
- b = (10 * v) + int64(c-'0')
- } else {
+ if c < '0' || c > '9' {
return 0, false, false
}
- if overflow = (b < v); overflow {
- break
+ if n > maxUint64/10 {
+ return 0, false, true
+ }
+ n *= 10
+ n1 := n + uint64(c-'0')
+ if n1 < n {
+ return 0, false, true
}
- v = b
+ n = n1
}
- if overflow {
- if neg && bio.Equal(bytes, []byte(minInt64)) {
- return b, true, false
+ if n > maxInt64 {
+ if neg && n == absMinInt64 {
+ return -absMinInt64, true, false
}
return 0, false, true
}
if neg {
- return -v, true, false
+ return -int64(n), true, false
} else {
- return v, true, false
+ return int64(n), true, false
}
}
diff --git a/vendor/github.com/buger/jsonparser/bytes_safe.go b/vendor/github.com/grafana/jsonparser/bytes_safe.go
similarity index 100%
rename from vendor/github.com/buger/jsonparser/bytes_safe.go
rename to vendor/github.com/grafana/jsonparser/bytes_safe.go
diff --git a/vendor/github.com/buger/jsonparser/bytes_unsafe.go b/vendor/github.com/grafana/jsonparser/bytes_unsafe.go
similarity index 100%
rename from vendor/github.com/buger/jsonparser/bytes_unsafe.go
rename to vendor/github.com/grafana/jsonparser/bytes_unsafe.go
diff --git a/vendor/github.com/buger/jsonparser/escape.go b/vendor/github.com/grafana/jsonparser/escape.go
similarity index 100%
rename from vendor/github.com/buger/jsonparser/escape.go
rename to vendor/github.com/grafana/jsonparser/escape.go
diff --git a/vendor/github.com/buger/jsonparser/fuzz.go b/vendor/github.com/grafana/jsonparser/fuzz.go
similarity index 100%
rename from vendor/github.com/buger/jsonparser/fuzz.go
rename to vendor/github.com/grafana/jsonparser/fuzz.go
diff --git a/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh b/vendor/github.com/grafana/jsonparser/oss-fuzz-build.sh
similarity index 100%
rename from vendor/github.com/buger/jsonparser/oss-fuzz-build.sh
rename to vendor/github.com/grafana/jsonparser/oss-fuzz-build.sh
diff --git a/vendor/github.com/buger/jsonparser/parser.go b/vendor/github.com/grafana/jsonparser/parser.go
similarity index 96%
rename from vendor/github.com/buger/jsonparser/parser.go
rename to vendor/github.com/grafana/jsonparser/parser.go
index 14b80bc4838c..5df2a463dcee 100644
--- a/vendor/github.com/buger/jsonparser/parser.go
+++ b/vendor/github.com/grafana/jsonparser/parser.go
@@ -18,6 +18,7 @@ var (
MalformedValueError = errors.New("Value looks like Number/Boolean/None, but can't find its end: ',' or '}' symbol")
OverflowIntegerError = errors.New("Value is number, but overflowed while parsing")
MalformedStringEscapeError = errors.New("Encountered an invalid escape sequence in a string")
+ NullValueError = errors.New("Value is null")
)
// How much stack space to allocate for unescaping JSON strings; if a string longer
@@ -49,10 +50,13 @@ func findTokenStart(data []byte, token byte) int {
}
func findKeyStart(data []byte, key string) (int, error) {
- i := 0
+ i := nextToken(data)
+ if i == -1 {
+ return i, KeyPathNotFoundError
+ }
ln := len(data)
- if ln > 0 && (data[0] == '{' || data[0] == '[') {
- i = 1
+ if ln > 0 && (data[i] == '{' || data[i] == '[') {
+ i += 1
}
var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings
@@ -308,7 +312,7 @@ func searchKeys(data []byte, keys ...string) int {
case '[':
// If we want to get array element by index
if keyLevel == level && keys[level][0] == '[' {
- var keyLen = len(keys[level])
+ keyLen := len(keys[level])
if keyLen < 3 || keys[level][0] != '[' || keys[level][keyLen-1] != ']' {
return -1
}
@@ -319,7 +323,7 @@ func searchKeys(data []byte, keys ...string) int {
var curIdx int
var valueFound []byte
var valueOffset int
- var curI = i
+ curI := i
ArrayEach(data[i:], func(value []byte, dataType ValueType, offset int, err error) {
if curIdx == aIdx {
valueFound = value
@@ -374,12 +378,19 @@ func sameTree(p1, p2 []string) bool {
return true
}
+const stackArraySize = 128
+
func EachKey(data []byte, cb func(int, []byte, ValueType, error), paths ...[]string) int {
var x struct{}
- pathFlags := make([]bool, len(paths))
var level, pathsMatched, i int
ln := len(data)
+ pathFlags := make([]bool, stackArraySize)[:]
+ if len(paths) > cap(pathFlags) {
+ pathFlags = make([]bool, len(paths))[:]
+ }
+ pathFlags = pathFlags[0:len(paths)]
+
var maxPath int
for _, p := range paths {
if len(p) > maxPath {
@@ -387,7 +398,11 @@ func EachKey(data []byte, cb func(int, []byte, ValueType, error), paths ...[]str
}
}
- pathsBuf := make([]string, maxPath)
+ pathsBuf := make([]string, stackArraySize)[:]
+ if maxPath > cap(pathsBuf) {
+ pathsBuf = make([]string, maxPath)[:]
+ }
+ pathsBuf = pathsBuf[0:maxPath]
for i < ln {
switch data[i] {
@@ -484,7 +499,12 @@ func EachKey(data []byte, cb func(int, []byte, ValueType, error), paths ...[]str
case '[':
var ok bool
arrIdxFlags := make(map[int]struct{})
- pIdxFlags := make([]bool, len(paths))
+
+ pIdxFlags := make([]bool, stackArraySize)[:]
+ if len(paths) > cap(pIdxFlags) {
+ pIdxFlags = make([]bool, len(paths))[:]
+ }
+ pIdxFlags = pIdxFlags[0:len(paths)]
if level < 0 {
cb(-1, nil, Unknown, MalformedJsonError)
@@ -519,8 +539,13 @@ func EachKey(data []byte, cb func(int, []byte, ValueType, error), paths ...[]str
pathFlags[pi] = true
if of != -1 {
- v, dt, _, e := Get(value[of:])
- cb(pi, v, dt, e)
+ if dataType == String {
+ // the double-quotes were stripped, so we cannot call Get again.
+ cb(pi, value[of:], dataType, nil)
+ } else {
+ v, dt, _, e := Get(value[of:])
+ cb(pi, v, dt, e)
+ }
}
}
}
@@ -662,7 +687,6 @@ func calcAllocateSpace(keys []string, setValue []byte, comma, object bool) int {
}
}
-
lk += len(setValue)
for i := 1; i < len(keys); i++ {
if string(keys[i][0]) == "[" {
@@ -1178,6 +1202,9 @@ func GetString(data []byte, keys ...string) (val string, err error) {
}
if t != String {
+ if t == Null {
+ return "", NullValueError
+ }
return "", fmt.Errorf("Value is not a string: %s", string(v))
}
@@ -1200,6 +1227,9 @@ func GetFloat(data []byte, keys ...string) (val float64, err error) {
}
if t != Number {
+ if t == Null {
+ return 0, NullValueError
+ }
return 0, fmt.Errorf("Value is not a number: %s", string(v))
}
@@ -1216,6 +1246,9 @@ func GetInt(data []byte, keys ...string) (val int64, err error) {
}
if t != Number {
+ if t == Null {
+ return 0, NullValueError
+ }
return 0, fmt.Errorf("Value is not a number: %s", string(v))
}
@@ -1233,6 +1266,9 @@ func GetBoolean(data []byte, keys ...string) (val bool, err error) {
}
if t != Boolean {
+ if t == Null {
+ return false, NullValueError
+ }
return false, fmt.Errorf("Value is not a boolean: %s", string(v))
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 371c20583493..e7cbf47887d6 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -420,9 +420,6 @@ github.com/beorn7/perks/quantile
# github.com/bmatcuk/doublestar v1.3.4
## explicit; go 1.12
github.com/bmatcuk/doublestar
-# github.com/buger/jsonparser v1.1.1
-## explicit; go 1.13
-github.com/buger/jsonparser
# github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b
## explicit
github.com/c2h5oh/datasize
@@ -905,6 +902,9 @@ github.com/grafana/go-gelf/v2/gelf
# github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0
## explicit; go 1.18
github.com/grafana/gomemcache/memcache
+# github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d
+## explicit; go 1.13
+github.com/grafana/jsonparser
# github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 => ./pkg/push
## explicit; go 1.19
github.com/grafana/loki/pkg/push
From 1f9f9a6e5f0c61e8d42787488f17a42e9fbfdf13 Mon Sep 17 00:00:00 2001
From: "Grot (@grafanabot)" <43478413+grafanabot@users.noreply.github.com>
Date: Tue, 13 Feb 2024 14:31:56 +0000
Subject: [PATCH 057/130] [Release Notes Appender] Add PR #11921: Parse JSON
String arrays properly so string elements can be retrieved (#11931)
Add PR #11921 to release notes for next release
---------
Co-authored-by: Paul Rogers
---
docs/sources/release-notes/next.md | 34 ++++++++++++++++--------------
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/docs/sources/release-notes/next.md b/docs/sources/release-notes/next.md
index a2a6e8133008..1aadfcba4db1 100644
--- a/docs/sources/release-notes/next.md
+++ b/docs/sources/release-notes/next.md
@@ -1,16 +1,18 @@
----
-title: V?.?
-description: Version ?.? release notes
-weight: 55
----
-
-# V?.?
-Grafana Labs is excited to announce the release of Loki ?.?.? Here's a summary of new enhancements and important fixes:
-
-:warning: This a placeholder for the next release. Clean up all features listed below
-
-## Features and enhancements
-
-## Upgrade Considerations
-
-## Bug fixes
+---
+title: V?.?
+description: Version ?.? release notes
+weight: 55
+---
+
+# V?.?
+Grafana Labs is excited to announce the release of Loki ?.?.? Here's a summary of new enhancements and important fixes:
+
+:warning: This is a placeholder for the next release. Clean up all features listed below
+
+## Features and enhancements
+
+## Upgrade Considerations
+
+## Bug fixes
+
+- **Parse JSON String arrays properly so string elements can be retrieved**: [PR #11921](https://github.com/grafana/loki/pull/11921)
\ No newline at end of file
From b2e4cc39a88409c17215ac484d7c98adcfe53750 Mon Sep 17 00:00:00 2001
From: Paul Rogers <129207811+paul1r@users.noreply.github.com>
Date: Tue, 13 Feb 2024 12:45:15 -0500
Subject: [PATCH 058/130] Add mutex for t.stopped to prevent data races
(#11933)
**What this PR does / why we need it**:
This addresses the data race present on the `t.stopped` variable in
`tail.go`.
```
==================
WARNING: DATA RACE
Write at 0x00c00098b198 by goroutine 568:
github.com/grafana/loki/pkg/querier.(*Tailer).close()
/Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail.go:272 +0x104
github.com/grafana/loki/pkg/querier.TestTailer.func7.2()
/Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail_test.go:169 +0x34
runtime.deferreturn()
/opt/homebrew/Cellar/go/1.21.6/libexec/src/runtime/panic.go:477 +0x34
testing.tRunner()
/opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1595 +0x1b0
testing.(*T).Run.func1()
/opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1648 +0x40
Previous read at 0x00c00098b198 by goroutine 569:
github.com/grafana/loki/pkg/querier.(*Tailer).loop()
/Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail.go:88 +0x13c
github.com/grafana/loki/pkg/querier.newTailer.func1()
/Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail.go:342 +0x34
Goroutine 568 (running) created at:
testing.(*T).Run()
/opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1648 +0x5e8
github.com/grafana/loki/pkg/querier.TestTailer()
/Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail_test.go:158 +0x10dc
testing.tRunner()
/opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1595 +0x1b0
testing.(*T).Run.func1()
/opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1648 +0x40
Goroutine 569 (running) created at:
github.com/grafana/loki/pkg/querier.newTailer()
/Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail.go:342 +0x300
github.com/grafana/loki/pkg/querier.TestTailer.func7()
/Users/progers/dev/src/github.com/grafana/loki/pkg/querier/tail_test.go:168 +0x138
testing.tRunner()
/opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1595 +0x1b0
testing.(*T).Run.func1()
/opt/homebrew/Cellar/go/1.21.6/libexec/src/testing/testing.go:1648 +0x40
==================
```
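For reference, a minimal sketch of the pattern the fix adopts, assuming only that `go.uber.org/atomic` is available (the type and method names match the diff below; the surrounding scaffolding is illustrative):
```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

// tailer mimics the relevant part of querier.Tailer: a stop flag that is
// written by close() and read by the tailing goroutines. Using atomic.Bool
// makes the concurrent Store/Load pair race-free without a mutex.
type tailer struct {
	stopped atomic.Bool
}

func (t *tailer) close() {
	t.stopped.Store(true)
}

func (t *tailer) running() bool {
	return !t.stopped.Load()
}

func main() {
	t := &tailer{}
	fmt.Println(t.running()) // true
	t.close()
	fmt.Println(t.running()) // false
}
```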
**Which issue(s) this PR fixes**:
Relates to: https://github.com/grafana/loki/issues/8586
**Special notes for your reviewer**:
**Checklist**
- [ ] Reviewed the
[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md)
guide (**required**)
- [ ] Documentation added
- [ ] Tests updated
- [ ] `CHANGELOG.md` updated
- [ ] If the change is worth mentioning in the release notes, add
`add-to-release-notes` label
- [ ] Changes that require user attention or interaction to upgrade are
documented in `docs/sources/setup/upgrade/_index.md`
- [ ] For Helm chart changes bump the Helm chart version in
`production/helm/loki/Chart.yaml` and update
`production/helm/loki/CHANGELOG.md` and
`production/helm/loki/README.md`. [Example
PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)
- [ ] If the change is deprecating or removing a configuration option,
update the `deprecated-config.yaml` and `deleted-config.yaml` files
respectively in the `tools/deprecated-config-checker` directory.
[Example
PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15)
---
pkg/querier/http.go | 2 +-
pkg/querier/tail.go | 15 ++++++++++-----
pkg/querier/tail_test.go | 2 +-
3 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/pkg/querier/http.go b/pkg/querier/http.go
index dc29c2f61e04..a508bf9f7286 100644
--- a/pkg/querier/http.go
+++ b/pkg/querier/http.go
@@ -199,7 +199,7 @@ func (q *QuerierAPI) TailHandler(w http.ResponseWriter, r *http.Request) {
}
level.Error(logger).Log("msg", "Error from client", "err", err)
break
- } else if tailer.stopped {
+ } else if tailer.stopped.Load() {
return
}
diff --git a/pkg/querier/tail.go b/pkg/querier/tail.go
index 1b3cfd5fcb8c..35cb4bc18e7a 100644
--- a/pkg/querier/tail.go
+++ b/pkg/querier/tail.go
@@ -6,6 +6,8 @@ import (
"sync"
"time"
+ "go.uber.org/atomic"
+
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pkg/errors"
@@ -51,7 +53,7 @@ type Tailer struct {
querierTailClients map[string]logproto.Querier_TailClient // addr -> grpc clients for tailing logs from ingesters
querierTailClientsMtx sync.RWMutex
- stopped bool
+ stopped atomic.Bool
delayFor time.Duration
responseChan chan *loghttp.TailResponse
closeErrChan chan error
@@ -85,7 +87,8 @@ func (t *Tailer) loop() {
droppedEntries := make([]loghttp.DroppedEntry, 0)
- for !t.stopped {
+ stopped := t.stopped.Load()
+ for !stopped {
select {
case <-checkConnectionTicker.C:
// Try to reconnect dropped ingesters and connect to new ingesters
@@ -214,7 +217,8 @@ func (t *Tailer) readTailClient(addr string, querierTailClient logproto.Querier_
logger := util_log.WithContext(querierTailClient.Context(), t.logger)
for {
- if t.stopped {
+ stopped := t.stopped.Load()
+ if stopped {
if err := querierTailClient.CloseSend(); err != nil {
level.Error(logger).Log("msg", "Error closing grpc tail client", "err", err)
}
@@ -223,7 +227,7 @@ func (t *Tailer) readTailClient(addr string, querierTailClient logproto.Querier_
resp, err = querierTailClient.Recv()
if err != nil {
// We don't want to log error when its due to stopping the tail request
- if !t.stopped {
+ if !stopped {
level.Error(logger).Log("msg", "Error receiving response from grpc tail client", "err", err)
}
break
@@ -269,7 +273,8 @@ func (t *Tailer) close() error {
t.metrics.tailsActive.Dec()
t.metrics.tailedStreamsActive.Sub(t.activeStreamCount())
- t.stopped = true
+ t.stopped.Store(true)
+
return t.openStreamIterator.Close()
}
diff --git a/pkg/querier/tail_test.go b/pkg/querier/tail_test.go
index d0b17ea126e2..07d3743af03c 100644
--- a/pkg/querier/tail_test.go
+++ b/pkg/querier/tail_test.go
@@ -389,7 +389,7 @@ func readFromTailer(tailer *Tailer, maxEntries int) ([]*loghttp.TailResponse, er
timeoutTicker := time.NewTicker(timeout)
defer timeoutTicker.Stop()
- for !tailer.stopped && entriesCount < maxEntries {
+ for !tailer.stopped.Load() && entriesCount < maxEntries {
select {
case <-timeoutTicker.C:
return nil, errors.New("timeout expired while reading responses from Tailer")
From 25785e03d5f529443ecef331e176999308cc65b0 Mon Sep 17 00:00:00 2001
From: Derek Cadzow
Date: Tue, 13 Feb 2024 15:20:48 -0500
Subject: [PATCH 059/130] Deleting old and irrelevant information (#11929)
---
docs/variables.mk | 3 ---
1 file changed, 3 deletions(-)
diff --git a/docs/variables.mk b/docs/variables.mk
index afa0a9e86736..1ec7dbab5767 100644
--- a/docs/variables.mk
+++ b/docs/variables.mk
@@ -1,8 +1,5 @@
# List of projects to provide to the make-docs script.
PROJECTS := loki
-# Use alternative image until make-docs 3.0.0 is rolled out.
-export DOCS_IMAGE := grafana/docs-base:dbd975af06
-
# Set the DOC_VALIDATOR_IMAGE to match the one defined in CI.
export DOC_VALIDATOR_IMAGE := $(shell sed -En 's, *image: "(grafana/doc-validator.*)",\1,p' "$(shell git rev-parse --show-toplevel)/.github/workflows/doc-validator.yml")
From 1c43991ddcbb801a6a6d7a535062c7c615b0423a Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Wed, 14 Feb 2024 09:49:11 +0100
Subject: [PATCH 060/130] Remove `bloomcompactor.DayTable` in favour of
`config.DayTime` (#11917)
Both structs shared the same semantics.
This PR moves the additional functionality from `DayTable` to the `DayTime` struct.
To get the table name of a day (the ordinal number of the day since the Unix epoch), call `DayTime.Table()`.
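For reference, a hedged sketch of how that table name is derived, based on the old `DayTable.String()` implementation removed below; the constant is assumed to mirror `config.ObjectStorageIndexRequiredPeriod` (24h), and the helper name is illustrative:
```go
package main

import (
	"fmt"
	"time"
)

// objectStorageIndexRequiredPeriod mirrors config.ObjectStorageIndexRequiredPeriod.
const objectStorageIndexRequiredPeriod = 24 * time.Hour

// tableName renders the ordinal number of the day since the Unix epoch,
// the value DayTime.Table() produces for object-storage index paths.
func tableName(ts time.Time) string {
	return fmt.Sprintf("%d", ts.UnixNano()/int64(objectStorageIndexRequiredPeriod))
}

func main() {
	fmt.Println(tableName(time.Date(2024, 2, 14, 0, 0, 0, 0, time.UTC))) // "19767"
}
```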
Signed-off-by: Christian Haudum
---
pkg/bloomcompactor/bloomcompactor.go | 15 +++----
pkg/bloomcompactor/config.go | 38 ----------------
pkg/bloomcompactor/controller.go | 13 +++---
pkg/bloomcompactor/tsdb.go | 34 +++++++-------
pkg/bloomgateway/bloomgateway.go | 2 +-
pkg/bloomgateway/multiplexing.go | 7 +--
pkg/bloomgateway/processor.go | 10 ++---
pkg/bloomgateway/processor_test.go | 5 ++-
pkg/bloomgateway/util.go | 5 ++-
pkg/bloomgateway/util_test.go | 12 ++---
pkg/querier/queryrange/limits.go | 4 +-
pkg/storage/config/schema_config.go | 45 ++++++++++++++++++-
.../stores/shipper/bloomshipper/client.go | 2 +-
13 files changed, 98 insertions(+), 94 deletions(-)
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index ed1f50ae7258..566b836609d1 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -169,11 +169,11 @@ func runWithRetries(
type tenantTable struct {
tenant string
- table DayTable
+ table config.DayTime
ownershipRange v1.FingerprintBounds
}
-func (c *Compactor) tenants(ctx context.Context, table DayTable) (v1.Iterator[string], error) {
+func (c *Compactor) tenants(ctx context.Context, table config.DayTime) (v1.Iterator[string], error) {
tenants, err := c.tsdbStore.UsersForPeriod(ctx, table)
if err != nil {
return nil, errors.Wrap(err, "getting tenants")
@@ -214,10 +214,9 @@ func (c *Compactor) tables(ts time.Time) *dayRangeIterator {
from := ts.Add(-maxCompactionPeriod).UnixNano() / int64(config.ObjectStorageIndexRequiredPeriod) * int64(config.ObjectStorageIndexRequiredPeriod)
through := ts.Add(-minCompactionPeriod).UnixNano() / int64(config.ObjectStorageIndexRequiredPeriod) * int64(config.ObjectStorageIndexRequiredPeriod)
- fromDay := DayTable(model.TimeFromUnixNano(from))
- throughDay := DayTable(model.TimeFromUnixNano(through))
+ fromDay := config.NewDayTime(model.TimeFromUnixNano(from))
+ throughDay := config.NewDayTime(model.TimeFromUnixNano(through))
return newDayRangeIterator(fromDay, throughDay)
-
}
func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error {
@@ -295,10 +294,10 @@ func (c *Compactor) compactTenantTable(ctx context.Context, tt tenantTable) erro
}
type dayRangeIterator struct {
- min, max, cur DayTable
+ min, max, cur config.DayTime
}
-func newDayRangeIterator(min, max DayTable) *dayRangeIterator {
+func newDayRangeIterator(min, max config.DayTime) *dayRangeIterator {
return &dayRangeIterator{min: min, max: max, cur: min.Dec()}
}
@@ -307,7 +306,7 @@ func (r *dayRangeIterator) Next() bool {
return r.cur.Before(r.max)
}
-func (r *dayRangeIterator) At() DayTable {
+func (r *dayRangeIterator) At() config.DayTime {
return r.cur
}
diff --git a/pkg/bloomcompactor/config.go b/pkg/bloomcompactor/config.go
index dd821d81c906..15f9aa86c040 100644
--- a/pkg/bloomcompactor/config.go
+++ b/pkg/bloomcompactor/config.go
@@ -5,10 +5,6 @@ import (
"fmt"
"time"
- "github.com/prometheus/common/model"
-
- "github.com/grafana/loki/pkg/storage/config"
- "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/downloads"
"github.com/grafana/loki/pkg/util/ring"
)
@@ -70,37 +66,3 @@ type Limits interface {
BloomFalsePositiveRate(tenantID string) float64
BloomCompactorMaxBlockSize(tenantID string) int
}
-
-// TODO(owen-d): Remove this type in favor of config.DayTime
-type DayTable model.Time
-
-func (d DayTable) String() string {
- return fmt.Sprintf("%d", d.ModelTime().Time().UnixNano()/int64(config.ObjectStorageIndexRequiredPeriod))
-}
-
-func (d DayTable) Inc() DayTable {
- return DayTable(d.ModelTime().Add(config.ObjectStorageIndexRequiredPeriod))
-}
-
-func (d DayTable) Dec() DayTable {
- return DayTable(d.ModelTime().Add(-config.ObjectStorageIndexRequiredPeriod))
-}
-
-func (d DayTable) Before(other DayTable) bool {
- return d.ModelTime().Before(model.Time(other))
-}
-
-func (d DayTable) After(other DayTable) bool {
- return d.ModelTime().After(model.Time(other))
-}
-
-func (d DayTable) ModelTime() model.Time {
- return model.Time(d)
-}
-
-func (d DayTable) Bounds() bloomshipper.Interval {
- return bloomshipper.Interval{
- Start: model.Time(d),
- End: model.Time(d.Inc()),
- }
-}
diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go
index 47d9627d92e1..8470fd9ad708 100644
--- a/pkg/bloomcompactor/controller.go
+++ b/pkg/bloomcompactor/controller.go
@@ -13,6 +13,7 @@ import (
"github.com/pkg/errors"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb"
)
@@ -55,7 +56,7 @@ func (s *SimpleBloomController) rwFn() (v1.BlockWriter, v1.BlockReader) {
func (s *SimpleBloomController) buildBlocks(
ctx context.Context,
- table DayTable,
+ table config.DayTime,
tenant string,
ownershipRange v1.FingerprintBounds,
) error {
@@ -78,15 +79,11 @@ func (s *SimpleBloomController) buildBlocks(
}
// 2. Fetch metas
- bounds := table.Bounds()
metas, err := s.bloomStore.FetchMetas(
ctx,
bloomshipper.MetaSearchParams{
TenantID: tenant,
- Interval: bloomshipper.Interval{
- Start: bounds.Start,
- End: bounds.End,
- },
+ Interval: bloomshipper.NewInterval(table.Bounds()),
Keyspace: ownershipRange,
},
)
@@ -176,7 +173,7 @@ func (s *SimpleBloomController) buildBlocks(
blockCt++
blk := newBlocks.At()
- built, err := bloomshipper.BlockFrom(tenant, table.String(), blk)
+ built, err := bloomshipper.BlockFrom(tenant, table.Table(), blk)
if err != nil {
level.Error(logger).Log("msg", "failed to build block", "err", err)
return errors.Wrap(err, "failed to build block")
@@ -214,7 +211,7 @@ func (s *SimpleBloomController) buildBlocks(
func (s *SimpleBloomController) loadWorkForGap(
ctx context.Context,
- table DayTable,
+ table config.DayTime,
tenant string,
id tsdb.Identifier,
gap gapWithBlocks,
diff --git a/pkg/bloomcompactor/tsdb.go b/pkg/bloomcompactor/tsdb.go
index e6fd92961c46..ad7b2eafac4c 100644
--- a/pkg/bloomcompactor/tsdb.go
+++ b/pkg/bloomcompactor/tsdb.go
@@ -26,11 +26,11 @@ const (
)
type TSDBStore interface {
- UsersForPeriod(ctx context.Context, table DayTable) ([]string, error)
- ResolveTSDBs(ctx context.Context, table DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error)
+ UsersForPeriod(ctx context.Context, table config.DayTime) ([]string, error)
+ ResolveTSDBs(ctx context.Context, table config.DayTime, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error)
LoadTSDB(
ctx context.Context,
- table DayTable,
+ table config.DayTime,
tenant string,
id tsdb.Identifier,
bounds v1.FingerprintBounds,
@@ -49,13 +49,13 @@ func NewBloomTSDBStore(storage storage.Client) *BloomTSDBStore {
}
}
-func (b *BloomTSDBStore) UsersForPeriod(ctx context.Context, table DayTable) ([]string, error) {
- _, users, err := b.storage.ListFiles(ctx, table.String(), true) // bypass cache for ease of testing
+func (b *BloomTSDBStore) UsersForPeriod(ctx context.Context, table config.DayTime) ([]string, error) {
+ _, users, err := b.storage.ListFiles(ctx, table.Table(), true) // bypass cache for ease of testing
return users, err
}
-func (b *BloomTSDBStore) ResolveTSDBs(ctx context.Context, table DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) {
- indices, err := b.storage.ListUserFiles(ctx, table.String(), tenant, true) // bypass cache for ease of testing
+func (b *BloomTSDBStore) ResolveTSDBs(ctx context.Context, table config.DayTime, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) {
+ indices, err := b.storage.ListUserFiles(ctx, table.Table(), tenant, true) // bypass cache for ease of testing
if err != nil {
return nil, errors.Wrap(err, "failed to list user files")
}
@@ -80,14 +80,14 @@ func (b *BloomTSDBStore) ResolveTSDBs(ctx context.Context, table DayTable, tenan
func (b *BloomTSDBStore) LoadTSDB(
ctx context.Context,
- table DayTable,
+ table config.DayTime,
tenant string,
id tsdb.Identifier,
bounds v1.FingerprintBounds,
) (v1.CloseableIterator[*v1.Series], error) {
withCompression := id.Name() + gzipExtension
- data, err := b.storage.GetUserFile(ctx, table.String(), tenant, withCompression)
+ data, err := b.storage.GetUserFile(ctx, table.Table(), tenant, withCompression)
if err != nil {
return nil, errors.Wrap(err, "failed to get file")
}
@@ -244,11 +244,11 @@ func NewTSDBStores(
return res, nil
}
-func (s *TSDBStores) storeForPeriod(table DayTable) (TSDBStore, error) {
+func (s *TSDBStores) storeForPeriod(table config.DayTime) (TSDBStore, error) {
for i := len(s.schemaCfg.Configs) - 1; i >= 0; i-- {
period := s.schemaCfg.Configs[i]
- if !table.Before(DayTable(period.From.Time)) {
+ if !table.Before(period.From) {
// we have the desired period config
if s.stores[i] != nil {
@@ -260,19 +260,19 @@ func (s *TSDBStores) storeForPeriod(table DayTable) (TSDBStore, error) {
return nil, errors.Errorf(
"store for period is not of TSDB type (%s) while looking up store for (%v)",
period.IndexType,
- table.ModelTime().Time(),
+ table,
)
}
}
return nil, fmt.Errorf(
- "There is no store matching no matching period found for table (%v) -- too early",
- table.ModelTime().Time(),
+ "there is no store matching no matching period found for table (%v) -- too early",
+ table,
)
}
-func (s *TSDBStores) UsersForPeriod(ctx context.Context, table DayTable) ([]string, error) {
+func (s *TSDBStores) UsersForPeriod(ctx context.Context, table config.DayTime) ([]string, error) {
store, err := s.storeForPeriod(table)
if err != nil {
return nil, err
@@ -281,7 +281,7 @@ func (s *TSDBStores) UsersForPeriod(ctx context.Context, table DayTable) ([]stri
return store.UsersForPeriod(ctx, table)
}
-func (s *TSDBStores) ResolveTSDBs(ctx context.Context, table DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) {
+func (s *TSDBStores) ResolveTSDBs(ctx context.Context, table config.DayTime, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) {
store, err := s.storeForPeriod(table)
if err != nil {
return nil, err
@@ -292,7 +292,7 @@ func (s *TSDBStores) ResolveTSDBs(ctx context.Context, table DayTable, tenant st
func (s *TSDBStores) LoadTSDB(
ctx context.Context,
- table DayTable,
+ table config.DayTime,
tenant string,
id tsdb.Identifier,
bounds v1.FingerprintBounds,
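storeForPeriod scans the schema configs from newest to oldest and settles on the first period whose From day is not after the requested table. A sketch of just that lookup; findConfig is our name for it, not the patch's:

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/storage/config"
)

// findConfig mirrors the loop in storeForPeriod: periods are sorted by start
// time, so walking from the end finds the latest period covering the table.
func findConfig(configs []config.PeriodConfig, table config.DayTime) (config.PeriodConfig, bool) {
	for i := len(configs) - 1; i >= 0; i-- {
		if !table.Before(configs[i].From) {
			return configs[i], true
		}
	}
	// The table predates every configured period ("too early").
	return config.PeriodConfig{}, false
}

func main() {
	cfgs := []config.PeriodConfig{
		{From: config.NewDayTime(0)},    // oldest period
		{From: config.NewDayTime(1e12)}, // newer period
	}
	_, ok := findConfig(cfgs, config.NewDayTime(5e11))
	fmt.Println(ok) // true: falls in the oldest period
}
```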
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index abecbf6773fd..58f709f0be2f 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -369,7 +369,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
tasksCh := make(chan Task, len(tasks))
for _, task := range tasks {
task := task
- level.Info(logger).Log("msg", "enqueue task", "task", task.ID, "day", task.day, "series", len(task.series))
+ level.Info(logger).Log("msg", "enqueue task", "task", task.ID, "table", task.table, "series", len(task.series))
g.queue.Enqueue(tenantID, []string{}, task, func() {
// When enqueuing, we also add the task to the pending tasks
g.pendingTasks.Add(task.ID, task)
diff --git a/pkg/bloomgateway/multiplexing.go b/pkg/bloomgateway/multiplexing.go
index d2722ad8f149..c952c9f6b87f 100644
--- a/pkg/bloomgateway/multiplexing.go
+++ b/pkg/bloomgateway/multiplexing.go
@@ -12,6 +12,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/config"
)
const (
@@ -69,7 +70,7 @@ type Task struct {
ctx context.Context
// TODO(chaudum): Investigate how to remove that.
- day model.Time
+ table config.DayTime
}
// NewTask returns a new Task that can be enqueued to the task queue.
@@ -89,7 +90,7 @@ func NewTask(ctx context.Context, tenantID string, refs seriesWithBounds, filter
filters: filters,
series: refs.series,
bounds: refs.bounds,
- day: refs.day,
+ table: refs.table,
ctx: ctx,
done: make(chan struct{}),
responses: make([]v1.Output, 0, len(refs.series)),
@@ -129,7 +130,7 @@ func (t Task) Copy(series []*logproto.GroupedChunkRefs) Task {
filters: t.filters,
series: series,
bounds: t.bounds,
- day: t.day,
+ table: t.table,
ctx: t.ctx,
done: make(chan struct{}),
responses: make([]v1.Output, 0, len(series)),
diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go
index 4fe9c38483cb..5eab7a858c74 100644
--- a/pkg/bloomgateway/processor.go
+++ b/pkg/bloomgateway/processor.go
@@ -7,9 +7,9 @@ import (
"time"
"github.com/go-kit/log"
- "github.com/prometheus/common/model"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
)
@@ -35,10 +35,9 @@ type processor struct {
}
func (p *processor) run(ctx context.Context, tasks []Task) error {
- for ts, tasks := range group(tasks, func(t Task) model.Time { return t.day }) {
- interval := bloomshipper.NewInterval(ts, ts.Add(Day))
+ for ts, tasks := range group(tasks, func(t Task) config.DayTime { return t.table }) {
tenant := tasks[0].Tenant
- err := p.processTasks(ctx, tenant, interval, []v1.FingerprintBounds{{Min: 0, Max: math.MaxUint64}}, tasks)
+ err := p.processTasks(ctx, tenant, ts, []v1.FingerprintBounds{{Min: 0, Max: math.MaxUint64}}, tasks)
if err != nil {
for _, task := range tasks {
task.CloseWithError(err)
@@ -52,8 +51,9 @@ func (p *processor) run(ctx context.Context, tasks []Task) error {
return nil
}
-func (p *processor) processTasks(ctx context.Context, tenant string, interval bloomshipper.Interval, keyspaces []v1.FingerprintBounds, tasks []Task) error {
+func (p *processor) processTasks(ctx context.Context, tenant string, day config.DayTime, keyspaces []v1.FingerprintBounds, tasks []Task) error {
minFpRange, maxFpRange := getFirstLast(keyspaces)
+ interval := bloomshipper.NewInterval(day.Bounds())
metaSearch := bloomshipper.MetaSearchParams{
TenantID: tenant,
Interval: interval,
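run now buckets tasks by their day table before resolving metas, so each group is matched against a single day's interval. The group helper's exact shape isn't shown in this hunk; a plausible generic version for illustration only (config.DayTime wraps a model.Time and is comparable, so it can key a map):

```go
package main

import "fmt"

// group collects items under a derived key, the way processor.run buckets
// tasks by task.table before calling processTasks once per day.
func group[K comparable, V any](items []V, key func(V) K) map[K][]V {
	out := make(map[K][]V)
	for _, item := range items {
		k := key(item)
		out[k] = append(out[k], item)
	}
	return out
}

func main() {
	words := []string{"ant", "apple", "bee"}
	fmt.Println(group(words, func(s string) byte { return s[0] }))
	// map[97:[ant apple] 98:[bee]]
}
```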
diff --git a/pkg/bloomgateway/processor_test.go b/pkg/bloomgateway/processor_test.go
index c4c8f8457b3a..27d0068753d5 100644
--- a/pkg/bloomgateway/processor_test.go
+++ b/pkg/bloomgateway/processor_test.go
@@ -15,6 +15,7 @@ import (
"go.uber.org/atomic"
"github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
"github.com/grafana/loki/pkg/util/constants"
)
@@ -109,7 +110,7 @@ func TestProcessor(t *testing.T) {
Start: now.Add(-1 * time.Hour),
End: now,
},
- day: truncateDay(now),
+ table: config.NewDayTime(truncateDay(now)),
}
filters := []syntax.LineFilter{
{Ty: 0, Match: "no match"},
@@ -153,7 +154,7 @@ func TestProcessor(t *testing.T) {
Start: now.Add(-1 * time.Hour),
End: now,
},
- day: truncateDay(now),
+ table: config.NewDayTime(truncateDay(now)),
}
filters := []syntax.LineFilter{
{Ty: 0, Match: "no match"},
diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go
index cf72aec3b5b4..3793076f7c38 100644
--- a/pkg/bloomgateway/util.go
+++ b/pkg/bloomgateway/util.go
@@ -11,6 +11,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
)
@@ -121,7 +122,7 @@ func partitionFingerprintRange(tasks []Task, blocks []bloomshipper.BlockRef) (re
type seriesWithBounds struct {
bounds model.Interval
- day model.Time
+ table config.DayTime
series []*logproto.GroupedChunkRefs
}
@@ -173,7 +174,7 @@ func partitionRequest(req *logproto.FilterChunkRefRequest) []seriesWithBounds {
Start: minTs,
End: maxTs,
},
- day: day,
+ table: config.NewDayTime(day),
series: res,
})
}
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
index 8fc37f20bac8..5f4d254e8f04 100644
--- a/pkg/bloomgateway/util_test.go
+++ b/pkg/bloomgateway/util_test.go
@@ -176,7 +176,7 @@ func TestPartitionRequest(t *testing.T) {
exp: []seriesWithBounds{
{
bounds: model.Interval{Start: ts.Add(-60 * time.Minute), End: ts.Add(-45 * time.Minute)},
- day: mktime("2024-01-24 00:00"),
+ table: config.NewDayTime(mktime("2024-01-24 00:00")),
series: []*logproto.GroupedChunkRefs{
{
Fingerprint: 0x00,
@@ -217,7 +217,7 @@ func TestPartitionRequest(t *testing.T) {
exp: []seriesWithBounds{
{
bounds: model.Interval{Start: ts.Add(-23 * time.Hour), End: ts.Add(-22 * time.Hour)},
- day: mktime("2024-01-23 00:00"),
+ table: config.NewDayTime(mktime("2024-01-23 00:00")),
series: []*logproto.GroupedChunkRefs{
{
Fingerprint: 0x00,
@@ -229,7 +229,7 @@ func TestPartitionRequest(t *testing.T) {
},
{
bounds: model.Interval{Start: ts.Add(-2 * time.Hour), End: ts.Add(-1 * time.Hour)},
- day: mktime("2024-01-24 00:00"),
+ table: config.NewDayTime(mktime("2024-01-24 00:00")),
series: []*logproto.GroupedChunkRefs{
{
Fingerprint: 0x01,
@@ -258,7 +258,7 @@ func TestPartitionRequest(t *testing.T) {
exp: []seriesWithBounds{
{
bounds: model.Interval{Start: ts.Add(-13 * time.Hour), End: ts.Add(-11 * time.Hour)},
- day: mktime("2024-01-23 00:00"),
+ table: config.NewDayTime(mktime("2024-01-23 00:00")),
series: []*logproto.GroupedChunkRefs{
{
Fingerprint: 0x00,
@@ -270,7 +270,7 @@ func TestPartitionRequest(t *testing.T) {
},
{
bounds: model.Interval{Start: ts.Add(-13 * time.Hour), End: ts.Add(-11 * time.Hour)},
- day: mktime("2024-01-24 00:00"),
+ table: config.NewDayTime(mktime("2024-01-24 00:00")),
series: []*logproto.GroupedChunkRefs{
{
Fingerprint: 0x00,
@@ -311,7 +311,7 @@ func createBlocks(t *testing.T, tenant string, n int, from, through model.Time,
}
ref := bloomshipper.Ref{
TenantID: tenant,
- TableName: "table_0",
+ TableName: config.NewDayTime(truncateDay(from)).Table(),
Bounds: v1.NewBounds(fromFp, throughFp),
StartTimestamp: from,
EndTimestamp: through,
diff --git a/pkg/querier/queryrange/limits.go b/pkg/querier/queryrange/limits.go
index 79cc9ad16a36..2d1453190969 100644
--- a/pkg/querier/queryrange/limits.go
+++ b/pkg/querier/queryrange/limits.go
@@ -575,7 +575,7 @@ func WeightedParallelism(
// config because query is in future
// or
// there is overlap with current config
- finalOrFuture := i == len(configs)-1 || configs[i].From.After(end)
+ finalOrFuture := i == len(configs)-1 || configs[i].From.Time.After(end)
if finalOrFuture {
return true
}
@@ -605,7 +605,7 @@ func WeightedParallelism(
var tsdbDur, otherDur time.Duration
- for ; i < len(configs) && configs[i].From.Before(end); i++ {
+ for ; i < len(configs) && configs[i].From.Time.Before(end); i++ {
_, from := minMaxModelTime(start, configs[i].From.Time)
through := end
if i+1 < len(configs) {
diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go
index 9cdda249ea52..30b9de98b14b 100644
--- a/pkg/storage/config/schema_config.go
+++ b/pkg/storage/config/schema_config.go
@@ -200,6 +200,14 @@ func (cfg *PeriodConfig) GetIndexTableNumberRange(schemaEndDate DayTime) TableRa
}
}
+func (cfg *PeriodConfig) GetFullTableName(t model.Time) string {
+ return NewDayTime(t).TableWithPrefix(cfg)
+}
+
+func NewDayTime(d model.Time) DayTime {
+ return DayTime{d}
+}
+
// DayTime is a model.Time that holds day-aligned values, and marshals to/from
// YAML in YYYY-MM-DD format.
type DayTime struct {
@@ -225,10 +233,45 @@ func (d *DayTime) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil
}
-func (d *DayTime) String() string {
+func (d DayTime) String() string {
return d.Time.Time().UTC().Format("2006-01-02")
}
+func (d DayTime) Table() string {
+ return fmt.Sprintf("%d",
+ d.ModelTime().Time().UnixNano()/int64(ObjectStorageIndexRequiredPeriod))
+}
+
+func (d DayTime) TableWithPrefix(cfg *PeriodConfig) string {
+ return fmt.Sprintf("%s%d",
+ cfg.IndexTables.Prefix,
+ d.ModelTime().Time().UnixNano()/int64(ObjectStorageIndexRequiredPeriod))
+}
+
+func (d DayTime) Inc() DayTime {
+ return DayTime{d.Add(ObjectStorageIndexRequiredPeriod)}
+}
+
+func (d DayTime) Dec() DayTime {
+ return DayTime{d.Add(-ObjectStorageIndexRequiredPeriod)}
+}
+
+func (d DayTime) Before(other DayTime) bool {
+ return d.Time.Before(other.Time)
+}
+
+func (d DayTime) After(other DayTime) bool {
+ return d.Time.After(other.Time)
+}
+
+func (d DayTime) ModelTime() model.Time {
+ return d.Time
+}
+
+func (d DayTime) Bounds() (model.Time, model.Time) {
+ return d.Time, d.Inc().Time
+}
+
// SchemaConfig contains the config for our chunk index schemas
type SchemaConfig struct {
Configs []PeriodConfig `yaml:"configs"`
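GetFullTableName, added above, is a convenience over TableWithPrefix. A small sketch of the resulting names, assuming an "index_" prefix on the period's index tables (the field path follows the cfg.IndexTables.Prefix reference in TableWithPrefix):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"

	"github.com/grafana/loki/pkg/storage/config"
)

func main() {
	period := &config.PeriodConfig{}
	period.IndexTables.Prefix = "index_" // illustrative prefix

	// Day 19675 (midnight UTC): the full table name is prefix + day number.
	ts := model.TimeFromUnix(1_699_920_000)
	fmt.Println(period.GetFullTableName(ts)) // "index_19675"
}
```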
diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go
index 80eba70d18cd..1dbfac579c5a 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client.go
@@ -345,7 +345,7 @@ func (b *BloomClient) GetMeta(ctx context.Context, ref MetaRef) (Meta, error) {
func findPeriod(configs []config.PeriodConfig, ts model.Time) (config.DayTime, error) {
for i := len(configs) - 1; i >= 0; i-- {
periodConfig := configs[i]
- if !periodConfig.From.After(ts) {
+ if !periodConfig.From.Time.After(ts) {
return periodConfig.From, nil
}
}
From 1b4d23f9b754458a311ff82fd6b1d52134bcc5e7 Mon Sep 17 00:00:00 2001
From: Justin Burnham
Date: Wed, 14 Feb 2024 00:59:50 -0800
Subject: [PATCH 061/130] Ruler: Disable x-scope-orgid header append in remote
write (#11819)
Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com>
Co-authored-by: Danny Kopping
---
CHANGELOG.md | 1 +
docs/sources/configure/_index.md | 4 ++++
pkg/ruler/config.go | 2 ++
pkg/ruler/registry.go | 6 +++--
pkg/ruler/registry_test.go | 39 ++++++++++++++++++++++++++++++++
5 files changed, 50 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 60b9e3dc5e2c..68841de451db 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@
##### Enhancements
+* [11819](https://github.com/grafana/loki/pull/11819) **jburnham**: Ruler: Add the ability to disable the `X-Scope-OrgId` tenant identification header in remote write requests.
* [11633](https://github.com/grafana/loki/pull/11633) **cyriltovena**: Add profiling integrations to tracing instrumentation.
* [11571](https://github.com/grafana/loki/pull/11571) **MichelHollands**: Add a metrics.go log line for requests from querier to ingester
* [11477](https://github.com/grafana/loki/pull/11477) **MichelHollands**: support GET for /ingester/shutdown
diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md
index d5dd9b43bd14..382890b5bcab 100644
--- a/docs/sources/configure/_index.md
+++ b/docs/sources/configure/_index.md
@@ -1274,6 +1274,10 @@ remote_write:
# CLI flag: -ruler.remote-write.config-refresh-period
[config_refresh_period: <duration> | default = 10s]
+ # Add X-Scope-OrgID header in remote write requests.
+ # CLI flag: -ruler.remote-write.add-org-id-header
+ [add_org_id_header: <boolean> | default = true]
+
# Configuration for rule evaluation.
evaluation:
# The evaluation mode for the ruler. Can be either 'local' or 'remote'. If set
diff --git a/pkg/ruler/config.go b/pkg/ruler/config.go
index 22a19851a430..7d948baa0c30 100644
--- a/pkg/ruler/config.go
+++ b/pkg/ruler/config.go
@@ -56,6 +56,7 @@ type RemoteWriteConfig struct {
Clients map[string]config.RemoteWriteConfig `yaml:"clients,omitempty" doc:"description=Configure remote write clients. A map with remote client id as key."`
Enabled bool `yaml:"enabled"`
ConfigRefreshPeriod time.Duration `yaml:"config_refresh_period"`
+ AddOrgIDHeader bool `yaml:"add_org_id_header" doc:"description=Add X-Scope-OrgID header in remote write requests."`
}
func (c *RemoteWriteConfig) Validate() error {
@@ -108,6 +109,7 @@ func (c *RemoteWriteConfig) Clone() (*RemoteWriteConfig, error) {
// RegisterFlags adds the flags required to config this to the given FlagSet.
func (c *RemoteWriteConfig) RegisterFlags(f *flag.FlagSet) {
+ f.BoolVar(&c.AddOrgIDHeader, "ruler.remote-write.add-org-id-header", true, "Add X-Scope-OrgID header in remote write requests.")
f.BoolVar(&c.Enabled, "ruler.remote-write.enabled", false, "Enable remote-write functionality.")
f.DurationVar(&c.ConfigRefreshPeriod, "ruler.remote-write.config-refresh-period", 10*time.Second, "Minimum period to wait between refreshing remote-write reconfigurations. This should be greater than or equivalent to -limits.per-user-override-period.")
diff --git a/pkg/ruler/registry.go b/pkg/ruler/registry.go
index adb4f7cf8667..90a68d60c90b 100644
--- a/pkg/ruler/registry.go
+++ b/pkg/ruler/registry.go
@@ -212,8 +212,10 @@ func (r *walRegistry) getTenantConfig(tenant string) (instance.Config, error) {
}
}
- // always inject the X-Scope-OrgId header for multi-tenant metrics backends
- clt.Headers[user.OrgIDHeaderName] = tenant
+ if rwCfg.AddOrgIDHeader {
+ // inject the X-Scope-OrgId header for multi-tenant metrics backends
+ clt.Headers[user.OrgIDHeaderName] = tenant
+ }
rwCfg.Clients[id] = clt
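With the guard in place, the header injection is opt-out via the new flag. A quick sketch of flipping it off through flag parsing, using only the type and flag name added in this commit:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/grafana/loki/pkg/ruler"
)

func main() {
	var cfg ruler.RemoteWriteConfig
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	cfg.RegisterFlags(fs)

	// The flag defaults to true, preserving the old always-inject behavior;
	// setting it to false skips the X-Scope-OrgID header entirely.
	_ = fs.Parse([]string{"-ruler.remote-write.add-org-id-header=false"})
	fmt.Println(cfg.AddOrgIDHeader) // false
}
```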
diff --git a/pkg/ruler/registry_test.go b/pkg/ruler/registry_test.go
index 9e200e43ad3a..46ab9a708457 100644
--- a/pkg/ruler/registry_test.go
+++ b/pkg/ruler/registry_test.go
@@ -47,6 +47,7 @@ const remote2 = "remote-2"
var remoteURL, _ = url.Parse("http://remote-write")
var backCompatCfg = Config{
RemoteWrite: RemoteWriteConfig{
+ AddOrgIDHeader: true,
Client: &config.RemoteWriteConfig{
URL: &promConfig.URL{URL: remoteURL},
QueueConfig: config.QueueConfig{
@@ -105,6 +106,7 @@ var backCompatCfg = Config{
var remoteURL2, _ = url.Parse("http://remote-write2")
var cfg = Config{
RemoteWrite: RemoteWriteConfig{
+ AddOrgIDHeader: true,
Clients: map[string]config.RemoteWriteConfig{
remote1: {
URL: &promConfig.URL{URL: remoteURL},
@@ -751,6 +753,43 @@ func TestTenantRemoteWriteHeadersNoOverride(t *testing.T) {
assert.ElementsMatch(t, actual, expected, "Headers do not match")
}
+func TestTenantRemoteWriteHeadersNoOrgIDHeader(t *testing.T) {
+ backCompatCfg.RemoteWrite.AddOrgIDHeader = false
+ reg := setupRegistry(t, backCompatCfg, newFakeLimitsBackwardCompat())
+
+ tenantCfg, err := reg.getTenantConfig(enabledRWTenant)
+ require.NoError(t, err)
+
+ assert.Len(t, tenantCfg.RemoteWrite[0].Headers, 1)
+ // ensure that X-Scope-OrgId header is missing
+ assert.Equal(t, tenantCfg.RemoteWrite[0].Headers[user.OrgIDHeaderName], "")
+ // the original header must be present
+ assert.Equal(t, tenantCfg.RemoteWrite[0].Headers["Base"], "value")
+
+ cfg.RemoteWrite.AddOrgIDHeader = false
+ reg = setupRegistry(t, cfg, newFakeLimits())
+
+ tenantCfg, err = reg.getTenantConfig(enabledRWTenant)
+ require.NoError(t, err)
+
+ // Ensure that overrides take place and that the X-Scope-OrgID header is still missing
+ expected := []map[string]string{
+ {
+ "Base": "value",
+ },
+ {
+ "Base": "value2",
+ },
+ }
+
+ actual := []map[string]string{}
+ for _, rw := range tenantCfg.RemoteWrite {
+ actual = append(actual, rw.Headers)
+ }
+
+ assert.ElementsMatch(t, actual, expected, "Headers do not match")
+}
+
func TestRelabelConfigOverrides(t *testing.T) {
reg := setupRegistry(t, backCompatCfg, newFakeLimitsBackwardCompat())
From 9e7725b31b19792dad692afd9ad7e9804c04bfc1 Mon Sep 17 00:00:00 2001
From: Ashwanth
Date: Wed, 14 Feb 2024 16:46:38 +0530
Subject: [PATCH 062/130] feat(metadata): introduce a separate split interval
for recent query window (#11897)
---
CHANGELOG.md | 1 +
docs/sources/configure/_index.md | 24 ++
pkg/bloomgateway/cache.go | 1 +
pkg/querier/queryrange/index_stats_cache.go | 1 +
pkg/querier/queryrange/labels_cache.go | 33 +-
pkg/querier/queryrange/labels_cache_test.go | 334 +++++++---------
pkg/querier/queryrange/limits/definitions.go | 2 +
.../queryrangebase/results_cache.go | 2 +
.../queryrangebase/results_cache_test.go | 3 +
pkg/querier/queryrange/roundtrip.go | 3 +-
pkg/querier/queryrange/roundtrip_test.go | 52 ++-
pkg/querier/queryrange/series_cache.go | 9 +-
pkg/querier/queryrange/series_cache_test.go | 366 +++++++----------
.../queryrange/split_by_interval_test.go | 370 +++++++++++++++++-
pkg/querier/queryrange/splitters.go | 69 +++-
pkg/querier/queryrange/volume_cache.go | 1 +
pkg/storage/chunk/cache/resultscache/cache.go | 24 +-
.../chunk/cache/resultscache/cache_test.go | 120 +++++-
pkg/validation/limits.go | 37 +-
19 files changed, 955 insertions(+), 497 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 68841de451db..7f091ed06f88 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -55,6 +55,7 @@
* [11143](https://github.com/grafana/loki/pull/11143) **sandeepsukhani** otel: Add support for per tenant configuration for mapping otlp data to loki format
* [11499](https://github.com/grafana/loki/pull/11284) **jmichalek132** Config: Adds `frontend.log-query-request-headers` to enable logging of request headers in query logs.
* [11817](https://github.com/grafana/loki/pull/11817) **ashwanthgoli** Ruler: Add support for filtering results of `/prometheus/api/v1/rules` endpoint by rule_name, rule_group, file and type.
+* [11897](https://github.com/grafana/loki/pull/11897) **ashwanthgoli** Metadata: Introduces a separate split interval of `split_recent_metadata_queries_by_interval` for `recent_metadata_query_window` to help with caching recent metadata query results.
##### Fixes
* [11074](https://github.com/grafana/loki/pull/11074) **hainenber** Fix panic in lambda-promtail due to mishandling of empty DROP_LABELS env var.
diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md
index 382890b5bcab..c30f8da01fa2 100644
--- a/docs/sources/configure/_index.md
+++ b/docs/sources/configure/_index.md
@@ -2911,6 +2911,30 @@ The `limits_config` block configures global and per-tenant limits in Loki.
# CLI flag: -querier.split-metadata-queries-by-interval
[split_metadata_queries_by_interval: <duration> | default = 1d]
+# Experimental. Split interval to use for the portion of a metadata request
+# that falls within `recent_metadata_query_window`. The rest of the request,
+# which is outside the window, still uses `split_metadata_queries_by_interval`.
+# If set to 0, the entire request defaults to using a split interval of
+# `split_metadata_queries_by_interval`.
+# CLI flag: -experimental.querier.split-recent-metadata-queries-by-interval
+[split_recent_metadata_queries_by_interval: <duration> | default = 1h]
+
+# Experimental. Metadata query window inside which
+# `split_recent_metadata_queries_by_interval` gets applied; the portion of the
+# metadata request that falls in this window is split using
+# `split_recent_metadata_queries_by_interval`. The value 0 disables using a
+# different split interval for recent metadata queries.
+#
+# This is added to improve the cacheability of recent metadata queries. The
+# query split interval also determines the interval used in the cache key. The
+# default split interval of 24h is useful for caching long queries, with each
+# cache key holding one day's results. But metadata queries are often shorter
+# than 24h; to cache them effectively we need a smaller split interval.
+# `recent_metadata_query_window` together with
+# `split_recent_metadata_queries_by_interval` helps configure a shorter split
+# interval for recent metadata queries.
+# CLI flag: -experimental.querier.recent-metadata-query-window
+[recent_metadata_query_window: <duration> | default = 0s]
+
# Interval to use for time-based splitting when a request is within the
# `query_ingesters_within` window; defaults to `split-queries-by-interval` by
# setting to 0.
diff --git a/pkg/bloomgateway/cache.go b/pkg/bloomgateway/cache.go
index fe40b87e9548..6c573cb47d6d 100644
--- a/pkg/bloomgateway/cache.go
+++ b/pkg/bloomgateway/cache.go
@@ -182,6 +182,7 @@ func NewBloomGatewayClientCacheMiddleware(
},
cacheGen,
retentionEnabled,
+ false,
)
return &ClientCache{
diff --git a/pkg/querier/queryrange/index_stats_cache.go b/pkg/querier/queryrange/index_stats_cache.go
index d52f2e22323f..a91721bf3687 100644
--- a/pkg/querier/queryrange/index_stats_cache.go
+++ b/pkg/querier/queryrange/index_stats_cache.go
@@ -123,6 +123,7 @@ func NewIndexStatsCacheMiddleware(
},
parallelismForReq,
retentionEnabled,
+ false,
metrics,
)
}
diff --git a/pkg/querier/queryrange/labels_cache.go b/pkg/querier/queryrange/labels_cache.go
index 66c811490403..3a940e34fa03 100644
--- a/pkg/querier/queryrange/labels_cache.go
+++ b/pkg/querier/queryrange/labels_cache.go
@@ -11,21 +11,42 @@ import (
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
- "github.com/grafana/loki/pkg/util"
+ "github.com/grafana/loki/pkg/util/validation"
)
type cacheKeyLabels struct {
Limits
transformer UserIDTransformer
- iqo util.IngesterQueryOptions
+}
+
+// metadataSplitIntervalForTimeRange returns split interval for series and label requests.
+// If `recent_metadata_query_window` is configured and the query start is within this window,
+// it returns `split_recent_metadata_queries_by_interval`.
+// For other cases, the default split interval of `split_metadata_queries_by_interval` will be used.
+func metadataSplitIntervalForTimeRange(limits Limits, tenantIDs []string, ref, start time.Time) time.Duration {
+ split := validation.MaxDurationOrZeroPerTenant(tenantIDs, limits.MetadataQuerySplitDuration)
+
+ recentMetadataQueryWindow := validation.MaxDurationOrZeroPerTenant(tenantIDs, limits.RecentMetadataQueryWindow)
+ recentMetadataQuerySplitInterval := validation.MaxDurationOrZeroPerTenant(tenantIDs, limits.RecentMetadataQuerySplitDuration)
+
+ // if either of the options is not configured, use the default metadata split interval
+ if recentMetadataQueryWindow == 0 || recentMetadataQuerySplitInterval == 0 {
+ return split
+ }
+
+ // if the query start is not before the window start, it is split using recentMetadataQuerySplitInterval
+ if windowStart := ref.Add(-recentMetadataQueryWindow); !start.Before(windowStart) {
+ split = recentMetadataQuerySplitInterval
+ }
+
+ return split
}
// GenerateCacheKey generates a cache key based on the userID, split duration and the interval of the request.
// It also includes the label name and the provided query for label values request.
func (i cacheKeyLabels) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
lr := r.(*LabelRequest)
-
- split := SplitIntervalForTimeRange(i.iqo, i.Limits, i.MetadataQuerySplitDuration, []string{userID}, time.Now().UTC(), r.GetEnd().UTC())
+ split := metadataSplitIntervalForTimeRange(i.Limits, []string{userID}, time.Now().UTC(), r.GetStart().UTC())
var currentInterval int64
if denominator := int64(split / time.Millisecond); denominator > 0 {
@@ -80,7 +101,6 @@ func NewLabelsCacheMiddleware(
merger queryrangebase.Merger,
c cache.Cache,
cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
- iqo util.IngesterQueryOptions,
shouldCache queryrangebase.ShouldCacheFn,
parallelismForReq queryrangebase.ParallelismForReqFn,
retentionEnabled bool,
@@ -90,7 +110,7 @@ func NewLabelsCacheMiddleware(
return queryrangebase.NewResultsCacheMiddleware(
logger,
c,
- cacheKeyLabels{limits, transformer, iqo},
+ cacheKeyLabels{limits, transformer},
limits,
merger,
labelsExtractor{},
@@ -100,6 +120,7 @@ func NewLabelsCacheMiddleware(
},
parallelismForReq,
retentionEnabled,
+ true,
metrics,
)
}
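metadataSplitIntervalForTimeRange keys the choice off the query start relative to a window anchored at now: starts inside the window get the finer recent split, everything else keeps the default. A single-tenant arithmetic sketch; pickSplit is our name and the durations are illustrative:

```go
package main

import (
	"fmt"
	"time"
)

// pickSplit mirrors the selection logic for one tenant: fall back to the
// default split unless both recent-query options are set and the query
// starts inside the recent window.
func pickSplit(defaultSplit, recentWindow, recentSplit time.Duration, now, start time.Time) time.Duration {
	if recentWindow == 0 || recentSplit == 0 {
		return defaultSplit
	}
	if windowStart := now.Add(-recentWindow); !start.Before(windowStart) {
		return recentSplit
	}
	return defaultSplit
}

func main() {
	now := time.Now().UTC()
	// Query starting 30m ago, inside a 1h window: use the 10m recent split.
	fmt.Println(pickSplit(24*time.Hour, time.Hour, 10*time.Minute, now, now.Add(-30*time.Minute)))
	// Query starting 2h ago, outside the window: keep the 24h default.
	fmt.Println(pickSplit(24*time.Hour, time.Hour, 10*time.Minute, now, now.Add(-2*time.Hour)))
}
```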
diff --git a/pkg/querier/queryrange/labels_cache_test.go b/pkg/querier/queryrange/labels_cache_test.go
index 4c645b8d19ce..90b85cb1faf8 100644
--- a/pkg/querier/queryrange/labels_cache_test.go
+++ b/pkg/querier/queryrange/labels_cache_test.go
@@ -70,7 +70,6 @@ func TestLabelsCache(t *testing.T) {
cache.NewMockCache(),
nil,
nil,
- nil,
func(_ context.Context, _ []string, _ queryrangebase.Request) int {
return 1
},
@@ -82,173 +81,124 @@ func TestLabelsCache(t *testing.T) {
return cacheMiddleware
}
- cacheMiddleware := setupCacheMW()
- for _, values := range []bool{false, true} {
- prefix := "labels"
- if values {
- prefix = "label values"
- }
- t.Run(prefix+": cache the response for the same request", func(t *testing.T) {
- start := testTime.Truncate(time.Millisecond)
- end := start.Add(time.Hour)
-
- labelsReq := LabelRequest{
- LabelRequest: logproto.LabelRequest{
- Start: &start,
- End: &end,
- },
- }
-
- if values {
- labelsReq.Values = true
- labelsReq.Name = "foo"
- labelsReq.Query = `{cluster="eu-west1"}`
- }
-
- labelsResp := &LokiLabelNamesResponse{
- Status: "success",
- Version: uint32(loghttp.VersionV1),
- Data: []string{"bar", "buzz"},
- Statistics: stats.Result{
- Summary: stats.Summary{
- Splits: 1,
- },
+ composeLabelsResp := func(lbls []string, splits int64) *LokiLabelNamesResponse {
+ return &LokiLabelNamesResponse{
+ Status: "success",
+ Version: uint32(loghttp.VersionV1),
+ Data: lbls,
+ Statistics: stats.Result{
+ Summary: stats.Summary{
+ Splits: splits,
},
- }
-
- called := 0
- handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
- called++
-
- // should request the entire length with no partitioning as nothing is cached yet.
- require.Equal(t, labelsReq.GetStart(), r.GetStart())
- require.Equal(t, labelsReq.GetEnd(), r.GetEnd())
-
- got := r.(*LabelRequest)
- require.Equal(t, labelsReq.GetName(), got.GetName())
- require.Equal(t, labelsReq.GetValues(), got.GetValues())
- require.Equal(t, labelsReq.GetQuery(), got.GetQuery())
-
- return labelsResp, nil
- }))
+ },
+ }
- ctx := user.InjectOrgID(context.Background(), "fake")
- got, err := handler.Do(ctx, &labelsReq)
- require.NoError(t, err)
- require.Equal(t, 1, called) // called actual handler, as not cached.
- require.Equal(t, labelsResp, got)
+ }
- // Doing same request again shouldn't change anything.
- called = 0
- got, err = handler.Do(ctx, &labelsReq)
- require.NoError(t, err)
- require.Equal(t, 0, called)
- require.Equal(t, labelsResp, got)
- })
+ start := testTime.Truncate(time.Millisecond)
+ end := start.Add(time.Hour)
+ labelsReq := &LabelRequest{
+ LabelRequest: logproto.LabelRequest{
+ Start: &start,
+ End: &end,
+ },
}
+ labelsResp := composeLabelsResp([]string{"bar", "buzz"}, 1)
+
+ var downstreamHandlerFunc func(context.Context, queryrangebase.Request) (queryrangebase.Response, error)
+ downstreamHandler := &mockDownstreamHandler{fn: func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
+ return downstreamHandlerFunc(ctx, req)
+ }}
- // reset cacheMiddleware
- cacheMiddleware = setupCacheMW()
for _, values := range []bool{false, true} {
+ labelsReq := labelsReq
prefix := "labels"
+
if values {
- prefix = "label values"
+ prefix = "label values: "
+ labelsReq.Values = true
+ labelsReq.Name = "foo"
+ labelsReq.Query = `{cluster="eu-west1"}`
}
- t.Run(prefix+": a new request with overlapping time range should reuse part of the previous request for the overlap", func(t *testing.T) {
- cacheMiddleware := setupCacheMW()
-
- start := testTime.Truncate(time.Millisecond)
- end := start.Add(time.Hour)
- labelsReq1 := LabelRequest{
- LabelRequest: logproto.LabelRequest{
- Start: &start,
- End: &end,
- },
- }
-
- if values {
- labelsReq1.Values = true
- labelsReq1.Name = "foo"
- labelsReq1.Query = `{cluster="eu-west1"}`
- }
-
- labelsResp1 := &LokiLabelNamesResponse{
- Status: "success",
- Version: uint32(loghttp.VersionV1),
- Data: []string{"bar", "buzz"},
- Statistics: stats.Result{
- Summary: stats.Summary{
- Splits: 1,
- },
- },
- }
+ for _, tc := range []struct {
+ name string
+ req queryrangebase.Request
+ expectedQueryStart, expectedQueryEnd time.Time
+ downstreamResponse *LokiLabelNamesResponse
+ downstreamCalls int
+ expectedResponse *LokiLabelNamesResponse
+ }{
+ {
+ name: "return cached response for the same request",
+ downstreamCalls: 0,
+ expectedResponse: labelsResp,
+ req: labelsReq,
+ },
+ {
+ name: "a new request with overlapping time range should reuse results of the previous request",
+ req: labelsReq.WithStartEnd(labelsReq.GetStart(), labelsReq.GetEnd().Add(15*time.Minute)),
+ expectedQueryStart: labelsReq.GetEnd(),
+ expectedQueryEnd: labelsReq.GetEnd().Add(15 * time.Minute),
+ downstreamCalls: 1,
+ downstreamResponse: composeLabelsResp([]string{"fizz"}, 1),
+ expectedReponse: composeLabelsResp([]string{"bar", "buzz", "fizz"}, 2),
+ },
+ {
+ // To avoid returning incorrect results, we only use extents that are entirely within the requested query range.
+ name: "cached response not entirely within the requested range",
+ req: labelsReq.WithStartEnd(labelsReq.GetStart().Add(15*time.Minute), labelsReq.GetEnd().Add(-15*time.Minute)),
+ expectedQueryStart: labelsReq.GetStart().Add(15 * time.Minute),
+ expectedQueryEnd: labelsReq.GetEnd().Add(-15 * time.Minute),
+ downstreamCalls: 1,
+ downstreamResponse: composeLabelsResp([]string{"buzz", "fizz"}, 1),
+ expectedReponse: composeLabelsResp([]string{"buzz", "fizz"}, 1),
+ },
+ } {
+ t.Run(prefix+tc.name, func(t *testing.T) {
+ cacheMiddleware := setupCacheMW()
+ downstreamHandler.ResetCount()
+ downstreamHandlerFunc = func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
+ // should request the entire length with no partitioning as nothing is cached yet.
+ require.Equal(t, labelsReq.GetStart(), r.GetStart())
+ require.Equal(t, labelsReq.GetEnd(), r.GetEnd())
+
+ got := r.(*LabelRequest)
+ require.Equal(t, labelsReq.GetName(), got.GetName())
+ require.Equal(t, labelsReq.GetValues(), got.GetValues())
+ require.Equal(t, labelsReq.GetQuery(), got.GetQuery())
+
+ return labelsResp, nil
+ }
- called := 0
- handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
- called++
+ handler := cacheMiddleware.Wrap(downstreamHandler)
- // should request the entire length with no partitioning as nothing is cached yet.
- require.Equal(t, labelsReq1.GetStart(), r.GetStart())
- require.Equal(t, labelsReq1.GetEnd(), r.GetEnd())
+ ctx := user.InjectOrgID(context.Background(), "fake")
+ got, err := handler.Do(ctx, labelsReq)
+ require.NoError(t, err)
+ require.Equal(t, 1, downstreamHandler.Called()) // calls downstream handler, as not cached.
+ require.Equal(t, labelsResp, got)
- got := r.(*LabelRequest)
- require.Equal(t, labelsReq1.GetName(), got.GetName())
- require.Equal(t, labelsReq1.GetValues(), got.GetValues())
- require.Equal(t, labelsReq1.GetQuery(), got.GetQuery())
+ downstreamHandler.ResetCount()
+ downstreamHandlerFunc = func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
+ require.Equal(t, tc.expectedQueryStart, r.GetStart())
+ require.Equal(t, tc.expectedQueryEnd, r.GetEnd())
- return labelsResp1, nil
- }))
+ got := r.(*LabelRequest)
+ require.Equal(t, labelsReq.GetName(), got.GetName())
+ require.Equal(t, labelsReq.GetValues(), got.GetValues())
+ require.Equal(t, labelsReq.GetQuery(), got.GetQuery())
- ctx := user.InjectOrgID(context.Background(), "fake")
- got, err := handler.Do(ctx, &labelsReq1)
- require.NoError(t, err)
- require.Equal(t, 1, called)
- require.Equal(t, labelsResp1, got)
+ return tc.downstreamResponse, nil
+ }
- labelsReq2 := labelsReq1.WithStartEnd(labelsReq1.GetStart().Add(15*time.Minute), labelsReq1.GetEnd().Add(15*time.Minute))
+ got, err = handler.Do(ctx, tc.req)
+ require.NoError(t, err)
+ require.Equal(t, tc.downstreamCalls, downstreamHandler.Called())
+ require.Equal(t, tc.expectedResponse, got)
- called = 0
- handler = cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
- called++
-
- // make downstream request only for the non-overlapping portion of the query.
- require.Equal(t, labelsReq1.GetEnd(), r.GetStart())
- require.Equal(t, labelsReq2.GetEnd(), r.GetEnd())
-
- got := r.(*LabelRequest)
- require.Equal(t, labelsReq1.GetName(), got.GetName())
- require.Equal(t, labelsReq1.GetValues(), got.GetValues())
- require.Equal(t, labelsReq1.GetQuery(), got.GetQuery())
-
- return &LokiLabelNamesResponse{
- Status: "success",
- Version: uint32(loghttp.VersionV1),
- Data: []string{"fizz"},
- Statistics: stats.Result{
- Summary: stats.Summary{
- Splits: 1,
- },
- },
- }, nil
- }))
-
- got, err = handler.Do(ctx, labelsReq2)
- require.NoError(t, err)
- require.Equal(t, 1, called)
- // two splits as we merge the results from the extent and downstream request
- labelsResp1.Statistics.Summary.Splits = 2
- require.Equal(t, &LokiLabelNamesResponse{
- Status: "success",
- Version: uint32(loghttp.VersionV1),
- Data: []string{"bar", "buzz", "fizz"},
- Statistics: stats.Result{
- Summary: stats.Summary{
- Splits: 2,
- },
- },
- }, got)
- })
+ })
+ }
}
}
@@ -310,7 +260,6 @@ func TestLabelCache_freshness(t *testing.T) {
cache.NewMockCache(),
nil,
nil,
- nil,
func(_ context.Context, _ []string, _ queryrangebase.Request) int {
return 1
},
@@ -363,67 +312,46 @@ func TestLabelCache_freshness(t *testing.T) {
func TestLabelQueryCacheKey(t *testing.T) {
const (
- defaultTenant = "a"
- alternateTenant = "b"
- defaultSplit = time.Hour
- ingesterSplit = 90 * time.Minute
- ingesterQueryWindow = defaultSplit * 3
+ defaultSplit = time.Hour
+ recentMetadataSplitDuration = 30 * time.Minute
+ recentMetadataQueryWindow = time.Hour
)
l := fakeLimits{
- metadataSplitDuration: map[string]time.Duration{defaultTenant: defaultSplit, alternateTenant: defaultSplit},
- ingesterSplitDuration: map[string]time.Duration{defaultTenant: ingesterSplit},
+ metadataSplitDuration: map[string]time.Duration{tenantID: defaultSplit},
+ recentMetadataSplitDuration: map[string]time.Duration{tenantID: recentMetadataSplitDuration},
+ recentMetadataQueryWindow: map[string]time.Duration{tenantID: recentMetadataQueryWindow},
}
cases := []struct {
- name, tenantID string
- start, end time.Time
- expectedSplit time.Duration
- iqo util.IngesterQueryOptions
- values bool
+ name string
+ start, end time.Time
+ expectedSplit time.Duration
+ values bool
+ limits Limits
}{
{
- name: "outside ingester query window",
- tenantID: defaultTenant,
- start: time.Now().Add(-6 * time.Hour),
- end: time.Now().Add(-5 * time.Hour),
+ name: "outside recent metadata query window",
+ start: time.Now().Add(-3 * time.Hour),
+ end: time.Now().Add(-2 * time.Hour),
expectedSplit: defaultSplit,
- iqo: ingesterQueryOpts{
- queryIngestersWithin: ingesterQueryWindow,
- queryStoreOnly: false,
- },
- },
- {
- name: "within ingester query window",
- tenantID: defaultTenant,
- start: time.Now().Add(-6 * time.Hour),
- end: time.Now().Add(-ingesterQueryWindow / 2),
- expectedSplit: ingesterSplit,
- iqo: ingesterQueryOpts{
- queryIngestersWithin: ingesterQueryWindow,
- queryStoreOnly: false,
- },
+ limits: l,
},
{
- name: "within ingester query window, but query store only",
- tenantID: defaultTenant,
- start: time.Now().Add(-6 * time.Hour),
- end: time.Now().Add(-ingesterQueryWindow / 2),
- expectedSplit: defaultSplit,
- iqo: ingesterQueryOpts{
- queryIngestersWithin: ingesterQueryWindow,
- queryStoreOnly: true,
- },
+ name: "within recent metadata query window",
+ start: time.Now().Add(-30 * time.Minute),
+ end: time.Now(),
+ expectedSplit: recentMetadataSplitDuration,
+ limits: l,
},
{
- name: "within ingester query window, but no ingester split duration configured",
- tenantID: alternateTenant,
- start: time.Now().Add(-6 * time.Hour),
- end: time.Now().Add(-ingesterQueryWindow / 2),
+ name: "within recent metadata query window, but recent split duration is not configured",
+ start: time.Now().Add(-30 * time.Minute),
+ end: time.Now(),
expectedSplit: defaultSplit,
- iqo: ingesterQueryOpts{
- queryIngestersWithin: ingesterQueryWindow,
- queryStoreOnly: false,
+ limits: fakeLimits{
+ metadataSplitDuration: map[string]time.Duration{tenantID: defaultSplit},
+ recentMetadataQueryWindow: map[string]time.Duration{tenantID: recentMetadataQueryWindow},
},
},
}
@@ -431,7 +359,7 @@ func TestLabelQueryCacheKey(t *testing.T) {
for _, values := range []bool{true, false} {
for _, tc := range cases {
t.Run(fmt.Sprintf("%s (values: %v)", tc.name, values), func(t *testing.T) {
- keyGen := cacheKeyLabels{l, nil, tc.iqo}
+ keyGen := cacheKeyLabels{tc.limits, nil}
r := &LabelRequest{
LabelRequest: logproto.LabelRequest{
@@ -453,12 +381,12 @@ func TestLabelQueryCacheKey(t *testing.T) {
// and therefore we can't know the current interval a priori without duplicating the logic
var pattern *regexp.Regexp
if values {
- pattern = regexp.MustCompile(fmt.Sprintf(`labelvalues:%s:%s:%s:(\d+):%d`, tc.tenantID, labelName, regexp.QuoteMeta(query), tc.expectedSplit))
+ pattern = regexp.MustCompile(fmt.Sprintf(`labelvalues:%s:%s:%s:(\d+):%d`, tenantID, labelName, regexp.QuoteMeta(query), tc.expectedSplit))
} else {
- pattern = regexp.MustCompile(fmt.Sprintf(`labels:%s:(\d+):%d`, tc.tenantID, tc.expectedSplit))
+ pattern = regexp.MustCompile(fmt.Sprintf(`labels:%s:(\d+):%d`, tenantID, tc.expectedSplit))
}
- require.Regexp(t, pattern, keyGen.GenerateCacheKey(context.Background(), tc.tenantID, r))
+ require.Regexp(t, pattern, keyGen.GenerateCacheKey(context.Background(), tenantID, r))
})
}
}
diff --git a/pkg/querier/queryrange/limits/definitions.go b/pkg/querier/queryrange/limits/definitions.go
index e12255883bf4..3e78b3442076 100644
--- a/pkg/querier/queryrange/limits/definitions.go
+++ b/pkg/querier/queryrange/limits/definitions.go
@@ -15,6 +15,8 @@ type Limits interface {
logql.Limits
QuerySplitDuration(string) time.Duration
MetadataQuerySplitDuration(string) time.Duration
+ RecentMetadataQuerySplitDuration(string) time.Duration
+ RecentMetadataQueryWindow(string) time.Duration
IngesterQuerySplitDuration(string) time.Duration
MaxQuerySeries(context.Context, string) int
MaxEntriesLimitPerQuery(context.Context, string) int
diff --git a/pkg/querier/queryrange/queryrangebase/results_cache.go b/pkg/querier/queryrange/queryrangebase/results_cache.go
index 097dc264d32a..3511fe0b7dd3 100644
--- a/pkg/querier/queryrange/queryrangebase/results_cache.go
+++ b/pkg/querier/queryrange/queryrangebase/results_cache.go
@@ -127,6 +127,7 @@ func NewResultsCacheMiddleware(
shouldCache ShouldCacheFn,
parallelismForReq ParallelismForReqFn,
retentionEnabled bool,
+ onlyUseEntireExtent bool,
metrics *ResultsCacheMetrics,
) (Middleware, error) {
if cacheGenNumberLoader != nil {
@@ -172,6 +173,7 @@ func NewResultsCacheMiddleware(
parallelismForReqWrapper,
cacheGenNumberLoader,
retentionEnabled,
+ onlyUseEntireExtent,
)
return out
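The new onlyUseEntireExtent knob exists because label and series responses cannot be trimmed by time the way metric samples can: an extent that only partially overlaps the request could contribute values from outside the requested range (see the test comment later in this commit). A sketch of the intended containment rule, with types of our own for illustration:

```go
package main

import "fmt"

type extent struct{ start, end int64 } // cached extent bounds, unix millis

// usable reports whether a cached extent may serve a request when
// onlyUseEntireExtent is set: it must lie entirely inside the requested
// range; otherwise the span is fetched from the downstream handler instead.
func usable(e extent, reqStart, reqEnd int64) bool {
	return e.start >= reqStart && e.end <= reqEnd
}

func main() {
	fmt.Println(usable(extent{100, 160}, 100, 200)) // true: fully contained
	fmt.Println(usable(extent{80, 160}, 100, 200))  // false: starts earlier
}
```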
diff --git a/pkg/querier/queryrange/queryrangebase/results_cache_test.go b/pkg/querier/queryrange/queryrangebase/results_cache_test.go
index ff5e5be09a48..6706e6a2d9fa 100644
--- a/pkg/querier/queryrange/queryrangebase/results_cache_test.go
+++ b/pkg/querier/queryrange/queryrangebase/results_cache_test.go
@@ -422,6 +422,7 @@ func TestResultsCache(t *testing.T) {
return mockLimits{}.MaxQueryParallelism(context.Background(), "fake")
},
false,
+ false,
nil,
)
require.NoError(t, err)
@@ -468,6 +469,7 @@ func TestResultsCacheRecent(t *testing.T) {
return mockLimits{}.MaxQueryParallelism(context.Background(), "fake")
},
false,
+ false,
nil,
)
require.NoError(t, err)
@@ -578,6 +580,7 @@ func TestResultsCacheShouldCacheFunc(t *testing.T) {
return mockLimits{}.MaxQueryParallelism(context.Background(), "fake")
},
false,
+ false,
nil,
)
require.NoError(t, err)
diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go
index 8223704eea02..10246f4d8277 100644
--- a/pkg/querier/queryrange/roundtrip.go
+++ b/pkg/querier/queryrange/roundtrip.go
@@ -514,7 +514,6 @@ func NewSeriesTripperware(
merger,
c,
cacheGenNumLoader,
- iqo,
func(_ context.Context, r base.Request) bool {
return !r.GetCachingOptions().Disabled
},
@@ -600,7 +599,6 @@ func NewLabelsTripperware(
merger,
c,
cacheGenNumLoader,
- iqo,
func(_ context.Context, r base.Request) bool {
return !r.GetCachingOptions().Disabled
},
@@ -679,6 +677,7 @@ func NewMetricTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Logge
)
},
retentionEnabled,
+ false,
metrics.ResultsCacheMetrics,
)
if err != nil {
diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go
index c7c7cff4595a..7d74b0dd615c 100644
--- a/pkg/querier/queryrange/roundtrip_test.go
+++ b/pkg/querier/queryrange/roundtrip_test.go
@@ -1237,24 +1237,26 @@ func TestMetricsTripperware_SplitShardStats(t *testing.T) {
}
type fakeLimits struct {
- maxQueryLength time.Duration
- maxQueryParallelism int
- tsdbMaxQueryParallelism int
- maxQueryLookback time.Duration
- maxEntriesLimitPerQuery int
- maxSeries int
- splitDuration map[string]time.Duration
- metadataSplitDuration map[string]time.Duration
- ingesterSplitDuration map[string]time.Duration
- minShardingLookback time.Duration
- queryTimeout time.Duration
- requiredLabels []string
- requiredNumberLabels int
- maxQueryBytesRead int
- maxQuerierBytesRead int
- maxStatsCacheFreshness time.Duration
- maxMetadataCacheFreshness time.Duration
- volumeEnabled bool
+ maxQueryLength time.Duration
+ maxQueryParallelism int
+ tsdbMaxQueryParallelism int
+ maxQueryLookback time.Duration
+ maxEntriesLimitPerQuery int
+ maxSeries int
+ splitDuration map[string]time.Duration
+ metadataSplitDuration map[string]time.Duration
+ recentMetadataSplitDuration map[string]time.Duration
+ recentMetadataQueryWindow map[string]time.Duration
+ ingesterSplitDuration map[string]time.Duration
+ minShardingLookback time.Duration
+ queryTimeout time.Duration
+ requiredLabels []string
+ requiredNumberLabels int
+ maxQueryBytesRead int
+ maxQuerierBytesRead int
+ maxStatsCacheFreshness time.Duration
+ maxMetadataCacheFreshness time.Duration
+ volumeEnabled bool
}
func (f fakeLimits) QuerySplitDuration(key string) time.Duration {
@@ -1271,6 +1273,20 @@ func (f fakeLimits) MetadataQuerySplitDuration(key string) time.Duration {
return f.metadataSplitDuration[key]
}
+func (f fakeLimits) RecentMetadataQuerySplitDuration(key string) time.Duration {
+ if f.recentMetadataSplitDuration == nil {
+ return 0
+ }
+ return f.recentMetadataSplitDuration[key]
+}
+
+func (f fakeLimits) RecentMetadataQueryWindow(key string) time.Duration {
+ if f.recentMetadataQueryWindow == nil {
+ return 0
+ }
+ return f.recentMetadataQueryWindow[key]
+}
+
func (f fakeLimits) IngesterQuerySplitDuration(key string) time.Duration {
if f.ingesterSplitDuration == nil {
return 0
diff --git a/pkg/querier/queryrange/series_cache.go b/pkg/querier/queryrange/series_cache.go
index bbbf96e2dd70..5120d61fb0b4 100644
--- a/pkg/querier/queryrange/series_cache.go
+++ b/pkg/querier/queryrange/series_cache.go
@@ -17,21 +17,18 @@ import (
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
- "github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/validation"
)
type cacheKeySeries struct {
Limits
transformer UserIDTransformer
- iqo util.IngesterQueryOptions
}
// GenerateCacheKey generates a cache key based on the userID, matchers, split duration and the interval of the request.
func (i cacheKeySeries) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
sr := r.(*LokiSeriesRequest)
-
- split := SplitIntervalForTimeRange(i.iqo, i.Limits, i.MetadataQuerySplitDuration, []string{userID}, time.Now().UTC(), r.GetEnd().UTC())
+ split := metadataSplitIntervalForTimeRange(i.Limits, []string{userID}, time.Now().UTC(), r.GetStart().UTC())
var currentInterval int64
if denominator := int64(split / time.Millisecond); denominator > 0 {
@@ -87,7 +84,6 @@ func NewSeriesCacheMiddleware(
merger queryrangebase.Merger,
c cache.Cache,
cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
- iqo util.IngesterQueryOptions,
shouldCache queryrangebase.ShouldCacheFn,
parallelismForReq queryrangebase.ParallelismForReqFn,
retentionEnabled bool,
@@ -97,7 +93,7 @@ func NewSeriesCacheMiddleware(
return queryrangebase.NewResultsCacheMiddleware(
logger,
c,
- cacheKeySeries{limits, transformer, iqo},
+ cacheKeySeries{limits, transformer},
limits,
merger,
seriesExtractor{},
@@ -107,6 +103,7 @@ func NewSeriesCacheMiddleware(
},
parallelismForReq,
retentionEnabled,
+ true,
metrics,
)
}
diff --git a/pkg/querier/queryrange/series_cache_test.go b/pkg/querier/queryrange/series_cache_test.go
index d73efa9deea8..6ba869a69411 100644
--- a/pkg/querier/queryrange/series_cache_test.go
+++ b/pkg/querier/queryrange/series_cache_test.go
@@ -78,7 +78,6 @@ func TestSeriesCache(t *testing.T) {
cache.NewMockCache(),
nil,
nil,
- nil,
func(_ context.Context, _ []string, _ queryrangebase.Request) int {
return 1
},
@@ -91,195 +90,135 @@ func TestSeriesCache(t *testing.T) {
return cacheMiddleware
}
- t.Run("caches the response for the same request", func(t *testing.T) {
- cacheMiddleware := setupCacheMW()
- from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour))
-
- seriesReq := &LokiSeriesRequest{
- StartTs: from.Time(),
- EndTs: through.Time(),
- Match: []string{`{namespace=~".*"}`},
- Path: seriesAPIPath,
+ composeSeriesResp := func(series [][]logproto.SeriesIdentifier_LabelsEntry, splits int64) *LokiSeriesResponse {
+ var data []logproto.SeriesIdentifier
+ for _, v := range series {
+ data = append(data, logproto.SeriesIdentifier{Labels: v})
}
- seriesResp := &LokiSeriesResponse{
+ return &LokiSeriesResponse{
Status: "success",
Version: uint32(loghttp.VersionV1),
- Data: []logproto.SeriesIdentifier{
- {
- Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}},
- },
- },
+ Data: data,
Statistics: stats.Result{
Summary: stats.Summary{
- Splits: 1,
+ Splits: splits,
},
},
}
+ }
- called := 0
- handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
- called++
-
- // should request the entire length with no partitioning as nothing is cached yet.
- require.Equal(t, seriesReq.GetStart(), r.GetStart())
- require.Equal(t, seriesReq.GetEnd(), r.GetEnd())
-
- return seriesResp, nil
- }))
-
- ctx := user.InjectOrgID(context.Background(), "fake")
- got, err := handler.Do(ctx, seriesReq)
- require.NoError(t, err)
- require.Equal(t, 1, called) // called actual handler, as not cached.
- require.Equal(t, seriesResp, got)
-
- // Doing same request again shouldn't change anything.
- called = 0
- got, err = handler.Do(ctx, seriesReq)
- require.NoError(t, err)
- require.Equal(t, 0, called)
- require.Equal(t, seriesResp, got)
- })
-
- t.Run("a new request with overlapping time range should reuse part of the previous request for the overlap", func(t *testing.T) {
- cacheMiddleware := setupCacheMW()
-
- from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour))
- req1 := &LokiSeriesRequest{
- StartTs: from.Time(),
- EndTs: through.Time(),
- Match: []string{`{namespace=~".*"}`},
- Path: seriesAPIPath,
- }
- resp1 := &LokiSeriesResponse{
- Status: "success",
- Version: uint32(loghttp.VersionV1),
- Data: []logproto.SeriesIdentifier{
- {
- Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "dev"}},
- },
- {
- Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}},
- },
- },
- Statistics: stats.Result{
- Summary: stats.Summary{
- Splits: 1,
- },
- },
- }
-
- called := 0
- handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
- called++
-
- // should request the entire length with no partitioning as nothing is cached yet.
- require.Equal(t, req1.GetStart(), r.GetStart())
- require.Equal(t, req1.GetEnd(), r.GetEnd())
+ var downstreamHandlerFunc func(context.Context, queryrangebase.Request) (queryrangebase.Response, error)
+ downstreamHandler := &mockDownstreamHandler{fn: func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
+ return downstreamHandlerFunc(ctx, req)
+ }}
- return resp1, nil
- }))
+ from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour))
+ seriesReq := &LokiSeriesRequest{
+ StartTs: from.Time(),
+ EndTs: through.Time(),
+ Match: []string{`{namespace=~".*"}`},
+ Path: seriesAPIPath,
+ }
+ seriesResp := composeSeriesResp([][]logproto.SeriesIdentifier_LabelsEntry{
+ {{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "dev"}},
+ {{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}},
+ }, 1)
+
+ for _, tc := range []struct {
+ name string
+ req queryrangebase.Request
+ expectedQueryStart, expectedQueryEnd time.Time
+ downstreamResponse *LokiSeriesResponse
+ downstreamCalls int
+ expectedResponse *LokiSeriesResponse
+ }{
+ {
+ name: "return cached response for the same request",
+ downstreamCalls: 0,
+ expectedResponse: seriesResp,
+ req: seriesReq,
+ },
+ {
+ name: "a new request with overlapping time range should reuse results of the previous request",
+ req: seriesReq.WithStartEnd(seriesReq.GetStart(), seriesReq.GetEnd().Add(15*time.Minute)),
+ expectedQueryStart: seriesReq.GetEnd(),
+ expectedQueryEnd: seriesReq.GetEnd().Add(15 * time.Minute),
+ downstreamCalls: 1,
+ downstreamResponse: composeSeriesResp([][]logproto.SeriesIdentifier_LabelsEntry{
+ {{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "prod"}},
+ }, 1),
+ expectedResponse: composeSeriesResp([][]logproto.SeriesIdentifier_LabelsEntry{
+ {{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "dev"}},
+ {{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}},
+ {{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "prod"}},
+ }, 2),
+ },
+ {
+ // To avoid returning incorrect results, we only use extents that are entirely within the requested query range.
+ name: "cached response not entirely within the requested range",
+ req: seriesReq.WithStartEnd(seriesReq.GetStart().Add(15*time.Minute), seriesReq.GetEnd().Add(-15*time.Minute)),
+ expectedQueryStart: seriesReq.GetStart().Add(15 * time.Minute),
+ expectedQueryEnd: seriesReq.GetEnd().Add(-15 * time.Minute),
+ downstreamCalls: 1,
+ downstreamResponse: composeSeriesResp([][]logproto.SeriesIdentifier_LabelsEntry{
+ {{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "prod"}},
+ }, 1),
+ expectedResponse: composeSeriesResp([][]logproto.SeriesIdentifier_LabelsEntry{
+ {{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "prod"}},
+ }, 1),
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ cacheMiddleware := setupCacheMW()
+ downstreamHandler.ResetCount()
+ downstreamHandlerFunc = func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
+ require.Equal(t, seriesReq.GetStart(), r.GetStart())
+ require.Equal(t, seriesReq.GetEnd(), r.GetEnd())
- ctx := user.InjectOrgID(context.Background(), "fake")
- got, err := handler.Do(ctx, req1)
- require.NoError(t, err)
- require.Equal(t, 1, called)
- require.Equal(t, resp1, got)
+ return seriesResp, nil
+ }
- req2 := req1.WithStartEnd(req1.GetStart().Add(15*time.Minute), req1.GetEnd().Add(15*time.Minute))
+ handler := cacheMiddleware.Wrap(downstreamHandler)
- called = 0
- handler = cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
- called++
+ ctx := user.InjectOrgID(context.Background(), "fake")
+ got, err := handler.Do(ctx, seriesReq)
+ require.NoError(t, err)
+ require.Equal(t, 1, downstreamHandler.Called()) // calls downstream handler, as not cached.
+ require.Equal(t, seriesResp, got)
- // make downstream request only for the non-overlapping portion of the query.
- require.Equal(t, req1.GetEnd(), r.GetStart())
- require.Equal(t, req1.GetEnd().Add(15*time.Minute), r.GetEnd())
+ downstreamHandler.ResetCount()
+ downstreamHandlerFunc = func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
+ require.Equal(t, tc.expectedQueryStart, r.GetStart())
+ require.Equal(t, tc.expectedQueryEnd, r.GetEnd())
- return &LokiSeriesResponse{
- Status: "success",
- Version: uint32(loghttp.VersionV1),
- Data: []logproto.SeriesIdentifier{
- {
- Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "prod"}},
- },
- },
- Statistics: stats.Result{
- Summary: stats.Summary{
- Splits: 1,
- },
- },
- }, nil
- }))
+ return tc.downstreamResponse, nil
+ }
- got, err = handler.Do(ctx, req2)
- require.NoError(t, err)
- require.Equal(t, 1, called)
- // two splits as we merge the results from the extent and downstream request
- resp1.Statistics.Summary.Splits = 2
- require.Equal(t, &LokiSeriesResponse{
- Status: "success",
- Version: uint32(loghttp.VersionV1),
- Data: []logproto.SeriesIdentifier{
- {
- Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "dev"}},
- },
- {
- Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}},
- },
- {
- Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "us-central"}, {Key: "namespace", Value: "prod"}},
- },
- },
- Statistics: stats.Result{
- Summary: stats.Summary{
- Splits: 2,
- },
- },
- }, got)
- })
+ got, err = handler.Do(ctx, tc.req)
+ require.NoError(t, err)
+ require.Equal(t, tc.downstreamCalls, downstreamHandler.Called())
+				require.Equal(t, tc.expectedResponse, got)
+ })
+ }
t.Run("caches are only valid for the same request parameters", func(t *testing.T) {
cacheMiddleware := setupCacheMW()
-
- from, through := util.RoundToMilliseconds(testTime, testTime.Add(1*time.Hour))
- seriesReq := &LokiSeriesRequest{
- StartTs: from.Time(),
- EndTs: through.Time(),
- Match: []string{`{namespace=~".*"}`},
- Path: seriesAPIPath,
- }
- seriesResp := &LokiSeriesResponse{
- Status: "success",
- Version: uint32(loghttp.VersionV1),
- Data: []logproto.SeriesIdentifier{
- {
- Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "cluster", Value: "eu-west"}, {Key: "namespace", Value: "prod"}},
- },
- },
- Statistics: stats.Result{
- Summary: stats.Summary{
- Splits: 1,
- },
- },
- }
-
- called := 0
- handler := cacheMiddleware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
- called++
-
- // should request the entire length as none of the subsequent queries hit the cache.
+ downstreamHandler.ResetCount()
+ downstreamHandlerFunc = func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
require.Equal(t, seriesReq.GetStart(), r.GetStart())
require.Equal(t, seriesReq.GetEnd(), r.GetEnd())
+
return seriesResp, nil
- }))
+ }
+
+ handler := cacheMiddleware.Wrap(downstreamHandler)
// initial call to fill cache
ctx := user.InjectOrgID(context.Background(), "fake")
_, err := handler.Do(ctx, seriesReq)
require.NoError(t, err)
- require.Equal(t, 1, called)
+ require.Equal(t, 1, downstreamHandler.Called())
type testCase struct {
fn func(*LokiSeriesRequest)
@@ -297,7 +236,7 @@ func TestSeriesCache(t *testing.T) {
}
for name, tc := range testCases {
- called = 0
+ downstreamHandler.ResetCount()
seriesReq := seriesReq
if tc.fn != nil {
@@ -310,7 +249,7 @@ func TestSeriesCache(t *testing.T) {
_, err = handler.Do(ctx, seriesReq)
require.NoError(t, err)
- require.Equal(t, 1, called, name)
+ require.Equal(t, 1, downstreamHandler.Called(), name)
}
})
}
@@ -371,7 +310,6 @@ func TestSeriesCache_freshness(t *testing.T) {
cache.NewMockCache(),
nil,
nil,
- nil,
func(_ context.Context, _ []string, _ queryrangebase.Request) int {
return 1
},
@@ -428,76 +366,54 @@ func TestSeriesCache_freshness(t *testing.T) {
func TestSeriesQueryCacheKey(t *testing.T) {
const (
- defaultTenant = "a"
- alternateTenant = "b"
- defaultSplit = time.Hour
- ingesterSplit = 90 * time.Minute
- ingesterQueryWindow = defaultSplit * 3
+ defaultSplit = time.Hour
+ recentMetadataSplitDuration = 30 * time.Minute
+ recentMetadataQueryWindow = time.Hour
)
l := fakeLimits{
- metadataSplitDuration: map[string]time.Duration{defaultTenant: defaultSplit, alternateTenant: defaultSplit},
- ingesterSplitDuration: map[string]time.Duration{defaultTenant: ingesterSplit},
+ metadataSplitDuration: map[string]time.Duration{tenantID: defaultSplit},
+ recentMetadataSplitDuration: map[string]time.Duration{tenantID: recentMetadataSplitDuration},
+ recentMetadataQueryWindow: map[string]time.Duration{tenantID: recentMetadataQueryWindow},
}
cases := []struct {
- name, tenantID string
- start, end time.Time
- expectedSplit time.Duration
- iqo util.IngesterQueryOptions
- values bool
+ name string
+ start, end time.Time
+ expectedSplit time.Duration
+ values bool
+ limits Limits
}{
{
- name: "outside ingester query window",
- tenantID: defaultTenant,
- start: time.Now().Add(-6 * time.Hour),
- end: time.Now().Add(-5 * time.Hour),
+ name: "outside recent metadata query window",
+ start: time.Now().Add(-3 * time.Hour),
+ end: time.Now().Add(-2 * time.Hour),
expectedSplit: defaultSplit,
- iqo: ingesterQueryOpts{
- queryIngestersWithin: ingesterQueryWindow,
- queryStoreOnly: false,
- },
+ limits: l,
},
{
- name: "within ingester query window",
- tenantID: defaultTenant,
- start: time.Now().Add(-6 * time.Hour),
- end: time.Now().Add(-ingesterQueryWindow / 2),
- expectedSplit: ingesterSplit,
- iqo: ingesterQueryOpts{
- queryIngestersWithin: ingesterQueryWindow,
- queryStoreOnly: false,
- },
+ name: "within recent metadata query window",
+ start: time.Now().Add(-30 * time.Minute),
+ end: time.Now(),
+ expectedSplit: recentMetadataSplitDuration,
+ limits: l,
},
{
- name: "within ingester query window, but query store only",
- tenantID: defaultTenant,
- start: time.Now().Add(-6 * time.Hour),
- end: time.Now().Add(-ingesterQueryWindow / 2),
+ name: "within recent metadata query window, but recent split duration is not configured",
+ start: time.Now().Add(-30 * time.Minute),
+ end: time.Now(),
expectedSplit: defaultSplit,
- iqo: ingesterQueryOpts{
- queryIngestersWithin: ingesterQueryWindow,
- queryStoreOnly: true,
- },
- },
- {
- name: "within ingester query window, but no ingester split duration configured",
- tenantID: alternateTenant,
- start: time.Now().Add(-6 * time.Hour),
- end: time.Now().Add(-ingesterQueryWindow / 2),
- expectedSplit: defaultSplit,
- iqo: ingesterQueryOpts{
- queryIngestersWithin: ingesterQueryWindow,
- queryStoreOnly: false,
+ limits: fakeLimits{
+ metadataSplitDuration: map[string]time.Duration{tenantID: defaultSplit},
+ recentMetadataQueryWindow: map[string]time.Duration{tenantID: recentMetadataQueryWindow},
},
},
}
-
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
matchers := []string{`{namespace="prod"}`, `{service="foo"}`}
- keyGen := cacheKeySeries{l, nil, tc.iqo}
+ keyGen := cacheKeySeries{tc.limits, nil}
r := &LokiSeriesRequest{
StartTs: tc.start,
@@ -508,9 +424,27 @@ func TestSeriesQueryCacheKey(t *testing.T) {
			// we use regex here because the cache key always refers to the current time to get the ingester query window,
			// and therefore we can't know the current interval a priori without duplicating the logic
- pattern := regexp.MustCompile(fmt.Sprintf(`series:%s:%s:(\d+):%d`, tc.tenantID, regexp.QuoteMeta(keyGen.joinMatchers(matchers)), tc.expectedSplit))
+ pattern := regexp.MustCompile(fmt.Sprintf(`series:%s:%s:(\d+):%d`, tenantID, regexp.QuoteMeta(keyGen.joinMatchers(matchers)), tc.expectedSplit))
- require.Regexp(t, pattern, keyGen.GenerateCacheKey(context.Background(), tc.tenantID, r))
+ require.Regexp(t, pattern, keyGen.GenerateCacheKey(context.Background(), tenantID, r))
})
}
}
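
For reference, here is what a key matching that regex could look like. This is a hypothetical reconstruction: the tenant value, the matcher separator, and the interval derivation below are illustrative assumptions, not the exact production logic.

package main

import (
	"fmt"
	"time"
)

func main() {
	// hypothetical sketch of the `series:%s:%s:(\d+):%d` key shape;
	// the interval index is assumed to be the request time divided by the split interval
	split := time.Hour
	start := time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC)
	interval := start.UnixMilli() / split.Milliseconds()
	key := fmt.Sprintf("series:%s:%s:%d:%d",
		"fake",                               // tenant ID (illustrative)
		`{namespace="prod"},{service="foo"}`, // joined matchers (assumed separator)
		interval,                             // split-aligned interval index
		split,                                // %d on time.Duration prints nanoseconds
	)
	fmt.Println(key) // series:fake:{namespace="prod"},{service="foo"}:464936:3600000000000
}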
+
+type mockDownstreamHandler struct {
+ called int
+ fn func(context.Context, queryrangebase.Request) (queryrangebase.Response, error)
+}
+
+func (m *mockDownstreamHandler) Called() int {
+ return m.called
+}
+
+func (m *mockDownstreamHandler) ResetCount() {
+ m.called = 0
+}
+
+func (m *mockDownstreamHandler) Do(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
+ m.called++
+ return m.fn(ctx, req)
+}
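
The mock above is a standard pattern: a call counter plus a swappable function, so each table-driven case can assert its own downstream behavior and call count. A minimal self-contained analogue of the pattern (names here are illustrative, not from this patch):

package main

import "fmt"

// a stripped-down analogue of mockDownstreamHandler: count calls,
// delegate behavior through a swappable function
type handler struct {
	called int
	fn     func(int) int
}

func (h *handler) do(x int) int { h.called++; return h.fn(x) }

func main() {
	h := &handler{fn: func(x int) int { return x * 2 }}
	fmt.Println(h.do(21), h.called) // 42 1
	h.called = 0                    // ResetCount analogue between test cases
	h.fn = func(x int) int { return x + 1 }
	fmt.Println(h.do(41), h.called) // 42 1
}

Swapping fn between cases mirrors how the tests above reassign downstreamHandlerFunc while keeping a single wrapped handler.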
diff --git a/pkg/querier/queryrange/split_by_interval_test.go b/pkg/querier/queryrange/split_by_interval_test.go
index acf8c495becc..c559bad17be1 100644
--- a/pkg/querier/queryrange/split_by_interval_test.go
+++ b/pkg/querier/queryrange/split_by_interval_test.go
@@ -159,10 +159,12 @@ func Test_splitQuery(t *testing.T) {
t.Run(requestType, func(t *testing.T) {
for name, intervals := range map[string]struct {
- input interval
- expected []interval
- splitInterval time.Duration
- splitter splitter
+ input interval
+ expected []interval
+ expectedWithoutIngesterSplits []interval
+ splitInterval time.Duration
+ splitter splitter
+ recentMetadataQueryWindowEnabled bool
}{
"no change": {
input: interval{
@@ -255,6 +257,16 @@ func Test_splitQuery(t *testing.T) {
end: refTime,
},
},
+ expectedWithoutIngesterSplits: []interval{
+ {
+ start: refTime.Add(-time.Hour).Truncate(time.Second),
+ end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
+ end: refTime,
+ },
+ },
splitInterval: time.Hour,
splitter: newDefaultSplitter(
fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 90 * time.Minute}},
@@ -295,6 +307,32 @@ func Test_splitQuery(t *testing.T) {
end: refTime,
},
},
+ expectedWithoutIngesterSplits: []interval{
+ {
+ start: refTime.Add(-4 * time.Hour).Add(-30 * time.Minute).Truncate(time.Second),
+ end: time.Date(2023, 1, 15, 4, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 4, 0, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
+ end: refTime,
+ },
+ },
splitInterval: time.Hour,
splitter: newDefaultSplitter(
fakeLimits{ingesterSplitDuration: map[string]time.Duration{tenantID: 90 * time.Minute}},
@@ -394,11 +432,63 @@ func Test_splitQuery(t *testing.T) {
ingesterQueryOpts{queryIngestersWithin: 3 * time.Hour, queryStoreOnly: true},
),
},
+ "metadata recent query window should not affect other query types": {
+ input: interval{
+ start: refTime.Add(-4 * time.Hour).Truncate(time.Second),
+ end: refTime,
+ },
+ expected: []interval{
+ {
+ start: refTime.Add(-4 * time.Hour).Truncate(time.Second),
+ end: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
+ end: refTime,
+ },
+ },
+ splitInterval: time.Hour,
+ splitter: newDefaultSplitter(
+ fakeLimits{
+ recentMetadataSplitDuration: map[string]time.Duration{tenantID: 30 * time.Minute},
+ recentMetadataQueryWindow: map[string]time.Duration{tenantID: 3 * time.Hour},
+ }, nil,
+ ),
+ recentMetadataQueryWindowEnabled: true,
+ },
} {
t.Run(name, func(t *testing.T) {
req := tc.requestBuilderFunc(intervals.input.start, intervals.input.end)
var want []queryrangebase.Request
- for _, exp := range intervals.expected {
+
+ // ingester splits do not apply for metadata queries
+ var expected []interval
+ switch req.(type) {
+ case *LabelRequest, *LokiSeriesRequest:
+ expected = intervals.expectedWithoutIngesterSplits
+
+ if intervals.recentMetadataQueryWindowEnabled {
+ t.Skip("this flow is tested in Test_splitRecentMetadataQuery")
+ }
+ }
+
+ if expected == nil {
+ expected = intervals.expected
+ }
+
+ for _, exp := range expected {
want = append(want, tc.requestBuilderFunc(exp.start, exp.end))
}
@@ -412,22 +502,245 @@ func Test_splitQuery(t *testing.T) {
splits, err := intervals.splitter.split(refTime, []string{tenantID}, req, intervals.splitInterval)
require.NoError(t, err)
- if !assert.Equal(t, want, splits) {
- t.Logf("expected and actual do not match\n")
- defer t.Fail()
+ assertSplits(t, want, splits)
+ })
+ }
+ })
+ }
+}
- if len(want) != len(splits) {
- t.Logf("expected %d splits, got %d\n", len(want), len(splits))
- return
- }
+func Test_splitRecentMetadataQuery(t *testing.T) {
+ type interval struct {
+ start, end time.Time
+ }
- for j := 0; j < len(want); j++ {
- exp := want[j]
- act := splits[j]
- equal := assert.Equal(t, exp, act)
- t.Logf("\t#%d [matches: %v]: expected %q/%q got %q/%q\n", j, equal, exp.GetStart(), exp.GetEnd(), act.GetStart(), act.GetEnd())
- }
+ expectedSplitGap := util.SplitGap
+
+ for requestType, tc := range map[string]struct {
+ requestBuilderFunc func(start, end time.Time) queryrangebase.Request
+ }{
+ "series request": {
+ requestBuilderFunc: func(start, end time.Time) queryrangebase.Request {
+ return &LokiSeriesRequest{
+ Match: []string{"match1"},
+ StartTs: start,
+ EndTs: end,
+ Path: "/series",
+ Shards: []string{"shard1"},
+ }
+ },
+ },
+ "label names request": {
+ requestBuilderFunc: func(start, end time.Time) queryrangebase.Request {
+ return NewLabelRequest(start, end, `{foo="bar"}`, "", "/labels")
+ },
+ },
+ "label values request": {
+ requestBuilderFunc: func(start, end time.Time) queryrangebase.Request {
+ return NewLabelRequest(start, end, `{foo="bar"}`, "test", "/label/test/values")
+ },
+ },
+ } {
+ t.Run(requestType, func(t *testing.T) {
+ for name, intervals := range map[string]struct {
+ input interval
+ expected []interval
+ splitInterval time.Duration
+ splitter splitter
+ }{
+ "wholly within recent metadata query window": {
+ input: interval{
+ start: refTime.Add(-time.Hour),
+ end: refTime,
+ },
+ expected: []interval{
+ {
+ start: refTime.Add(-time.Hour),
+ end: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
+ end: refTime,
+ },
+ },
+ splitInterval: time.Hour,
+ splitter: newDefaultSplitter(
+ fakeLimits{
+ recentMetadataSplitDuration: map[string]time.Duration{tenantID: 30 * time.Minute},
+ recentMetadataQueryWindow: map[string]time.Duration{tenantID: 2 * time.Hour},
+ }, nil,
+ ),
+ },
+ "start aligns with recent metadata query window": {
+ input: interval{
+ start: refTime.Add(-1 * time.Hour),
+ end: refTime,
+ },
+ expected: []interval{
+ {
+ start: refTime.Add(-time.Hour),
+ end: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
+ end: refTime,
+ },
+ },
+ splitInterval: time.Hour,
+ splitter: newDefaultSplitter(
+ fakeLimits{
+ recentMetadataSplitDuration: map[string]time.Duration{tenantID: 30 * time.Minute},
+ recentMetadataQueryWindow: map[string]time.Duration{tenantID: 1 * time.Hour},
+ }, nil,
+ ),
+ },
+ "partially within recent metadata query window": {
+ input: interval{
+ start: refTime.Add(-3 * time.Hour),
+ end: refTime,
+ },
+ expected: []interval{
+ {
+ start: refTime.Add(-3 * time.Hour),
+ end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC),
+ end: refTime.Add(-time.Hour).Add(-expectedSplitGap),
+ },
+ // apply split_recent_metadata_queries_by_interval for recent metadata queries
+ {
+ start: refTime.Add(-time.Hour),
+ end: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
+ end: refTime,
+ },
+ },
+ splitInterval: time.Hour,
+ splitter: newDefaultSplitter(
+ fakeLimits{
+ recentMetadataSplitDuration: map[string]time.Duration{tenantID: 30 * time.Minute},
+ recentMetadataQueryWindow: map[string]time.Duration{tenantID: 1 * time.Hour},
+ }, nil,
+ ),
+ },
+ "outside recent metadata query window": {
+ input: interval{
+ start: refTime.Add(-4 * time.Hour),
+ end: refTime.Add(-2 * time.Hour),
+ },
+ expected: []interval{
+ {
+ start: refTime.Add(-4 * time.Hour),
+ end: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 5, 0, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC),
+ end: refTime.Add(-2 * time.Hour),
+ },
+ },
+ splitInterval: time.Hour,
+ splitter: newDefaultSplitter(
+ fakeLimits{
+ recentMetadataSplitDuration: map[string]time.Duration{tenantID: 30 * time.Minute},
+ recentMetadataQueryWindow: map[string]time.Duration{tenantID: 1 * time.Hour},
+ }, nil,
+ ),
+ },
+ "end aligns with recent metadata query window": {
+ input: interval{
+ start: refTime.Add(-3 * time.Hour),
+ end: refTime.Add(-1 * time.Hour),
+ },
+ expected: []interval{
+ {
+ start: refTime.Add(-3 * time.Hour),
+ end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC),
+ end: refTime.Add(-1 * time.Hour),
+ },
+ },
+ splitInterval: time.Hour,
+ splitter: newDefaultSplitter(
+ fakeLimits{
+ recentMetadataSplitDuration: map[string]time.Duration{tenantID: 30 * time.Minute},
+ recentMetadataQueryWindow: map[string]time.Duration{tenantID: 1 * time.Hour},
+ }, nil,
+ ),
+ },
+ "recent metadata window not configured": {
+ input: interval{
+ start: refTime.Add(-3 * time.Hour),
+ end: refTime,
+ },
+ expected: []interval{
+ {
+ start: refTime.Add(-3 * time.Hour),
+ end: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC),
+ end: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC).Add(-expectedSplitGap),
+ },
+ {
+ start: time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC),
+ end: refTime,
+ },
+ },
+ splitInterval: time.Hour,
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ req := tc.requestBuilderFunc(intervals.input.start, intervals.input.end)
+ var want []queryrangebase.Request
+
+ for _, exp := range intervals.expected {
+ want = append(want, tc.requestBuilderFunc(exp.start, exp.end))
+ }
+
+ if intervals.splitInterval == 0 {
+ intervals.splitInterval = time.Hour
}
+
+ if intervals.splitter == nil {
+ intervals.splitter = newDefaultSplitter(fakeLimits{}, nil)
+ }
+
+ splits, err := intervals.splitter.split(refTime, []string{tenantID}, req, intervals.splitInterval)
+ require.NoError(t, err)
+ assertSplits(t, want, splits)
})
}
})
@@ -1610,3 +1923,24 @@ func Test_DoesntDeadlock(t *testing.T) {
// Allow for 1% increase in goroutines
require.LessOrEqual(t, endingGoroutines, startingGoroutines*101/100)
}
+
+func assertSplits(t *testing.T, want, splits []queryrangebase.Request) {
+ t.Helper()
+
+ if !assert.Equal(t, want, splits) {
+ t.Logf("expected and actual do not match\n")
+ defer t.Fail()
+
+ if len(want) != len(splits) {
+ t.Logf("expected %d splits, got %d\n", len(want), len(splits))
+ return
+ }
+
+ for j := 0; j < len(want); j++ {
+ exp := want[j]
+ act := splits[j]
+ equal := assert.Equal(t, exp, act)
+ t.Logf("\t#%d [matches: %v]: expected %q/%q got %q/%q\n", j, equal, exp.GetStart(), exp.GetEnd(), act.GetStart(), act.GetEnd())
+ }
+ }
+}
diff --git a/pkg/querier/queryrange/splitters.go b/pkg/querier/queryrange/splitters.go
index 0aaecf35cb96..eddcc10edf49 100644
--- a/pkg/querier/queryrange/splitters.go
+++ b/pkg/querier/queryrange/splitters.go
@@ -91,31 +91,56 @@ func (s *defaultSplitter) split(execTime time.Time, tenantIDs []string, req quer
}
var (
- ingesterSplits []queryrangebase.Request
- origStart = req.GetStart().UTC()
- origEnd = req.GetEnd().UTC()
+ splitsBeforeRebound []queryrangebase.Request
+ origStart = req.GetStart().UTC()
+ origEnd = req.GetEnd().UTC()
+ start, end = origStart, origEnd
+
+ reboundOrigQuery bool
+ splitIntervalBeforeRebound time.Duration
)
- start, end, needsIngesterSplits := ingesterQueryBounds(execTime, s.iqo, req)
+ switch req.(type) {
+	// not applying `split_ingester_queries_by_interval` for metadata queries since it solves a different problem of reducing the subqueries sent to the ingesters.
+	// we instead prefer `split_recent_metadata_queries_by_interval` for metadata queries, which favours shorter subqueries to improve cache effectiveness.
+	// even though the number of subqueries increases, caching should deamplify it over time.
+ case *LokiSeriesRequest, *LabelRequest:
+ var (
+ recentMetadataQueryWindow = validation.MaxDurationOrZeroPerTenant(tenantIDs, s.limits.RecentMetadataQueryWindow)
+ recentMetadataQuerySplitInterval = validation.MaxDurationOrZeroPerTenant(tenantIDs, s.limits.RecentMetadataQuerySplitDuration)
+ )
+
+		// if either of them is not configured, we fall back to the default split interval for the entire query length.
+ if recentMetadataQueryWindow == 0 || recentMetadataQuerySplitInterval == 0 {
+ break
+ }
- if ingesterQueryInterval := validation.MaxDurationOrZeroPerTenant(tenantIDs, s.limits.IngesterQuerySplitDuration); ingesterQueryInterval != 0 && needsIngesterSplits {
- // perform splitting using special interval (`split_ingester_queries_by_interval`)
- util.ForInterval(ingesterQueryInterval, start, end, endTimeInclusive, factory)
+ start, end, reboundOrigQuery = recentMetadataQueryBounds(execTime, recentMetadataQueryWindow, req)
+ splitIntervalBeforeRebound = recentMetadataQuerySplitInterval
+ default:
+ if ingesterQueryInterval := validation.MaxDurationOrZeroPerTenant(tenantIDs, s.limits.IngesterQuerySplitDuration); ingesterQueryInterval != 0 {
+ start, end, reboundOrigQuery = ingesterQueryBounds(execTime, s.iqo, req)
+ splitIntervalBeforeRebound = ingesterQueryInterval
+ }
+ }
- // rebound after ingester queries have been split out
+ if reboundOrigQuery {
+ util.ForInterval(splitIntervalBeforeRebound, start, end, endTimeInclusive, factory)
+
+		// rebound after the portion of the query within the ingester query window or recent metadata query window has been split out
end = start
- start = req.GetStart().UTC()
+ start = origStart
if endTimeInclusive {
end = end.Add(-util.SplitGap)
}
- // query only overlaps ingester query window, nothing more to do
+		// query only overlaps the ingester query window or recent metadata query window, nothing more to do
if start.After(end) || start.Equal(end) {
return reqs, nil
}
// copy the splits, reset the results
- ingesterSplits = reqs
+ splitsBeforeRebound = reqs
reqs = nil
} else {
start = origStart
@@ -123,10 +148,10 @@ func (s *defaultSplitter) split(execTime time.Time, tenantIDs []string, req quer
}
// perform splitting over the rest of the time range
- util.ForInterval(interval, origStart, end, endTimeInclusive, factory)
+ util.ForInterval(interval, start, end, endTimeInclusive, factory)
- // move the ingester splits to the end to maintain correct order
- reqs = append(reqs, ingesterSplits...)
+ // move the ingester or recent metadata splits to the end to maintain correct order
+ reqs = append(reqs, splitsBeforeRebound...)
return reqs, nil
}
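
To make the rebound flow concrete, here is a worked example mirroring the `partially within recent metadata query window` test case from this patch. The refTime value is assumed (roughly 2023-01-15 08:05 UTC, consistent with the test fixtures) and the split-gap subtraction on interval ends is omitted for brevity.

package main

import (
	"fmt"
	"time"
)

func main() {
	// query [refTime-3h, refTime], recent window 1h, recent split 30m, default split 1h
	refTime := time.Date(2023, 1, 15, 8, 5, 0, 0, time.UTC)
	splits := [][2]time.Time{
		// portion outside the window, split by the default 1h interval
		{refTime.Add(-3 * time.Hour), time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC)},
		{time.Date(2023, 1, 15, 6, 0, 0, 0, time.UTC), time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC)},
		{time.Date(2023, 1, 15, 7, 0, 0, 0, time.UTC), refTime.Add(-time.Hour)},
		// portion inside the window, split by the 30m recent interval and appended last
		{refTime.Add(-time.Hour), time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC)},
		{time.Date(2023, 1, 15, 7, 30, 0, 0, time.UTC), time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC)},
		{time.Date(2023, 1, 15, 8, 0, 0, 0, time.UTC), refTime},
	}
	for _, s := range splits {
		fmt.Println(s[0].Format("15:04"), "→", s[1].Format("15:04"))
	}
}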
@@ -270,6 +295,22 @@ func (s *metricQuerySplitter) buildMetricSplits(step int64, interval time.Durati
}
}
+func recentMetadataQueryBounds(execTime time.Time, recentMetadataQueryWindow time.Duration, req queryrangebase.Request) (time.Time, time.Time, bool) {
+ start, end := req.GetStart().UTC(), req.GetEnd().UTC()
+ windowStart := execTime.UTC().Add(-recentMetadataQueryWindow)
+
+ // rebound only if the query end is strictly inside the window
+ if !windowStart.Before(end) {
+ return start, end, false
+ }
+
+ if windowStart.Before(start) {
+ windowStart = start
+ }
+
+ return windowStart, end, true
+}
+
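A runnable sketch of the bounds logic just added; the helper below inlines the request's start and end as plain arguments (an adaptation for illustration only), with times matching the test fixtures.

package main

import (
	"fmt"
	"time"
)

// same logic as recentMetadataQueryBounds above, taking start/end directly
func bounds(execTime time.Time, window time.Duration, start, end time.Time) (time.Time, time.Time, bool) {
	windowStart := execTime.UTC().Add(-window)
	if !windowStart.Before(end) { // query ends at or before the window start: no rebound
		return start, end, false
	}
	if windowStart.Before(start) { // query sits wholly inside the window
		windowStart = start
	}
	return windowStart, end, true
}

func main() {
	exec := time.Date(2023, 1, 15, 8, 5, 0, 0, time.UTC)
	start, end := exec.Add(-3*time.Hour), exec
	s, e, rebound := bounds(exec, time.Hour, start, end)
	fmt.Println(s.Format("15:04"), e.Format("15:04"), rebound) // 07:05 08:05 true
}
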
// ingesterQueryBounds determines if we need to split time ranges overlapping the ingester query window (`query_ingesters_within`)
// and retrieve the bounds for those specific splits
func ingesterQueryBounds(execTime time.Time, iqo util.IngesterQueryOptions, req queryrangebase.Request) (time.Time, time.Time, bool) {
diff --git a/pkg/querier/queryrange/volume_cache.go b/pkg/querier/queryrange/volume_cache.go
index 147d61912db9..5ae2af411115 100644
--- a/pkg/querier/queryrange/volume_cache.go
+++ b/pkg/querier/queryrange/volume_cache.go
@@ -131,6 +131,7 @@ func NewVolumeCacheMiddleware(
},
parallelismForReq,
retentionEnabled,
+ false,
metrics,
)
}
diff --git a/pkg/storage/chunk/cache/resultscache/cache.go b/pkg/storage/chunk/cache/resultscache/cache.go
index d05d71837404..3ea3e727b502 100644
--- a/pkg/storage/chunk/cache/resultscache/cache.go
+++ b/pkg/storage/chunk/cache/resultscache/cache.go
@@ -58,6 +58,7 @@ type ResultsCache struct {
merger ResponseMerger
shouldCacheReq ShouldCacheReqFn
shouldCacheRes ShouldCacheResFn
+ onlyUseEntireExtent bool
parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int
}
@@ -79,7 +80,7 @@ func NewResultsCache(
shouldCacheRes ShouldCacheResFn,
parallelismForReq func(ctx context.Context, tenantIDs []string, r Request) int,
cacheGenNumberLoader CacheGenNumberLoader,
- retentionEnabled bool,
+ retentionEnabled, onlyUseEntireExtent bool,
) *ResultsCache {
return &ResultsCache{
logger: logger,
@@ -95,6 +96,7 @@ func NewResultsCache(
shouldCacheReq: shouldCacheReq,
shouldCacheRes: shouldCacheRes,
parallelismForReq: parallelismForReq,
+ onlyUseEntireExtent: onlyUseEntireExtent,
}
}
@@ -334,6 +336,25 @@ func (s ResultsCache) partition(req Request, extents []Extent) ([]Request, []Res
continue
}
+ if s.onlyUseEntireExtent && (start > extent.GetStart() || end < extent.GetEnd()) {
+ // It is not possible to extract the overlapping portion of an extent for all request types.
+		// Metadata results, for one, cannot be extracted since the data portion is just a list of strings with no associated timestamps.
+ // To avoid returning incorrect results, we only use extents that are entirely within the requested query range.
+ //
+ // Start End
+ // ┌────────────────────────┐
+ // │ Req │
+ // └────────────────────────┘
+ //
+ // ◄──────────────► only this extent can be used. Remaining portion of the query will be added to requests.
+ //
+ //
+		// ◄──────X───────► cannot be partially extracted; will be discarded if onlyUseEntireExtent is set.
+ // ◄───────X──────►
+ // ◄───────────────X──────────────────►
+ continue
+ }
+
// If this extent is tiny and request is not tiny, discard it: more efficient to do a few larger queries.
// Hopefully tiny request can make tiny extent into not-so-tiny extent.
@@ -353,6 +374,7 @@ func (s ResultsCache) partition(req Request, extents []Extent) ([]Request, []Res
if err != nil {
return nil, nil, err
}
+
// extract the overlap from the cached extent.
cachedResponses = append(cachedResponses, s.extractor.Extract(start, end, res, extent.GetStart(), extent.GetEnd()))
start = extent.End
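
A compact worked example of what partition produces with onlyUseEntireExtent set, using the same numbers as the `multiple overlapping extents` test added further below. The classification here is a simplified sketch of the checks above, not the production code path.

package main

import "fmt"

func main() {
	// request [50, 200) against four cached extents (times in ms)
	reqStart, reqEnd := int64(50), int64(200)
	extents := [][2]int64{{0, 80}, {100, 150}, {150, 180}, {200, 250}}
	for _, e := range extents {
		switch {
		case e[1] <= reqStart || e[0] >= reqEnd:
			fmt.Printf("extent %v: outside the request, skipped\n", e)
		case e[0] < reqStart || e[1] > reqEnd:
			fmt.Printf("extent %v: only partially inside, discarded\n", e)
		default:
			fmt.Printf("extent %v: wholly inside, served from cache\n", e)
		}
	}
	// the leftover gaps [50,100) and [180,200) become downstream requests
}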
diff --git a/pkg/storage/chunk/cache/resultscache/cache_test.go b/pkg/storage/chunk/cache/resultscache/cache_test.go
index bacedd2dda6b..cff371097a68 100644
--- a/pkg/storage/chunk/cache/resultscache/cache_test.go
+++ b/pkg/storage/chunk/cache/resultscache/cache_test.go
@@ -61,7 +61,6 @@ func TestPartition(t *testing.T) {
mkAPIResponse(0, 100, 10),
},
},
-
{
name: "Test with a complete miss.",
input: &MockRequest{
@@ -182,6 +181,123 @@ func TestPartition(t *testing.T) {
}
}
+func TestPartition_onlyUseEntireExtent(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ input Request
+ prevCachedResponse []Extent
+ expectedRequests []Request
+ expectedCachedResponse []Response
+ }{
+ {
+ name: "overlapping extent - right",
+ input: &MockRequest{
+ Start: time.UnixMilli(0),
+ End: time.UnixMilli(100),
+ },
+ prevCachedResponse: []Extent{
+ mkExtent(60, 120),
+ },
+ expectedRequests: []Request{
+ &MockRequest{
+ Start: time.UnixMilli(0),
+ End: time.UnixMilli(100),
+ },
+ },
+ },
+ {
+ name: "overlapping extent - left",
+ input: &MockRequest{
+ Start: time.UnixMilli(20),
+ End: time.UnixMilli(100),
+ },
+ prevCachedResponse: []Extent{
+ mkExtent(0, 50),
+ },
+ expectedRequests: []Request{
+ &MockRequest{
+ Start: time.UnixMilli(20),
+ End: time.UnixMilli(100),
+ },
+ },
+ },
+ {
+ name: "overlapping extent larger than the request",
+ input: &MockRequest{
+ Start: time.UnixMilli(20),
+ End: time.UnixMilli(100),
+ },
+ prevCachedResponse: []Extent{
+ mkExtent(0, 120),
+ },
+ expectedRequests: []Request{
+ &MockRequest{
+ Start: time.UnixMilli(20),
+ End: time.UnixMilli(100),
+ },
+ },
+ },
+ {
+ name: "overlapping extent within the requested query range",
+ input: &MockRequest{
+ Start: time.UnixMilli(0),
+ End: time.UnixMilli(120),
+ },
+ prevCachedResponse: []Extent{
+ mkExtent(0, 100),
+ },
+ expectedRequests: []Request{
+ &MockRequest{
+ Start: time.UnixMilli(100),
+ End: time.UnixMilli(120),
+ },
+ },
+ expectedCachedResponse: []Response{
+ mkAPIResponse(0, 100, 10),
+ },
+ },
+ {
+ name: "multiple overlapping extents",
+ input: &MockRequest{
+ Start: time.UnixMilli(50),
+ End: time.UnixMilli(200),
+ },
+ prevCachedResponse: []Extent{
+ mkExtent(0, 80),
+ mkExtent(100, 150),
+ mkExtent(150, 180),
+ mkExtent(200, 250),
+ },
+ expectedRequests: []Request{
+ &MockRequest{
+ Start: time.UnixMilli(50),
+ End: time.UnixMilli(100),
+ },
+ &MockRequest{
+ Start: time.UnixMilli(180),
+ End: time.UnixMilli(200),
+ },
+ },
+ expectedCachedResponse: []Response{
+ mkAPIResponse(100, 150, 10),
+ mkAPIResponse(150, 180, 10),
+ },
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ s := ResultsCache{
+ extractor: MockExtractor{},
+ minCacheExtent: 10,
+ onlyUseEntireExtent: true,
+ }
+ reqs, resps, err := s.partition(tc.input, tc.prevCachedResponse)
+ require.Nil(t, err)
+ require.Equal(t, tc.expectedRequests, reqs)
+ require.Equal(t, tc.expectedCachedResponse, resps)
+ })
+ }
+}
+
func TestHandleHit(t *testing.T) {
for _, tc := range []struct {
name string
@@ -491,6 +607,7 @@ func TestResultsCacheMaxFreshness(t *testing.T) {
},
nil,
false,
+ false,
)
require.NoError(t, err)
@@ -534,6 +651,7 @@ func Test_resultsCache_MissingData(t *testing.T) {
},
nil,
false,
+ false,
)
require.NoError(t, err)
ctx := context.Background()
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index 262631643c72..13885c0fcb52 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -107,14 +107,16 @@ type Limits struct {
QueryTimeout model.Duration `yaml:"query_timeout" json:"query_timeout"`
// Query frontend enforced limits. The default is actually parameterized by the queryrange config.
- QuerySplitDuration model.Duration `yaml:"split_queries_by_interval" json:"split_queries_by_interval"`
- MetadataQuerySplitDuration model.Duration `yaml:"split_metadata_queries_by_interval" json:"split_metadata_queries_by_interval"`
- IngesterQuerySplitDuration model.Duration `yaml:"split_ingester_queries_by_interval" json:"split_ingester_queries_by_interval"`
- MinShardingLookback model.Duration `yaml:"min_sharding_lookback" json:"min_sharding_lookback"`
- MaxQueryBytesRead flagext.ByteSize `yaml:"max_query_bytes_read" json:"max_query_bytes_read"`
- MaxQuerierBytesRead flagext.ByteSize `yaml:"max_querier_bytes_read" json:"max_querier_bytes_read"`
- VolumeEnabled bool `yaml:"volume_enabled" json:"volume_enabled" doc:"description=Enable log-volume endpoints."`
- VolumeMaxSeries int `yaml:"volume_max_series" json:"volume_max_series" doc:"description=The maximum number of aggregated series in a log-volume response"`
+ QuerySplitDuration model.Duration `yaml:"split_queries_by_interval" json:"split_queries_by_interval"`
+ MetadataQuerySplitDuration model.Duration `yaml:"split_metadata_queries_by_interval" json:"split_metadata_queries_by_interval"`
+ RecentMetadataQuerySplitDuration model.Duration `yaml:"split_recent_metadata_queries_by_interval" json:"split_recent_metadata_queries_by_interval"`
+ RecentMetadataQueryWindow model.Duration `yaml:"recent_metadata_query_window" json:"recent_metadata_query_window"`
+ IngesterQuerySplitDuration model.Duration `yaml:"split_ingester_queries_by_interval" json:"split_ingester_queries_by_interval"`
+ MinShardingLookback model.Duration `yaml:"min_sharding_lookback" json:"min_sharding_lookback"`
+ MaxQueryBytesRead flagext.ByteSize `yaml:"max_query_bytes_read" json:"max_query_bytes_read"`
+ MaxQuerierBytesRead flagext.ByteSize `yaml:"max_querier_bytes_read" json:"max_querier_bytes_read"`
+ VolumeEnabled bool `yaml:"volume_enabled" json:"volume_enabled" doc:"description=Enable log-volume endpoints."`
+ VolumeMaxSeries int `yaml:"volume_max_series" json:"volume_max_series" doc:"description=The maximum number of aggregated series in a log-volume response"`
// Ruler defaults and limits.
RulerMaxRulesPerRuleGroup int `yaml:"ruler_max_rules_per_rule_group" json:"ruler_max_rules_per_rule_group"`
@@ -306,13 +308,14 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
_ = l.QuerySplitDuration.Set("1h")
f.Var(&l.QuerySplitDuration, "querier.split-queries-by-interval", "Split queries by a time interval and execute in parallel. The value 0 disables splitting by time. This also determines how cache keys are chosen when result caching is enabled.")
- // with metadata caching, it is not possible to extract a subset of labels/series from a cached extent because unlike samples they are not associated with a timestamp.
- // as a result, we could return inaccurate results. example: returning results from an entire 1h extent for a 5m query
- // Setting max_metadata_cache_freshness to 24h should help us avoid caching recent data and preseve the correctness.
- // For the portion of the request beyond the freshness window, granularity of the cached metadata results is determined by split_metadata_queries_by_interval.
_ = l.MetadataQuerySplitDuration.Set("24h")
f.Var(&l.MetadataQuerySplitDuration, "querier.split-metadata-queries-by-interval", "Split metadata queries by a time interval and execute in parallel. The value 0 disables splitting metadata queries by time. This also determines how cache keys are chosen when label/series result caching is enabled.")
+ _ = l.RecentMetadataQuerySplitDuration.Set("1h")
+	f.Var(&l.RecentMetadataQuerySplitDuration, "experimental.querier.split-recent-metadata-queries-by-interval", "Experimental. Split interval to use for the portion of a metadata request that falls within `recent_metadata_query_window`. The rest of the request, which is outside the window, still uses `split_metadata_queries_by_interval`. If set to 0, the entire request defaults to using a split interval of `split_metadata_queries_by_interval`.")
+
+	f.Var(&l.RecentMetadataQueryWindow, "experimental.querier.recent-metadata-query-window", "Experimental. Metadata query window inside which `split_recent_metadata_queries_by_interval` gets applied; the portion of the metadata request that falls within this window is split using `split_recent_metadata_queries_by_interval`. The value 0 disables using a different split interval for recent metadata queries.\n\nThis is added to improve the cacheability of recent metadata queries. The query split interval also determines the interval used in the cache key. The default split interval of 24h is useful for caching long queries, each cache key holding 1 day's results, but metadata queries are often shorter than 24h; to cache them effectively we need a smaller split interval. `recent_metadata_query_window` along with `split_recent_metadata_queries_by_interval` helps configure a shorter split interval for recent metadata queries.")
+
_ = l.IngesterQuerySplitDuration.Set("0s")
f.Var(&l.IngesterQuerySplitDuration, "querier.split-ingester-queries-by-interval", "Interval to use for time-based splitting when a request is within the `query_ingesters_within` window; defaults to `split-queries-by-interval` by setting to 0.")
@@ -598,6 +601,16 @@ func (o *Overrides) MetadataQuerySplitDuration(userID string) time.Duration {
return time.Duration(o.getOverridesForUser(userID).MetadataQuerySplitDuration)
}
+// RecentMetadataQuerySplitDuration returns the tenant specific splitby interval for recent metadata queries.
+func (o *Overrides) RecentMetadataQuerySplitDuration(userID string) time.Duration {
+ return time.Duration(o.getOverridesForUser(userID).RecentMetadataQuerySplitDuration)
+}
+
+// RecentMetadataQueryWindow returns the tenant specific time window used to determine recent metadata queries.
+func (o *Overrides) RecentMetadataQueryWindow(userID string) time.Duration {
+ return time.Duration(o.getOverridesForUser(userID).RecentMetadataQueryWindow)
+}
+
// IngesterQuerySplitDuration returns the tenant specific splitby interval applied in the query frontend when querying
// during the `query_ingesters_within` window.
func (o *Overrides) IngesterQuerySplitDuration(userID string) time.Duration {
From 256f3971052848963fc4d4c9e24a346afbe1b32c Mon Sep 17 00:00:00 2001
From: Zirko <64951262+QuantumEnigmaa@users.noreply.github.com>
Date: Wed, 14 Feb 2024 14:24:04 +0100
Subject: [PATCH 063/130] Helm: bump chart version (#11932)
Signed-off-by: QuantumEnigmaa
Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com>
---
production/helm/loki/Chart.yaml | 2 +-
production/helm/loki/README.md | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 183333ba652f..d3b9186bfa77 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki in simple, scalable mode
type: application
appVersion: 2.9.4
-version: 5.42.2
+version: 5.42.3
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index 5b6cbe5a2fa1..a9766865305d 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-![Version: 5.42.2](https://img.shields.io/badge/Version-5.42.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.4](https://img.shields.io/badge/AppVersion-2.9.4-informational?style=flat-square)
+![Version: 5.42.3](https://img.shields.io/badge/Version-5.42.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.4](https://img.shields.io/badge/AppVersion-2.9.4-informational?style=flat-square)
Helm chart for Grafana Loki in simple, scalable mode
From 687978826f065de2d9501b55a41104ad89b3e321 Mon Sep 17 00:00:00 2001
From: Roman Danko
Date: Wed, 14 Feb 2024 15:41:37 +0100
Subject: [PATCH 064/130] Helm: Allow to define resources for GrafanaAgent pods
(#11851)
Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com>
---
CHANGELOG.md | 1 +
docs/sources/setup/install/helm/reference.md | 9 +++++++++
production/helm/loki/CHANGELOG.md | 5 +++++
production/helm/loki/Chart.yaml | 2 +-
production/helm/loki/README.md | 2 +-
.../helm/loki/templates/monitoring/grafana-agent.yaml | 4 ++++
production/helm/loki/values.yaml | 7 +++++++
7 files changed, 28 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7f091ed06f88..ca4560401243 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@
##### Enhancements
+* [11851](https://github.com/grafana/loki/pull/11851) **elcomtik**: Helm: Allow the definition of resources for GrafanaAgent pods.
* [11819](https://github.com/grafana/loki/pull/11819) **jburnham**: Ruler: Add the ability to disable the `X-Scope-OrgId` tenant identification header in remote write requests.
* [11633](https://github.com/grafana/loki/pull/11633) **cyriltovena**: Add profiling integrations to tracing instrumentation.
* [11571](https://github.com/grafana/loki/pull/11571) **MichelHollands**: Add a metrics.go log line for requests from querier to ingester
diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md
index e687a560ef71..e7dbfdbdd3f6 100644
--- a/docs/sources/setup/install/helm/reference.md
+++ b/docs/sources/setup/install/helm/reference.md
@@ -2806,6 +2806,15 @@ true
null
+
+
+
+
monitoring.selfMonitoring.grafanaAgent.resources
+
object
+
Resource requests and limits for the grafanaAgent pods
+
+{}
+
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 068d37a49553..47d8f6333e4e 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,11 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries below this line.)
+
+## 5.43.0
+
+- [ENHANCEMENT] Allow the definition of resources for GrafanaAgent pods
+
## 5.42.3
- [BUGFIX] Added condition for `egress-discovery` networkPolicies and ciliumNetworkPolicies.
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index d3b9186bfa77..ffa62c88d5cd 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki in simple, scalable mode
type: application
appVersion: 2.9.4
-version: 5.42.3
+version: 5.43.0
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index a9766865305d..5db87e6d801e 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-![Version: 5.42.3](https://img.shields.io/badge/Version-5.42.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.4](https://img.shields.io/badge/AppVersion-2.9.4-informational?style=flat-square)
+![Version: 5.43.0](https://img.shields.io/badge/Version-5.43.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.4](https://img.shields.io/badge/AppVersion-2.9.4-informational?style=flat-square)
Helm chart for Grafana Loki in simple, scalable mode
diff --git a/production/helm/loki/templates/monitoring/grafana-agent.yaml b/production/helm/loki/templates/monitoring/grafana-agent.yaml
index 010d9604aab7..a047e5f86251 100644
--- a/production/helm/loki/templates/monitoring/grafana-agent.yaml
+++ b/production/helm/loki/templates/monitoring/grafana-agent.yaml
@@ -30,6 +30,10 @@ spec:
{{- include "loki.selectorLabels" $ | nindent 8 }}
{{- end }}
{{- end }}
+ {{- with .resources }}
+ resources:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
{{- with .tolerations }}
tolerations:
{{- toYaml . | nindent 4 }}
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index e2937af382a7..6f5a779bc811 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -671,6 +671,13 @@ monitoring:
enableConfigReadAPI: false
# -- The name of the PriorityClass for GrafanaAgent pods
priorityClassName: null
+ # -- Resource requests and limits for the grafanaAgent pods
+ resources: {}
+ # limits:
+ # memory: 200Mi
+ # requests:
+ # cpu: 50m
+ # memory: 100Mi
# -- Tolerations for GrafanaAgent pods
tolerations: []
# PodLogs configuration
From 3c06b360ecbfb7b8ca56d01d59fb69661510d9b9 Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Wed, 14 Feb 2024 11:47:11 -0800
Subject: [PATCH 065/130] meta & removal support for bloom compaction (#11941)
Signed-off-by: Owen Diehl
---
pkg/bloomcompactor/bloomcompactor.go | 2 +-
pkg/bloomcompactor/controller.go | 380 ++++++++++++++----
pkg/bloomcompactor/controller_test.go | 165 +++++++-
pkg/bloomgateway/util_test.go | 4 +-
pkg/storage/bloom/v1/bounds.go | 8 +-
pkg/storage/bloom/v1/bounds_test.go | 2 +-
pkg/storage/bloom/v1/fuse_test.go | 1 -
.../stores/shipper/bloomshipper/client.go | 59 ++-
.../shipper/bloomshipper/client_test.go | 8 +-
.../shipper/bloomshipper/fetcher_test.go | 4 +-
.../stores/shipper/bloomshipper/shipper.go | 2 +-
.../shipper/bloomshipper/shipper_test.go | 2 +-
.../stores/shipper/bloomshipper/store_test.go | 4 +-
13 files changed, 526 insertions(+), 115 deletions(-)
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index 566b836609d1..fa3b205aa31b 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -290,7 +290,7 @@ func (c *Compactor) runWorkers(ctx context.Context, ch <-chan tenantTable) error
func (c *Compactor) compactTenantTable(ctx context.Context, tt tenantTable) error {
level.Info(c.logger).Log("msg", "compacting", "org_id", tt.tenant, "table", tt.table, "ownership", tt.ownershipRange)
- return c.controller.buildBlocks(ctx, tt.table, tt.tenant, tt.ownershipRange)
+ return c.controller.compactTenant(ctx, tt.table, tt.tenant, tt.ownershipRange)
}
type dayRangeIterator struct {
diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go
index 8470fd9ad708..c29aec86ac5c 100644
--- a/pkg/bloomcompactor/controller.go
+++ b/pkg/bloomcompactor/controller.go
@@ -54,7 +54,20 @@ func (s *SimpleBloomController) rwFn() (v1.BlockWriter, v1.BlockReader) {
return v1.NewMemoryBlockWriter(indexBuf, bloomsBuf), v1.NewByteReader(indexBuf, bloomsBuf)
}
-func (s *SimpleBloomController) buildBlocks(
+/*
+Compaction works as follows, split across many functions for clarity:
+ 1. Fetch all meta.jsons for the given tenant and table which overlap the ownership range of this compactor.
+ 2. Load current TSDBs for this tenant/table.
+ 3. For each live TSDB (there should be only 1, but this works with multiple), find any gaps
+    (fingerprint ranges) which are not up to date, determined by checking other meta.jsons and comparing
+ the tsdbs they were generated from + their ownership ranges.
+ 4. Build new bloom blocks for each gap, using the series and chunks from the TSDBs and any existing
+ blocks which overlap the gaps to accelerate bloom generation.
+ 5. Write the new blocks and metas to the store.
+ 6. Determine if any meta.jsons overlap the ownership range but are outdated, and remove them and
+ their associated blocks if so.
+*/
+func (s *SimpleBloomController) compactTenant(
ctx context.Context,
table config.DayTime,
tenant string,
@@ -62,23 +75,13 @@ func (s *SimpleBloomController) buildBlocks(
) error {
logger := log.With(s.logger, "ownership", ownershipRange, "org_id", tenant, "table", table)
- // 1. Resolve TSDBs
- tsdbs, err := s.tsdbStore.ResolveTSDBs(ctx, table, tenant)
+ client, err := s.bloomStore.Client(table.ModelTime())
if err != nil {
- level.Error(logger).Log("msg", "failed to resolve tsdbs", "err", err)
- return errors.Wrap(err, "failed to resolve tsdbs")
- }
-
- if len(tsdbs) == 0 {
- return nil
+ level.Error(logger).Log("msg", "failed to get client", "err", err, "table", table.String())
+ return errors.Wrap(err, "failed to get client")
}
- ids := make([]tsdb.Identifier, 0, len(tsdbs))
- for _, id := range tsdbs {
- ids = append(ids, id)
- }
-
- // 2. Fetch metas
+ // Fetch source metas to be used in both compaction and cleanup of out-of-date metas+blooms
metas, err := s.bloomStore.FetchMetas(
ctx,
bloomshipper.MetaSearchParams{
@@ -92,31 +95,163 @@ func (s *SimpleBloomController) buildBlocks(
return errors.Wrap(err, "failed to get metas")
}
- // 3. Determine which TSDBs have gaps in the ownership range and need to
+ // build compaction plans
+ work, err := s.findOutdatedGaps(ctx, tenant, table, ownershipRange, metas, logger)
+ if err != nil {
+ return errors.Wrap(err, "failed to find outdated gaps")
+ }
+
+ // build new blocks
+ built, err := s.buildGaps(ctx, tenant, table, client, work, logger)
+ if err != nil {
+ return errors.Wrap(err, "failed to build gaps")
+ }
+
+	// in order to delete outdated metas which only partially fall within the ownership range,
+	// we need to fetch all metas in the entire bound range of the first set of metas we've resolved
+ /*
+ For instance, we have the following ownership range and we resolve `meta1` in our first Fetch call
+ because it overlaps the ownership range, we'll need to fetch newer metas that may overlap it in order
+	to check if it can safely be deleted. This falls partially outside our specific ownership range, but
+ we can safely run multiple deletes by treating their removal as idempotent.
+ |-------------ownership range-----------------|
+ |-------meta1-------|
+
+ we fetch this before possibly deleting meta1 |------|
+ */
+ superset := ownershipRange
+ for _, meta := range metas {
+ union := superset.Union(meta.Bounds)
+ if len(union) > 1 {
+ level.Error(logger).Log("msg", "meta bounds union is not a single range", "union", union)
+ return errors.New("meta bounds union is not a single range")
+ }
+ superset = union[0]
+ }
+
+ metas, err = s.bloomStore.FetchMetas(
+ ctx,
+ bloomshipper.MetaSearchParams{
+ TenantID: tenant,
+ Interval: bloomshipper.NewInterval(table.Bounds()),
+ Keyspace: superset,
+ },
+ )
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to get meta superset range", "err", err, "superset", superset)
+		return errors.Wrap(err, "failed to get meta superset range")
+ }
+
+ // combine built and pre-existing metas
+ // in preparation for removing outdated metas
+ metas = append(metas, built...)
+
+ outdated := outdatedMetas(metas)
+ for _, meta := range outdated {
+ for _, block := range meta.Blocks {
+ if err := client.DeleteBlocks(ctx, []bloomshipper.BlockRef{block}); err != nil {
+ if client.IsObjectNotFoundErr(err) {
+ level.Debug(logger).Log("msg", "block not found while attempting delete, continuing", "block", block)
+ continue
+ }
+
+ level.Error(logger).Log("msg", "failed to delete blocks", "err", err)
+ return errors.Wrap(err, "failed to delete blocks")
+ }
+ }
+
+ if err := client.DeleteMetas(ctx, []bloomshipper.MetaRef{meta.MetaRef}); err != nil {
+ if client.IsObjectNotFoundErr(err) {
+ level.Debug(logger).Log("msg", "meta not found while attempting delete, continuing", "meta", meta.MetaRef)
+ } else {
+ level.Error(logger).Log("msg", "failed to delete metas", "err", err)
+ return errors.Wrap(err, "failed to delete metas")
+ }
+ }
+ }
+
+ level.Debug(logger).Log("msg", "finished compaction")
+ return nil
+
+}
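
The superset computation above grows the ownership range to cover every overlapping meta before the second FetchMetas call. A minimal sketch of the idea with plain integer bounds; the repo's FingerprintBounds.Union behaves analogously for overlapping ranges, and the types and values below are illustrative.

package main

import "fmt"

type bounds struct{ min, max uint64 }

// union assumes the two ranges overlap or touch, as the loop above enforces
// by erroring out when the union is not a single range
func union(a, b bounds) bounds {
	if b.min < a.min {
		a.min = b.min
	}
	if b.max > a.max {
		a.max = b.max
	}
	return a
}

func main() {
	superset := bounds{100, 200} // the compactor's ownership range
	for _, m := range []bounds{{80, 120}, {190, 260}} { // overlapping meta bounds
		superset = union(superset, m)
	}
	fmt.Printf("%+v\n", superset) // {min:80 max:260}: re-fetch metas over this range before deleting
}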
+
+func (s *SimpleBloomController) findOutdatedGaps(
+ ctx context.Context,
+ tenant string,
+ table config.DayTime,
+ ownershipRange v1.FingerprintBounds,
+ metas []bloomshipper.Meta,
+ logger log.Logger,
+) ([]blockPlan, error) {
+ // Resolve TSDBs
+ tsdbs, err := s.tsdbStore.ResolveTSDBs(ctx, table, tenant)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to resolve tsdbs", "err", err)
+ return nil, errors.Wrap(err, "failed to resolve tsdbs")
+ }
+
+ if len(tsdbs) == 0 {
+ return nil, nil
+ }
+
+ // Determine which TSDBs have gaps in the ownership range and need to
// be processed.
- tsdbsWithGaps, err := gapsBetweenTSDBsAndMetas(ownershipRange, ids, metas)
+ tsdbsWithGaps, err := gapsBetweenTSDBsAndMetas(ownershipRange, tsdbs, metas)
if err != nil {
level.Error(logger).Log("msg", "failed to find gaps", "err", err)
- return errors.Wrap(err, "failed to find gaps")
+ return nil, errors.Wrap(err, "failed to find gaps")
}
if len(tsdbsWithGaps) == 0 {
level.Debug(logger).Log("msg", "blooms exist for all tsdbs")
- return nil
+ return nil, nil
}
work, err := blockPlansForGaps(tsdbsWithGaps, metas)
if err != nil {
level.Error(logger).Log("msg", "failed to create plan", "err", err)
- return errors.Wrap(err, "failed to create plan")
+ return nil, errors.Wrap(err, "failed to create plan")
+ }
+
+ return work, nil
+}
+
+func (s *SimpleBloomController) loadWorkForGap(
+ ctx context.Context,
+ table config.DayTime,
+ tenant string,
+ id tsdb.Identifier,
+ gap gapWithBlocks,
+) (v1.CloseableIterator[*v1.Series], v1.CloseableIterator[*bloomshipper.CloseableBlockQuerier], error) {
+ // load a series iterator for the gap
+ seriesItr, err := s.tsdbStore.LoadTSDB(ctx, table, tenant, id, gap.bounds)
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "failed to load tsdb")
+ }
+
+ // load a blocks iterator for the gap
+ fetcher, err := s.bloomStore.Fetcher(table.ModelTime())
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "failed to get fetcher")
+ }
+
+ blocksIter, err := newBatchedBlockLoader(ctx, fetcher, gap.blocks)
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "failed to load blocks")
}
- nGramSize := uint64(s.limits.BloomNGramLength(tenant))
- nGramSkip := uint64(s.limits.BloomNGramSkip(tenant))
- maxBlockSize := uint64(s.limits.BloomCompactorMaxBlockSize(tenant))
- blockOpts := v1.NewBlockOptions(nGramSize, nGramSkip, maxBlockSize)
+ return seriesItr, blocksIter, nil
+}
- // 4. Generate Blooms
+func (s *SimpleBloomController) buildGaps(
+ ctx context.Context,
+ tenant string,
+ table config.DayTime,
+ client bloomshipper.Client,
+ work []blockPlan,
+ logger log.Logger,
+) ([]bloomshipper.Meta, error) {
+ // Generate Blooms
// Now that we have the gaps, we will generate a bloom block for each gap.
// We can accelerate this by using existing blocks which may already contain
// needed chunks in their blooms, for instance after a new TSDB version is generated
@@ -127,19 +262,37 @@ func (s *SimpleBloomController) buildBlocks(
// accelerate bloom generation for the new blocks.
var (
- blockCt int
- tsdbCt = len(work)
+ blockCt int
+ tsdbCt = len(work)
+ nGramSize = uint64(s.limits.BloomNGramLength(tenant))
+ nGramSkip = uint64(s.limits.BloomNGramSkip(tenant))
+ maxBlockSize = uint64(s.limits.BloomCompactorMaxBlockSize(tenant))
+ blockOpts = v1.NewBlockOptions(nGramSize, nGramSkip, maxBlockSize)
+ created []bloomshipper.Meta
)
for _, plan := range work {
- for _, gap := range plan.gaps {
+ for i := range plan.gaps {
+ gap := plan.gaps[i]
+
+ meta := bloomshipper.Meta{
+ MetaRef: bloomshipper.MetaRef{
+ Ref: bloomshipper.Ref{
+ TenantID: tenant,
+ TableName: table.String(),
+ Bounds: gap.bounds,
+ },
+ },
+ Sources: []tsdb.SingleTenantTSDBIdentifier{plan.tsdb},
+ }
+
// Fetch blocks that aren't up to date but are in the desired fingerprint range
// to try and accelerate bloom creation
seriesItr, blocksIter, err := s.loadWorkForGap(ctx, table, tenant, plan.tsdb, gap)
if err != nil {
level.Error(logger).Log("msg", "failed to get series and blocks", "err", err)
- return errors.Wrap(err, "failed to get series and blocks")
+ return nil, errors.Wrap(err, "failed to get series and blocks")
}
gen := NewSimpleBloomGenerator(
@@ -159,24 +312,17 @@ func (s *SimpleBloomController) buildBlocks(
// TODO(owen-d): metrics
level.Error(logger).Log("msg", "failed to generate bloom", "err", err)
s.closeLoadedBlocks(loaded, blocksIter)
- return errors.Wrap(err, "failed to generate bloom")
- }
-
- client, err := s.bloomStore.Client(table.ModelTime())
- if err != nil {
- level.Error(logger).Log("msg", "failed to get client", "err", err)
- s.closeLoadedBlocks(loaded, blocksIter)
- return errors.Wrap(err, "failed to get client")
+ return nil, errors.Wrap(err, "failed to generate bloom")
}
for newBlocks.Next() && newBlocks.Err() == nil {
blockCt++
blk := newBlocks.At()
- built, err := bloomshipper.BlockFrom(tenant, table.Table(), blk)
+ built, err := bloomshipper.BlockFrom(tenant, table.String(), blk)
if err != nil {
level.Error(logger).Log("msg", "failed to build block", "err", err)
- return errors.Wrap(err, "failed to build block")
+ return nil, errors.Wrap(err, "failed to build block")
}
if err := client.PutBlock(
@@ -185,55 +331,153 @@ func (s *SimpleBloomController) buildBlocks(
); err != nil {
level.Error(logger).Log("msg", "failed to write block", "err", err)
s.closeLoadedBlocks(loaded, blocksIter)
- return errors.Wrap(err, "failed to write block")
+ return nil, errors.Wrap(err, "failed to write block")
}
+
+ meta.Blocks = append(meta.Blocks, built.BlockRef)
}
if err := newBlocks.Err(); err != nil {
// TODO(owen-d): metrics
level.Error(logger).Log("msg", "failed to generate bloom", "err", err)
s.closeLoadedBlocks(loaded, blocksIter)
- return errors.Wrap(err, "failed to generate bloom")
+ return nil, errors.Wrap(err, "failed to generate bloom")
}
// Close pre-existing blocks
s.closeLoadedBlocks(loaded, blocksIter)
+
+ // Write the new meta
+ ref, err := bloomshipper.MetaRefFrom(tenant, table.String(), gap.bounds, meta.Sources, meta.Blocks)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to checksum meta", "err", err)
+ return nil, errors.Wrap(err, "failed to checksum meta")
+ }
+ meta.MetaRef = ref
+
+ if err := client.PutMeta(ctx, meta); err != nil {
+ level.Error(logger).Log("msg", "failed to write meta", "err", err)
+ return nil, errors.Wrap(err, "failed to write meta")
+ }
+ created = append(created, meta)
}
}
- // TODO(owen-d): build meta from blocks
- // TODO(owen-d): reap tombstones, old metas
-
level.Debug(logger).Log("msg", "finished bloom generation", "blocks", blockCt, "tsdbs", tsdbCt)
- return nil
-
+ return created, nil
}
-func (s *SimpleBloomController) loadWorkForGap(
- ctx context.Context,
- table config.DayTime,
- tenant string,
- id tsdb.Identifier,
- gap gapWithBlocks,
-) (v1.CloseableIterator[*v1.Series], v1.CloseableIterator[*bloomshipper.CloseableBlockQuerier], error) {
- // load a series iterator for the gap
- seriesItr, err := s.tsdbStore.LoadTSDB(ctx, table, tenant, id, gap.bounds)
- if err != nil {
- return nil, nil, errors.Wrap(err, "failed to load tsdb")
+// outdatedMetas returns metas that are outdated and need to be removed:
+// a meta is outdated when its entire ownership range is covered by other
+// metas built from strictly newer TSDBs.
+func outdatedMetas(metas []bloomshipper.Meta) (outdated []bloomshipper.Meta) {
+ // first, ensure data is sorted so we can take advantage of that
+ sort.Slice(metas, func(i, j int) bool {
+ return metas[i].Bounds.Less(metas[j].Bounds)
+ })
+
+ // NB(owen-d): time complexity shouldn't be a problem
+ // given the number of metas should be low (famous last words, i know).
+ for i := range metas {
+ a := metas[i]
+
+ var overlaps []v1.FingerprintBounds
+
+ for j := range metas {
+ if j == i {
+ continue
+ }
+
+ b := metas[j]
+ intersection := a.Bounds.Intersection(b.Bounds)
+ if intersection == nil {
+ if a.Bounds.Cmp(b.Bounds.Min) == v1.After {
+ // the metas are sorted by bounds, so all subsequent metas start beyond a's range; we can break
+ break
+ }
+ // otherwise, just check the next meta
+ continue
+ }
+
+ // we can only remove older data, not data which may be newer
+ if !tsdbsStrictlyNewer(b.Sources, a.Sources) {
+ continue
+ }
+
+ // because we've sorted the metas, we only have to test overlaps against the last
+ // overlap we found (if any)
+ if len(overlaps) == 0 {
+ overlaps = append(overlaps, *intersection)
+ continue
+ }
+
+ // best-effort merge of the new intersection into the last overlap on this first pass
+ last := overlaps[len(overlaps)-1]
+ overlaps = append(overlaps[:len(overlaps)-1], last.Union(*intersection)...)
+
+ }
+
+ if coversFullRange(a.Bounds, overlaps) {
+ outdated = append(outdated, a)
+ }
}
+ return
+}
- // load a blocks iterator for the gap
- fetcher, err := s.bloomStore.Fetcher(table.ModelTime())
- if err != nil {
- return nil, nil, errors.Wrap(err, "failed to get fetcher")
+func coversFullRange(bounds v1.FingerprintBounds, overlaps []v1.FingerprintBounds) bool {
+ // if there are no overlaps, the range is not covered
+ if len(overlaps) == 0 {
+ return false
}
- blocksIter, err := newBatchedBlockLoader(ctx, fetcher, gap.blocks)
- if err != nil {
- return nil, nil, errors.Wrap(err, "failed to load blocks")
+ // keep track of bounds which need to be filled in order
+ // for the overlaps to cover the full range
+ missing := []v1.FingerprintBounds{bounds}
+ ignores := make(map[int]bool)
+ for _, overlap := range overlaps {
+ var i int
+ for {
+ if i >= len(missing) {
+ break
+ }
+
+ if ignores[i] {
+ i++
+ continue
+ }
+
+ remaining := missing[i].Unless(overlap)
+ switch len(remaining) {
+ case 0:
+ // this range is covered, ignore it
+ ignores[i] = true
+ case 1:
+ // this range is partially covered, update it
+ missing[i] = remaining[0]
+ case 2:
+ // this range has been partially covered in the middle,
+ // split it into two ranges and append
+ ignores[i] = true
+ missing = append(missing, remaining...)
+ }
+ i++
+ }
+
}
- return seriesItr, blocksIter, nil
+ return len(ignores) == len(missing)
+}
+
+// tsdbsStrictlyNewer reports whether every tsdb in as is at least as new as every tsdb in bs
+func tsdbsStrictlyNewer(as, bs []tsdb.SingleTenantTSDBIdentifier) bool {
+ for _, a := range as {
+ for _, b := range bs {
+ if a.TS.Before(b.TS) {
+ return false
+ }
+ }
+ }
+ return true
}
func (s *SimpleBloomController) closeLoadedBlocks(toClose []io.Closer, it v1.CloseableIterator[*bloomshipper.CloseableBlockQuerier]) {
@@ -275,7 +519,7 @@ type gapWithBlocks struct {
// of the same chunks we need to ensure are indexed, just from previous tsdb iterations.
// This is a performance optimization to avoid expensive re-indexing
type blockPlan struct {
- tsdb tsdb.Identifier
+ tsdb tsdb.SingleTenantTSDBIdentifier
gaps []gapWithBlocks
}
@@ -353,7 +597,7 @@ func blockPlansForGaps(tsdbs []tsdbGaps, metas []bloomshipper.Meta) ([]blockPlan
// Used to signal the gaps that need to be populated for a tsdb
type tsdbGaps struct {
- tsdb tsdb.Identifier
+ tsdb tsdb.SingleTenantTSDBIdentifier
gaps []v1.FingerprintBounds
}
@@ -361,7 +605,7 @@ type tsdbGaps struct {
// that for each TSDB, there are metas covering the entire ownership range which were generated from that specific TSDB.
func gapsBetweenTSDBsAndMetas(
ownershipRange v1.FingerprintBounds,
- tsdbs []tsdb.Identifier,
+ tsdbs []tsdb.SingleTenantTSDBIdentifier,
metas []bloomshipper.Meta,
) (res []tsdbGaps, err error) {
for _, db := range tsdbs {
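
For intuition, a minimal self-contained sketch of the outdated-meta rule, using plain [min, max] uint64 ranges in place of bloomshipper.Meta and v1.FingerprintBounds (all names below are illustrative, not Loki APIs). It reproduces the "single outdated via partitions" case from the tests that follow: one newer meta covering 0-10 renders both older partitions outdated.

package main

import "fmt"

// span stands in for v1.FingerprintBounds: an inclusive [min, max] range.
type span struct{ min, max uint64 }

// covered reports whether the union of rs covers s without holes; a greedy
// sweep playing the role of coversFullRange above.
func covered(s span, rs []span) bool {
	cur := s.min
	for {
		advanced := false
		for _, r := range rs {
			if r.min <= cur && cur <= r.max {
				if r.max >= s.max {
					return true
				}
				cur = r.max + 1
				advanced = true
			}
		}
		if !advanced {
			return false
		}
	}
}

func main() {
	old := []span{{0, 5}, {6, 10}} // metas built from TSDB ts=0
	newer := span{0, 10}           // meta built from TSDB ts=1
	for _, m := range old {
		// the newer meta's sources are strictly newer, so its bounds count
		// as overlaps; full coverage marks the old meta outdated
		fmt.Printf("meta %v outdated: %v\n", m, covered(m, []span{newer}))
	}
}
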
diff --git a/pkg/bloomcompactor/controller_test.go b/pkg/bloomcompactor/controller_test.go
index 0660a5b601ee..72653c292b18 100644
--- a/pkg/bloomcompactor/controller_test.go
+++ b/pkg/bloomcompactor/controller_test.go
@@ -142,14 +142,14 @@ func Test_gapsBetweenTSDBsAndMetas(t *testing.T) {
err bool
exp []tsdbGaps
ownershipRange v1.FingerprintBounds
- tsdbs []tsdb.Identifier
+ tsdbs []tsdb.SingleTenantTSDBIdentifier
metas []bloomshipper.Meta
}{
{
desc: "non-overlapping tsdbs and metas",
err: true,
ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.Identifier{tsdbID(0)},
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
metas: []bloomshipper.Meta{
genMeta(11, 20, []int{0}, nil),
},
@@ -157,7 +157,7 @@ func Test_gapsBetweenTSDBsAndMetas(t *testing.T) {
{
desc: "single tsdb",
ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.Identifier{tsdbID(0)},
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
metas: []bloomshipper.Meta{
genMeta(4, 8, []int{0}, nil),
},
@@ -174,7 +174,7 @@ func Test_gapsBetweenTSDBsAndMetas(t *testing.T) {
{
desc: "multiple tsdbs with separate blocks",
ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.Identifier{tsdbID(0), tsdbID(1)},
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0), tsdbID(1)},
metas: []bloomshipper.Meta{
genMeta(0, 5, []int{0}, nil),
genMeta(6, 10, []int{1}, nil),
@@ -197,7 +197,7 @@ func Test_gapsBetweenTSDBsAndMetas(t *testing.T) {
{
desc: "multiple tsdbs with the same blocks",
ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.Identifier{tsdbID(0), tsdbID(1)},
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0), tsdbID(1)},
metas: []bloomshipper.Meta{
genMeta(0, 5, []int{0, 1}, nil),
genMeta(6, 8, []int{1}, nil),
@@ -242,7 +242,7 @@ func Test_blockPlansForGaps(t *testing.T) {
for _, tc := range []struct {
desc string
ownershipRange v1.FingerprintBounds
- tsdbs []tsdb.Identifier
+ tsdbs []tsdb.SingleTenantTSDBIdentifier
metas []bloomshipper.Meta
err bool
exp []blockPlan
@@ -250,7 +250,7 @@ func Test_blockPlansForGaps(t *testing.T) {
{
desc: "single overlapping meta+no overlapping block",
ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.Identifier{tsdbID(0)},
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
metas: []bloomshipper.Meta{
genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(11, 20)}),
},
@@ -268,7 +268,7 @@ func Test_blockPlansForGaps(t *testing.T) {
{
desc: "single overlapping meta+one overlapping block",
ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.Identifier{tsdbID(0)},
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
metas: []bloomshipper.Meta{
genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(9, 20)}),
},
@@ -290,7 +290,7 @@ func Test_blockPlansForGaps(t *testing.T) {
// but we can trim the range needing generation
desc: "trims up to date area",
ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.Identifier{tsdbID(0)},
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
metas: []bloomshipper.Meta{
genMeta(9, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), // block for same tsdb
genMeta(9, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), // block for different tsdb
@@ -309,7 +309,7 @@ func Test_blockPlansForGaps(t *testing.T) {
{
desc: "uses old block for overlapping range",
ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.Identifier{tsdbID(0)},
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
metas: []bloomshipper.Meta{
genMeta(9, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), // block for same tsdb
genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(5, 20)}), // block for different tsdb
@@ -329,7 +329,7 @@ func Test_blockPlansForGaps(t *testing.T) {
{
desc: "multi case",
ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.Identifier{tsdbID(0), tsdbID(1)}, // generate for both tsdbs
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0), tsdbID(1)}, // generate for both tsdbs
metas: []bloomshipper.Meta{
genMeta(0, 2, []int{0}, []bloomshipper.BlockRef{
genBlockRef(0, 1),
@@ -377,7 +377,7 @@ func Test_blockPlansForGaps(t *testing.T) {
{
desc: "dedupes block refs",
ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.Identifier{tsdbID(0)},
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
metas: []bloomshipper.Meta{
genMeta(9, 20, []int{1}, []bloomshipper.BlockRef{
genBlockRef(1, 4),
@@ -421,3 +421,144 @@ func Test_blockPlansForGaps(t *testing.T) {
})
}
}
+
+func Test_coversFullRange(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ src v1.FingerprintBounds
+ overlaps []v1.FingerprintBounds
+ exp bool
+ }{
+ {
+ desc: "empty",
+ src: v1.NewBounds(0, 10),
+ overlaps: []v1.FingerprintBounds{},
+ exp: false,
+ },
+ {
+ desc: "single_full_range",
+ src: v1.NewBounds(0, 10),
+ overlaps: []v1.FingerprintBounds{
+ v1.NewBounds(0, 10),
+ },
+ exp: true,
+ },
+ {
+ desc: "single_partial_range",
+ src: v1.NewBounds(0, 10),
+ overlaps: []v1.FingerprintBounds{
+ v1.NewBounds(0, 5),
+ },
+ exp: false,
+ },
+ {
+ desc: "multiple_full_ranges",
+ src: v1.NewBounds(0, 10),
+ overlaps: []v1.FingerprintBounds{
+ v1.NewBounds(0, 5),
+ v1.NewBounds(6, 10),
+ },
+ exp: true,
+ },
+ {
+ desc: "multiple_partial_ranges",
+ src: v1.NewBounds(0, 10),
+ overlaps: []v1.FingerprintBounds{
+ v1.NewBounds(0, 5),
+ v1.NewBounds(7, 8),
+ },
+ exp: false,
+ },
+ {
+ desc: "wraps_partial_range",
+ src: v1.NewBounds(10, 20),
+ overlaps: []v1.FingerprintBounds{
+ v1.NewBounds(0, 12),
+ v1.NewBounds(13, 15),
+ v1.NewBounds(19, 21),
+ },
+ exp: false,
+ },
+ {
+ desc: "wraps_full_range",
+ src: v1.NewBounds(10, 20),
+ overlaps: []v1.FingerprintBounds{
+ v1.NewBounds(0, 12),
+ v1.NewBounds(13, 15),
+ v1.NewBounds(16, 25),
+ },
+ exp: true,
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ require.Equal(t, tc.exp, coversFullRange(tc.src, tc.overlaps))
+ })
+ }
+}
+
+func Test_OutdatedMetas(t *testing.T) {
+ gen := func(bounds v1.FingerprintBounds, tsdbTimes ...model.Time) (meta bloomshipper.Meta) {
+ for _, tsdbTime := range tsdbTimes {
+ meta.Sources = append(meta.Sources, tsdb.SingleTenantTSDBIdentifier{TS: tsdbTime.Time()})
+ }
+ meta.Bounds = bounds
+ return meta
+ }
+
+ for _, tc := range []struct {
+ desc string
+ metas []bloomshipper.Meta
+ exp []bloomshipper.Meta
+ }{
+ {
+ desc: "no metas",
+ metas: nil,
+ exp: nil,
+ },
+ {
+ desc: "single meta",
+ metas: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 10), 0),
+ },
+ exp: nil,
+ },
+ {
+ desc: "single outdated meta",
+ metas: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 10), 0),
+ gen(v1.NewBounds(0, 10), 1),
+ },
+ exp: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 10), 0),
+ },
+ },
+ {
+ desc: "single outdated via partitions",
+ metas: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 5), 0),
+ gen(v1.NewBounds(6, 10), 0),
+ gen(v1.NewBounds(0, 10), 1),
+ },
+ exp: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 5), 0),
+ gen(v1.NewBounds(6, 10), 0),
+ },
+ },
+ {
+ desc: "multi tsdbs",
+ metas: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 5), 0, 1),
+ gen(v1.NewBounds(6, 10), 0, 1),
+ gen(v1.NewBounds(0, 10), 2, 3),
+ },
+ exp: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 5), 0, 1),
+ gen(v1.NewBounds(6, 10), 0, 1),
+ },
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ require.Equal(t, tc.exp, outdatedMetas(tc.metas))
+ })
+ }
+}
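
Worked example: tracing coversFullRange on the wraps_full_range case above (src 10-20; overlaps 0-12, 13-15, 16-25). missing starts as [10-20]; Unless(0-12) leaves [13-20]; Unless(13-15) leaves [16-20]; Unless(16-25) leaves nothing, so entry 0 is marked in ignores (this relies on the b.Within(target) fix to Unless in bounds.go below, since 16-20 sits inside 16-25 without being equal to it). With len(ignores) == len(missing) == 1, the function returns true. In wraps_partial_range the final overlap is 19-21 instead, Unless leaves [16-18], nothing is ignored, and the result is false.
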
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
index 5f4d254e8f04..6850cf31811c 100644
--- a/pkg/bloomgateway/util_test.go
+++ b/pkg/bloomgateway/util_test.go
@@ -323,8 +323,8 @@ func createBlocks(t *testing.T, tenant string, n int, from, through model.Time,
MetaRef: bloomshipper.MetaRef{
Ref: ref,
},
- Tombstones: []bloomshipper.BlockRef{},
- Blocks: []bloomshipper.BlockRef{blockRef},
+ BlockTombstones: []bloomshipper.BlockRef{},
+ Blocks: []bloomshipper.BlockRef{blockRef},
}
block, data, _ := v1.MakeBlock(t, n, fromFp, throughFp, from, through)
// Printing fingerprints and the log lines of its chunks comes handy for debugging...
diff --git a/pkg/storage/bloom/v1/bounds.go b/pkg/storage/bloom/v1/bounds.go
index 507f6e45aefc..8f3edd914209 100644
--- a/pkg/storage/bloom/v1/bounds.go
+++ b/pkg/storage/bloom/v1/bounds.go
@@ -125,10 +125,10 @@ func (b FingerprintBounds) Intersection(target FingerprintBounds) *FingerprintBo
// Union returns the union of the two bounds
func (b FingerprintBounds) Union(target FingerprintBounds) (res []FingerprintBounds) {
if !b.Overlaps(target) {
- if b.Cmp(target.Min) == Before {
- return []FingerprintBounds{target, b}
+ if b.Less(target) {
+ return []FingerprintBounds{b, target}
}
- return []FingerprintBounds{b, target}
+ return []FingerprintBounds{target, b}
}
return []FingerprintBounds{
@@ -145,7 +145,7 @@ func (b FingerprintBounds) Unless(target FingerprintBounds) (res []FingerprintBo
return []FingerprintBounds{b}
}
- if b == target {
+ if b.Within(target) {
return nil
}
diff --git a/pkg/storage/bloom/v1/bounds_test.go b/pkg/storage/bloom/v1/bounds_test.go
index 629eac61a2af..e8362a2b283f 100644
--- a/pkg/storage/bloom/v1/bounds_test.go
+++ b/pkg/storage/bloom/v1/bounds_test.go
@@ -98,7 +98,7 @@ func Test_FingerprintBounds_Union(t *testing.T) {
}, NewBounds(5, 15).Union(target))
}
-func Test_FingerprintBounds_Xor(t *testing.T) {
+func Test_FingerprintBounds_Unless(t *testing.T) {
t.Parallel()
target := NewBounds(10, 20)
assert.Equal(t, []FingerprintBounds{
diff --git a/pkg/storage/bloom/v1/fuse_test.go b/pkg/storage/bloom/v1/fuse_test.go
index 2ab0a51bced2..1e35895794c2 100644
--- a/pkg/storage/bloom/v1/fuse_test.go
+++ b/pkg/storage/bloom/v1/fuse_test.go
@@ -13,7 +13,6 @@ import (
)
func TestFusedQuerier(t *testing.T) {
- t.Parallel()
// references for linking in memory reader+writer
indexBuf := bytes.NewBuffer(nil)
bloomsBuf := bytes.NewBuffer(nil)
diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go
index 1dbfac579c5a..882b0eab41c2 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client.go
@@ -88,45 +88,66 @@ type Meta struct {
// The specific TSDB files used to generate the block.
Sources []tsdb.SingleTenantTSDBIdentifier
+ // TODO(owen-d): remove, unused
// Old blocks which can be deleted in the future. These should be from previous compaction rounds.
- Tombstones []BlockRef
+ BlockTombstones []BlockRef
// A list of blocks that were generated
Blocks []BlockRef
}
-// TODO(owen-d): use this to update internal ref's checksum.
-func (m Meta) Checksum() (uint32, error) {
+func MetaRefFrom(
+ tenant,
+ table string,
+ bounds v1.FingerprintBounds,
+ sources []tsdb.SingleTenantTSDBIdentifier,
+ blocks []BlockRef,
+) (MetaRef, error) {
+
h := v1.Crc32HashPool.Get()
defer v1.Crc32HashPool.Put(h)
- err := m.Bounds.Hash(h)
+ err := bounds.Hash(h)
if err != nil {
- return 0, errors.Wrap(err, "writing OwnershipRange")
+ return MetaRef{}, errors.Wrap(err, "writing OwnershipRange")
}
- for _, tombstone := range m.Tombstones {
- err = tombstone.Hash(h)
+ for _, source := range sources {
+ err = source.Hash(h)
if err != nil {
- return 0, errors.Wrap(err, "writing Tombstones")
+ return MetaRef{}, errors.Wrap(err, "writing Sources")
}
}
- for _, source := range m.Sources {
- err = source.Hash(h)
- if err != nil {
- return 0, errors.Wrap(err, "writing Sources")
+ var (
+ start, end model.Time
+ )
+
+ for i, block := range blocks {
+ if i == 0 || block.StartTimestamp.Before(start) {
+ start = block.StartTimestamp
+ }
+
+ if block.EndTimestamp.After(end) {
+ end = block.EndTimestamp
}
- }
- for _, block := range m.Blocks {
err = block.Hash(h)
if err != nil {
- return 0, errors.Wrap(err, "writing Blocks")
+ return MetaRef{}, errors.Wrap(err, "writing Blocks")
}
}
- return h.Sum32(), nil
+ return MetaRef{
+ Ref: Ref{
+ TenantID: tenant,
+ TableName: table,
+ Bounds: bounds,
+ StartTimestamp: start,
+ EndTimestamp: end,
+ Checksum: h.Sum32(),
+ },
+ }, nil
}
@@ -200,6 +221,7 @@ type BlockClient interface {
type Client interface {
MetaClient
BlockClient
+ IsObjectNotFoundErr(err error) bool
Stop()
}
@@ -224,6 +246,10 @@ func NewBloomClient(cfg bloomStoreConfig, client client.ObjectClient, logger log
}, nil
}
+func (b *BloomClient) IsObjectNotFoundErr(err error) bool {
+ return b.client.IsObjectNotFoundErr(err)
+}
+
func (b *BloomClient) PutMeta(ctx context.Context, meta Meta) error {
data, err := json.Marshal(meta)
if err != nil {
@@ -300,6 +326,7 @@ func (b *BloomClient) DeleteBlocks(ctx context.Context, references []BlockRef) e
ref := references[idx]
key := b.Block(ref).Addr()
err := b.client.DeleteObject(ctx, key)
+
if err != nil {
return fmt.Errorf("error deleting block file: %w", err)
}
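
The upshot of MetaRefFrom above is that a meta's ref, and therefore its storage address, is derived from its contents: bounds, source TSDBs, and block refs. A rough standalone illustration of that content-addressing idea, with crc32.NewIEEE standing in for the pooled hasher and the strings standing in for the real Hash implementations (both are assumptions for the sketch):

package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	// stand-ins for bounds.Hash, source.Hash, and block.Hash: the checksum
	// (and thus the MetaRef) changes iff any of these inputs change
	h := crc32.NewIEEE()
	for _, in := range []string{"bounds:0-10", "tsdb:1707868800", "block:0-5"} {
		_, _ = h.Write([]byte(in))
	}
	fmt.Printf("meta checksum: %08x\n", h.Sum32())
}
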
diff --git a/pkg/storage/stores/shipper/bloomshipper/client_test.go b/pkg/storage/stores/shipper/bloomshipper/client_test.go
index 02d80429d7f1..897ed519946a 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client_test.go
@@ -63,8 +63,8 @@ func putMeta(c *BloomClient, tenant string, start model.Time, minFp, maxFp model
// EndTimestamp: start.Add(12 * time.Hour),
},
},
- Blocks: []BlockRef{},
- Tombstones: []BlockRef{},
+ Blocks: []BlockRef{},
+ BlockTombstones: []BlockRef{},
}
raw, _ := json.Marshal(meta)
return meta, c.client.PutObject(context.Background(), c.Meta(meta.MetaRef).Addr(), bytes.NewReader(raw))
@@ -129,8 +129,8 @@ func TestBloomClient_PutMeta(t *testing.T) {
// EndTimestamp: start.Add(12 * time.Hour),
},
},
- Blocks: []BlockRef{},
- Tombstones: []BlockRef{},
+ Blocks: []BlockRef{},
+ BlockTombstones: []BlockRef{},
}
err := c.PutMeta(ctx, meta)
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
index fd1e6157a1ca..40a695e0b8e6 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
@@ -34,8 +34,8 @@ func makeMetas(t *testing.T, schemaCfg config.SchemaConfig, ts model.Time, keysp
EndTimestamp: ts,
},
},
- Tombstones: []BlockRef{},
- Blocks: []BlockRef{},
+ BlockTombstones: []BlockRef{},
+ Blocks: []BlockRef{},
}
}
return metas
diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper.go b/pkg/storage/stores/shipper/bloomshipper/shipper.go
index dee8f2464bcb..fd755b0a204a 100644
--- a/pkg/storage/stores/shipper/bloomshipper/shipper.go
+++ b/pkg/storage/stores/shipper/bloomshipper/shipper.go
@@ -60,7 +60,7 @@ func BlocksForMetas(metas []Meta, interval Interval, keyspaces []v1.FingerprintB
blocks := make(map[BlockRef]bool) // block -> isTombstoned
for _, meta := range metas {
- for _, tombstone := range meta.Tombstones {
+ for _, tombstone := range meta.BlockTombstones {
blocks[tombstone] = true
}
for _, block := range meta.Blocks {
diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
index a3a9442ddfb0..c9e47f91fea2 100644
--- a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
@@ -31,7 +31,7 @@ func TestBloomShipper_findBlocks(t *testing.T) {
},
},
{
- Tombstones: []BlockRef{
+ BlockTombstones: []BlockRef{
createMatchingBlockRef(1),
createMatchingBlockRef(3),
},
diff --git a/pkg/storage/stores/shipper/bloomshipper/store_test.go b/pkg/storage/stores/shipper/bloomshipper/store_test.go
index 56179cde97c3..ca86cb94fa96 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store_test.go
@@ -83,8 +83,8 @@ func createMetaInStorage(store *BloomStore, tenant string, start model.Time, min
// EndTimestamp: start.Add(12 * time.Hour),
},
},
- Blocks: []BlockRef{},
- Tombstones: []BlockRef{},
+ Blocks: []BlockRef{},
+ BlockTombstones: []BlockRef{},
}
err := store.storeDo(start, func(s *bloomStoreEntry) error {
raw, _ := json.Marshal(meta)
From bd12e163390f8f85fc04a1deba6140f46d726c1b Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Wed, 14 Feb 2024 12:10:24 -0800
Subject: [PATCH 066/130] [Blooms] Use correct table address function (#11955)
---
pkg/bloomcompactor/controller.go | 8 ++++----
pkg/bloomcompactor/table_utils.go | 16 ----------------
pkg/bloomcompactor/tsdb.go | 6 +++---
pkg/bloomgateway/util_test.go | 2 +-
pkg/storage/config/schema_config.go | 4 +++-
5 files changed, 11 insertions(+), 25 deletions(-)
delete mode 100644 pkg/bloomcompactor/table_utils.go
diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go
index c29aec86ac5c..cc801dc27e55 100644
--- a/pkg/bloomcompactor/controller.go
+++ b/pkg/bloomcompactor/controller.go
@@ -77,7 +77,7 @@ func (s *SimpleBloomController) compactTenant(
client, err := s.bloomStore.Client(table.ModelTime())
if err != nil {
- level.Error(logger).Log("msg", "failed to get client", "err", err, "table", table.String())
+ level.Error(logger).Log("msg", "failed to get client", "err", err, "table", table.Addr())
return errors.Wrap(err, "failed to get client")
}
@@ -280,7 +280,7 @@ func (s *SimpleBloomController) buildGaps(
MetaRef: bloomshipper.MetaRef{
Ref: bloomshipper.Ref{
TenantID: tenant,
- TableName: table.String(),
+ TableName: table.Addr(),
Bounds: gap.bounds,
},
},
@@ -319,7 +319,7 @@ func (s *SimpleBloomController) buildGaps(
blockCt++
blk := newBlocks.At()
- built, err := bloomshipper.BlockFrom(tenant, table.String(), blk)
+ built, err := bloomshipper.BlockFrom(tenant, table.Addr(), blk)
if err != nil {
level.Error(logger).Log("msg", "failed to build block", "err", err)
return nil, errors.Wrap(err, "failed to build block")
@@ -348,7 +348,7 @@ func (s *SimpleBloomController) buildGaps(
s.closeLoadedBlocks(loaded, blocksIter)
// Write the new meta
- ref, err := bloomshipper.MetaRefFrom(tenant, table.String(), gap.bounds, meta.Sources, meta.Blocks)
+ ref, err := bloomshipper.MetaRefFrom(tenant, table.Addr(), gap.bounds, meta.Sources, meta.Blocks)
if err != nil {
level.Error(logger).Log("msg", "failed to checksum meta", "err", err)
return nil, errors.Wrap(err, "failed to checksum meta")
diff --git a/pkg/bloomcompactor/table_utils.go b/pkg/bloomcompactor/table_utils.go
deleted file mode 100644
index 55bc2e9a328f..000000000000
--- a/pkg/bloomcompactor/table_utils.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package bloomcompactor
-
-import (
- "github.com/prometheus/common/model"
-
- "github.com/grafana/loki/pkg/compactor/retention"
-)
-
-func getIntervalsForTables(tables []string) map[string]model.Interval {
- tablesIntervals := make(map[string]model.Interval, len(tables))
- for _, table := range tables {
- tablesIntervals[table] = retention.ExtractIntervalFromTableName(table)
- }
-
- return tablesIntervals
-}
diff --git a/pkg/bloomcompactor/tsdb.go b/pkg/bloomcompactor/tsdb.go
index ad7b2eafac4c..d19e185a9275 100644
--- a/pkg/bloomcompactor/tsdb.go
+++ b/pkg/bloomcompactor/tsdb.go
@@ -50,12 +50,12 @@ func NewBloomTSDBStore(storage storage.Client) *BloomTSDBStore {
}
func (b *BloomTSDBStore) UsersForPeriod(ctx context.Context, table config.DayTime) ([]string, error) {
- _, users, err := b.storage.ListFiles(ctx, table.Table(), true) // bypass cache for ease of testing
+ _, users, err := b.storage.ListFiles(ctx, table.Addr(), true) // bypass cache for ease of testing
return users, err
}
func (b *BloomTSDBStore) ResolveTSDBs(ctx context.Context, table config.DayTime, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) {
- indices, err := b.storage.ListUserFiles(ctx, table.Table(), tenant, true) // bypass cache for ease of testing
+ indices, err := b.storage.ListUserFiles(ctx, table.Addr(), tenant, true) // bypass cache for ease of testing
if err != nil {
return nil, errors.Wrap(err, "failed to list user files")
}
@@ -87,7 +87,7 @@ func (b *BloomTSDBStore) LoadTSDB(
) (v1.CloseableIterator[*v1.Series], error) {
withCompression := id.Name() + gzipExtension
- data, err := b.storage.GetUserFile(ctx, table.Table(), tenant, withCompression)
+ data, err := b.storage.GetUserFile(ctx, table.Addr(), tenant, withCompression)
if err != nil {
return nil, errors.Wrap(err, "failed to get file")
}
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
index 6850cf31811c..9b5ce6e897bb 100644
--- a/pkg/bloomgateway/util_test.go
+++ b/pkg/bloomgateway/util_test.go
@@ -311,7 +311,7 @@ func createBlocks(t *testing.T, tenant string, n int, from, through model.Time,
}
ref := bloomshipper.Ref{
TenantID: tenant,
- TableName: config.NewDayTime(truncateDay(from)).Table(),
+ TableName: config.NewDayTime(truncateDay(from)).Addr(),
Bounds: v1.NewBounds(fromFp, throughFp),
StartTimestamp: from,
EndTimestamp: through,
diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go
index 30b9de98b14b..b7c92c62c3d9 100644
--- a/pkg/storage/config/schema_config.go
+++ b/pkg/storage/config/schema_config.go
@@ -237,7 +237,9 @@ func (d DayTime) String() string {
return d.Time.Time().UTC().Format("2006-01-02")
}
-func (d DayTime) Table() string {
+// Addr returns the unix day offset as a string, which is used
+// as the address for the index table in storage.
+func (d DayTime) Addr() string {
return fmt.Sprintf("%d",
d.ModelTime().Time().UnixNano()/int64(ObjectStorageIndexRequiredPeriod))
}
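
For intuition, a minimal sketch of that arithmetic, assuming ObjectStorageIndexRequiredPeriod is the usual 24h period: the address is simply the number of whole days since the Unix epoch.

package main

import (
	"fmt"
	"time"
)

func main() {
	// a DayTime for 2024-02-14 yields table address "19767"
	d := time.Date(2024, 2, 14, 0, 0, 0, 0, time.UTC)
	fmt.Println(d.UnixNano() / int64(24*time.Hour)) // 19767
}
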
From 6434df73459f91676ca2463313a354fb9cb6d3d9 Mon Sep 17 00:00:00 2001
From: Salva Corts
Date: Thu, 15 Feb 2024 00:58:26 +0100
Subject: [PATCH 067/130] Bloom compactor shuffle-sharding (#11947)
---
pkg/bloomcompactor/bloomcompactor.go | 38 ++++-
pkg/bloomcompactor/bloomcompactor_test.go | 197 ++++++++++++++++++++++
pkg/bloomcompactor/sharding.go | 58 -------
pkg/bloomcompactor/sharding_test.go | 149 ----------------
pkg/bloomutils/ring.go | 23 ++-
pkg/bloomutils/ring_test.go | 19 +--
pkg/loki/modules.go | 3 +-
pkg/util/ring/sharding.go | 73 ++------
8 files changed, 259 insertions(+), 301 deletions(-)
create mode 100644 pkg/bloomcompactor/bloomcompactor_test.go
delete mode 100644 pkg/bloomcompactor/sharding.go
delete mode 100644 pkg/bloomcompactor/sharding_test.go
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index fa3b205aa31b..36dcf36ed1fb 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -2,7 +2,6 @@ package bloomcompactor
import (
"context"
- "math"
"sync"
"time"
@@ -11,16 +10,23 @@ import (
"github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/concurrency"
"github.com/grafana/dskit/multierror"
+ "github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
+ "github.com/grafana/loki/pkg/bloomutils"
"github.com/grafana/loki/pkg/storage"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
+ util_ring "github.com/grafana/loki/pkg/util/ring"
+)
+
+var (
+ RingOp = ring.NewOp([]ring.InstanceState{ring.JOINING, ring.ACTIVE}, nil)
)
/*
@@ -47,7 +53,7 @@ type Compactor struct {
// temporary workaround until bloomStore has implemented read/write shipper interface
bloomStore bloomshipper.Store
- sharding ShardingStrategy
+ sharding util_ring.TenantSharding
metrics *Metrics
btMetrics *v1.Metrics
@@ -59,7 +65,7 @@ func New(
storeCfg storage.Config,
clientMetrics storage.ClientMetrics,
fetcherProvider stores.ChunkFetcherProvider,
- sharding ShardingStrategy,
+ sharding util_ring.TenantSharding,
limits Limits,
logger log.Logger,
r prometheus.Registerer,
@@ -182,9 +188,24 @@ func (c *Compactor) tenants(ctx context.Context, table config.DayTime) (v1.Itera
return v1.NewSliceIter(tenants), nil
}
-// TODO(owen-d): implement w/ subrings
-func (c *Compactor) ownsTenant(_ string) (ownershipRange v1.FingerprintBounds, owns bool) {
- return v1.NewBounds(0, math.MaxUint64), true
+// ownsTenant returns the ownership range for the tenant, whether the compactor owns the tenant, and an error.
+func (c *Compactor) ownsTenant(tenant string) (v1.FingerprintBounds, bool, error) {
+ tenantRing, owned := c.sharding.OwnsTenant(tenant)
+ if !owned {
+ return v1.FingerprintBounds{}, false, nil
+ }
+
+ rs, err := tenantRing.GetAllHealthy(RingOp)
+ if err != nil {
+ return v1.FingerprintBounds{}, false, errors.Wrap(err, "getting ring healthy instances")
+ }
+
+ ownershipBounds, err := bloomutils.GetInstanceWithTokenRange(c.cfg.Ring.InstanceID, rs.Instances)
+ if err != nil {
+ return v1.FingerprintBounds{}, false, errors.Wrap(err, "getting instance token range")
+ }
+ return ownershipBounds, true, nil
}
// runs a single round of compaction for all relevant tenants and tables
@@ -232,7 +253,10 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error {
for tenants.Next() && tenants.Err() == nil && ctx.Err() == nil {
tenant := tenants.At()
- ownershipRange, owns := c.ownsTenant(tenant)
+ ownershipRange, owns, err := c.ownsTenant(tenant)
+ if err != nil {
+ return errors.Wrap(err, "checking tenant ownership")
+ }
if !owns {
continue
}
diff --git a/pkg/bloomcompactor/bloomcompactor_test.go b/pkg/bloomcompactor/bloomcompactor_test.go
new file mode 100644
index 000000000000..475ba8ec0585
--- /dev/null
+++ b/pkg/bloomcompactor/bloomcompactor_test.go
@@ -0,0 +1,197 @@
+package bloomcompactor
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "math"
+ "testing"
+ "time"
+
+ "github.com/grafana/dskit/services"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/stretchr/testify/require"
+
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ util_log "github.com/grafana/loki/pkg/util/log"
+ lokiring "github.com/grafana/loki/pkg/util/ring"
+ util_ring "github.com/grafana/loki/pkg/util/ring"
+ "github.com/grafana/loki/pkg/validation"
+)
+
+func TestCompactor_ownsTenant(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ limits Limits
+ compactors int
+
+ expectedCompactorsOwningTenant int
+ }{
+ {
+ name: "no sharding with one instance",
+ limits: mockLimits{
+ shardSize: 0,
+ },
+ compactors: 1,
+ expectedCompactorsOwningTenant: 1,
+ },
+ {
+ name: "no sharding with multiple instances",
+ limits: mockLimits{
+ shardSize: 0,
+ },
+ compactors: 10,
+ expectedCompactorsOwningTenant: 10,
+ },
+ {
+ name: "sharding with one instance",
+ limits: mockLimits{
+ shardSize: 5,
+ },
+ compactors: 1,
+ expectedCompactorsOwningTenant: 1,
+ },
+ {
+ name: "sharding with multiple instances",
+ limits: mockLimits{
+ shardSize: 5,
+ },
+ compactors: 10,
+ expectedCompactorsOwningTenant: 5,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ var ringManagers []*lokiring.RingManager
+ var compactors []*Compactor
+ for i := 0; i < tc.compactors; i++ {
+ var ringCfg lokiring.RingConfig
+ ringCfg.RegisterFlagsWithPrefix("", "", flag.NewFlagSet("ring", flag.PanicOnError))
+ ringCfg.KVStore.Store = "inmemory"
+ ringCfg.InstanceID = fmt.Sprintf("bloom-compactor-%d", i)
+ ringCfg.InstanceAddr = fmt.Sprintf("localhost-%d", i)
+
+ ringManager, err := lokiring.NewRingManager("bloom-compactor", lokiring.ServerMode, ringCfg, 1, 1, util_log.Logger, prometheus.NewRegistry())
+ require.NoError(t, err)
+ require.NoError(t, ringManager.StartAsync(context.Background()))
+
+ shuffleSharding := util_ring.NewTenantShuffleSharding(ringManager.Ring, ringManager.RingLifecycler, tc.limits.BloomCompactorShardSize)
+
+ compactor := &Compactor{
+ cfg: Config{
+ Ring: ringCfg,
+ },
+ sharding: shuffleSharding,
+ limits: tc.limits,
+ }
+
+ ringManagers = append(ringManagers, ringManager)
+ compactors = append(compactors, compactor)
+ }
+ defer func() {
+ // Stop all rings and wait for them to stop.
+ for _, ringManager := range ringManagers {
+ ringManager.StopAsync()
+ require.Eventually(t, func() bool {
+ return ringManager.State() == services.Terminated
+ }, 1*time.Minute, 100*time.Millisecond)
+ }
+ }()
+
+ // Wait for all rings to see each other.
+ for _, ringManager := range ringManagers {
+ require.Eventually(t, func() bool {
+ running := ringManager.State() == services.Running
+ discovered := ringManager.Ring.InstancesCount() == tc.compactors
+ return running && discovered
+ }, 1*time.Minute, 100*time.Millisecond)
+ }
+
+ var compactorOwnsTenant int
+ var compactorOwnershipRange []v1.FingerprintBounds
+ for _, compactor := range compactors {
+ ownershipRange, ownsTenant, err := compactor.ownsTenant("tenant")
+ require.NoError(t, err)
+ if ownsTenant {
+ compactorOwnsTenant++
+ compactorOwnershipRange = append(compactorOwnershipRange, ownershipRange)
+ }
+ }
+ require.Equal(t, tc.expectedCompactorsOwningTenant, compactorOwnsTenant)
+
+ coveredKeySpace := v1.NewBounds(math.MaxUint64, 0)
+ for i, boundsA := range compactorOwnershipRange {
+ for j, boundsB := range compactorOwnershipRange {
+ if i == j {
+ continue
+ }
+ // Assert that the fingerprint key-space is not overlapping
+ require.False(t, boundsA.Overlaps(boundsB))
+ }
+
+ if boundsA.Min < coveredKeySpace.Min {
+ coveredKeySpace.Min = boundsA.Min
+ }
+ if boundsA.Max > coveredKeySpace.Max {
+ coveredKeySpace.Max = boundsA.Max
+ }
+
+ // Assert that the fingerprint key-space is evenly distributed across the compactors
+ // The key-space may not divide evenly across the compactors, so we allow a
+ // delta of 10 when checking that it is reasonably evenly distributed.
+ fpPerTenant := math.MaxUint64 / uint64(tc.expectedCompactorsOwningTenant)
+ boundsLen := uint64(boundsA.Max - boundsA.Min)
+ require.InDelta(t, fpPerTenant, boundsLen, 10)
+ }
+ // Assert that the fingerprint key-space is complete
+ require.True(t, coveredKeySpace.Equal(v1.NewBounds(0, math.MaxUint64)))
+ })
+ }
+}
+
+type mockLimits struct {
+ shardSize int
+}
+
+func (m mockLimits) AllByUserID() map[string]*validation.Limits {
+ panic("implement me")
+}
+
+func (m mockLimits) DefaultLimits() *validation.Limits {
+ panic("implement me")
+}
+
+func (m mockLimits) VolumeMaxSeries(_ string) int {
+ panic("implement me")
+}
+
+func (m mockLimits) BloomCompactorShardSize(_ string) int {
+ return m.shardSize
+}
+
+func (m mockLimits) BloomCompactorChunksBatchSize(_ string) int {
+ panic("implement me")
+}
+
+func (m mockLimits) BloomCompactorMaxTableAge(_ string) time.Duration {
+ panic("implement me")
+}
+
+func (m mockLimits) BloomCompactorEnabled(_ string) bool {
+ panic("implement me")
+}
+
+func (m mockLimits) BloomNGramLength(_ string) int {
+ panic("implement me")
+}
+
+func (m mockLimits) BloomNGramSkip(_ string) int {
+ panic("implement me")
+}
+
+func (m mockLimits) BloomFalsePositiveRate(_ string) float64 {
+ panic("implement me")
+}
+
+func (m mockLimits) BloomCompactorMaxBlockSize(_ string) int {
+ panic("implement me")
+}
diff --git a/pkg/bloomcompactor/sharding.go b/pkg/bloomcompactor/sharding.go
deleted file mode 100644
index 9b3009bd5065..000000000000
--- a/pkg/bloomcompactor/sharding.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package bloomcompactor
-
-import (
- "github.com/grafana/dskit/ring"
-
- util_ring "github.com/grafana/loki/pkg/util/ring"
-)
-
-var (
- // TODO: Should we include LEAVING instances in the replication set?
- RingOp = ring.NewOp([]ring.InstanceState{ring.JOINING, ring.ACTIVE}, nil)
-)
-
-// ShardingStrategy describes whether compactor "owns" given user or job.
-type ShardingStrategy interface {
- util_ring.TenantSharding
- OwnsFingerprint(tenantID string, fp uint64) (bool, error)
-}
-
-type ShuffleShardingStrategy struct {
- util_ring.TenantSharding
- ringLifeCycler *ring.BasicLifecycler
-}
-
-func NewShuffleShardingStrategy(r *ring.Ring, ringLifecycler *ring.BasicLifecycler, limits Limits) *ShuffleShardingStrategy {
- s := ShuffleShardingStrategy{
- TenantSharding: util_ring.NewTenantShuffleSharding(r, ringLifecycler, limits.BloomCompactorShardSize),
- ringLifeCycler: ringLifecycler,
- }
-
- return &s
-}
-
-// OwnsFingerprint makes sure only a single compactor processes the fingerprint.
-func (s *ShuffleShardingStrategy) OwnsFingerprint(tenantID string, fp uint64) (bool, error) {
- if !s.OwnsTenant(tenantID) {
- return false, nil
- }
-
- tenantRing := s.GetTenantSubRing(tenantID)
- fpSharding := util_ring.NewFingerprintShuffleSharding(tenantRing, s.ringLifeCycler, RingOp)
- return fpSharding.OwnsFingerprint(fp)
-}
-
-// NoopStrategy is an implementation of the ShardingStrategy that does not
-// filter anything.
-type NoopStrategy struct {
- util_ring.NoopStrategy
-}
-
-// OwnsFingerprint implements TenantShuffleSharding.
-func (s *NoopStrategy) OwnsFingerprint(_ string, _ uint64) (bool, error) {
- return true, nil
-}
-
-func NewNoopStrategy() *NoopStrategy {
- return &NoopStrategy{NoopStrategy: util_ring.NoopStrategy{}}
-}
diff --git a/pkg/bloomcompactor/sharding_test.go b/pkg/bloomcompactor/sharding_test.go
deleted file mode 100644
index 4e79752279fb..000000000000
--- a/pkg/bloomcompactor/sharding_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package bloomcompactor
-
-import (
- "context"
- "flag"
- "fmt"
- "testing"
- "time"
-
- "github.com/grafana/dskit/services"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/common/model"
- "github.com/prometheus/prometheus/model/labels"
- "github.com/stretchr/testify/require"
-
- util_log "github.com/grafana/loki/pkg/util/log"
- lokiring "github.com/grafana/loki/pkg/util/ring"
- "github.com/grafana/loki/pkg/validation"
-)
-
-func TestShuffleSharding(t *testing.T) {
- const shardSize = 2
- const rings = 4
- const tenants = 2000
- const jobsPerTenant = 200
-
- var limits validation.Limits
- limits.RegisterFlags(flag.NewFlagSet("limits", flag.PanicOnError))
- overrides, err := validation.NewOverrides(limits, nil)
- require.NoError(t, err)
-
- var ringManagers []*lokiring.RingManager
- var shards []*ShuffleShardingStrategy
- for i := 0; i < rings; i++ {
- var ringCfg lokiring.RingConfig
- ringCfg.RegisterFlagsWithPrefix("", "", flag.NewFlagSet("ring", flag.PanicOnError))
- ringCfg.KVStore.Store = "inmemory"
- ringCfg.InstanceID = fmt.Sprintf("bloom-compactor-%d", i)
- ringCfg.InstanceAddr = fmt.Sprintf("localhost-%d", i)
-
- ringManager, err := lokiring.NewRingManager("bloom-compactor", lokiring.ServerMode, ringCfg, 1, 1, util_log.Logger, prometheus.NewRegistry())
- require.NoError(t, err)
- require.NoError(t, ringManager.StartAsync(context.Background()))
-
- sharding := NewShuffleShardingStrategy(ringManager.Ring, ringManager.RingLifecycler, mockLimits{
- Overrides: overrides,
- bloomCompactorShardSize: shardSize,
- })
-
- ringManagers = append(ringManagers, ringManager)
- shards = append(shards, sharding)
- }
-
- // Wait for all rings to see each other.
- for i := 0; i < rings; i++ {
- require.Eventually(t, func() bool {
- running := ringManagers[i].State() == services.Running
- discovered := ringManagers[i].Ring.InstancesCount() == rings
- return running && discovered
- }, 1*time.Minute, 100*time.Millisecond)
- }
-
- // This is kind of an un-deterministic test, because sharding is random
- // and the seed is initialized by the ring lib.
- // Here we'll generate a bunch of tenants and test that if the sharding doesn't own the tenant,
- // that's because the tenant is owned by other ring instances.
- shard := shards[0]
- otherShards := shards[1:]
- var ownedTenants, ownedJobs int
- for i := 0; i < tenants; i++ {
- tenant := fmt.Sprintf("tenant-%d", i)
- ownsTenant := shard.OwnsTenant(tenant)
-
- var tenantOwnedByOther int
- for _, other := range otherShards {
- otherOwns := other.OwnsTenant(tenant)
- if otherOwns {
- tenantOwnedByOther++
- }
- }
-
- // If this shard owns the tenant, shardSize-1 other members should also own the tenant.
- // Otherwise, shardSize other members should own the tenant.
- if ownsTenant {
- require.Equal(t, shardSize-1, tenantOwnedByOther)
- ownedTenants++
- } else {
- require.Equal(t, shardSize, tenantOwnedByOther)
- }
-
- for j := 0; j < jobsPerTenant; j++ {
- lbls := labels.FromStrings("namespace", fmt.Sprintf("namespace-%d", j))
- fp := model.Fingerprint(lbls.Hash())
- ownsFingerprint, err := shard.OwnsFingerprint(tenant, uint64(fp))
- require.NoError(t, err)
-
- var jobOwnedByOther int
- for _, other := range otherShards {
- otherOwns, err := other.OwnsFingerprint(tenant, uint64(fp))
- require.NoError(t, err)
- if otherOwns {
- jobOwnedByOther++
- }
- }
-
- // If this shard owns the job, no one else should own the job.
- // And if this shard doesn't own the job, only one of the other shards should own the job.
- if ownsFingerprint {
- require.Equal(t, 0, jobOwnedByOther)
- ownedJobs++
- } else {
- require.Equal(t, 1, jobOwnedByOther)
- }
- }
- }
-
- t.Logf("owned tenants: %d (out of %d)", ownedTenants, tenants)
- t.Logf("owned jobs: %d (out of %d)", ownedJobs, tenants*jobsPerTenant)
-
- // Stop all rings and wait for them to stop.
- for i := 0; i < rings; i++ {
- ringManagers[i].StopAsync()
- require.Eventually(t, func() bool {
- return ringManagers[i].State() == services.Terminated
- }, 1*time.Minute, 100*time.Millisecond)
- }
-}
-
-type mockLimits struct {
- *validation.Overrides
- bloomCompactorShardSize int
- chunksDownloadingBatchSize int
- fpRate float64
-}
-
-func (m mockLimits) BloomFalsePositiveRate(_ string) float64 {
- return m.fpRate
-}
-
-func (m mockLimits) BloomCompactorShardSize(_ string) int {
- return m.bloomCompactorShardSize
-}
-
-func (m mockLimits) BloomCompactorChunksBatchSize(_ string) int {
- if m.chunksDownloadingBatchSize != 0 {
- return m.chunksDownloadingBatchSize
- }
- return 1
-}
diff --git a/pkg/bloomutils/ring.go b/pkg/bloomutils/ring.go
index 08e62a13acb7..d05b91d644df 100644
--- a/pkg/bloomutils/ring.go
+++ b/pkg/bloomutils/ring.go
@@ -8,6 +8,7 @@ import (
"sort"
"github.com/grafana/dskit/ring"
+ "github.com/prometheus/common/model"
"golang.org/x/exp/slices"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
@@ -38,11 +39,11 @@ func (i InstancesWithTokenRange) Contains(token uint32) bool {
return false
}
-// GetInstanceTokenRange calculates the token range for a specific instance
+// GetInstanceWithTokenRange calculates the token range for a specific instance
// with given id based on the first token in the ring.
// This assumes that each instance in the ring is configured with only a single
// token.
-func GetInstanceWithTokenRange(id string, instances []ring.InstanceDesc) InstancesWithTokenRange {
+func GetInstanceWithTokenRange(id string, instances []ring.InstanceDesc) (v1.FingerprintBounds, error) {
// Sorting the tokens of the instances would not be necessary if there is
// only a single token per instances, however, since we only assume one
@@ -64,23 +65,21 @@ func GetInstanceWithTokenRange(id string, instances []ring.InstanceDesc) Instanc
// instance with Id == id not found
if idx == -1 {
- return InstancesWithTokenRange{}
+ return v1.FingerprintBounds{}, ring.ErrInstanceNotFound
}
- i := uint32(idx)
- n := uint32(len(instances))
- step := math.MaxUint32 / n
+ i := uint64(idx)
+ n := uint64(len(instances))
+ step := math.MaxUint64 / n
- minToken := step * i
- maxToken := step*i + step - 1
+ minToken := model.Fingerprint(step * i)
+ maxToken := model.Fingerprint(step*i + step - 1)
if i == n-1 {
// extend the last token range to MaxUint64
- maxToken = math.MaxUint32
+ maxToken = math.MaxUint64
}
- return InstancesWithTokenRange{
- {MinToken: minToken, MaxToken: maxToken, Instance: instances[i]},
- }
+ return v1.NewBounds(minToken, maxToken), nil
}
// GetInstancesWithTokenRanges calculates the token ranges for a specific
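
A self-contained sketch of the ownership math above: n single-token instances split the uint64 fingerprint keyspace into equal slices by sorted-token index, with the last slice extended to MaxUint64. For n=3 this reproduces the bounds expected in the updated ring_test.go below.

package main

import (
	"fmt"
	"math"
)

func main() {
	const n = 3
	step := uint64(math.MaxUint64 / n)
	for i := uint64(0); i < n; i++ {
		min, max := step*i, step*i+step-1
		if i == n-1 {
			// the last instance absorbs the remainder of the keyspace
			max = math.MaxUint64
		}
		fmt.Printf("instance %d owns [%d, %d]\n", i, min, max)
	}
}
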
diff --git a/pkg/bloomutils/ring_test.go b/pkg/bloomutils/ring_test.go
index 30da072021ed..1346559372c3 100644
--- a/pkg/bloomutils/ring_test.go
+++ b/pkg/bloomutils/ring_test.go
@@ -6,6 +6,8 @@ import (
"github.com/grafana/dskit/ring"
"github.com/stretchr/testify/require"
+
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
func TestBloomGatewayClient_SortInstancesByToken(t *testing.T) {
@@ -67,7 +69,7 @@ func TestBloomGatewayClient_GetInstanceWithTokenRange(t *testing.T) {
for name, tc := range map[string]struct {
id string
input []ring.InstanceDesc
- expected InstancesWithTokenRange
+ expected v1.FingerprintBounds
}{
"first instance includes 0 token": {
id: "3",
@@ -76,9 +78,7 @@ func TestBloomGatewayClient_GetInstanceWithTokenRange(t *testing.T) {
{Id: "2", Tokens: []uint32{5}},
{Id: "3", Tokens: []uint32{1}},
},
- expected: InstancesWithTokenRange{
- {Instance: ring.InstanceDesc{Id: "3", Tokens: []uint32{1}}, MinToken: 0, MaxToken: math.MaxUint32/3 - 1},
- },
+ expected: v1.NewBounds(0, math.MaxUint64/3-1),
},
"middle instance": {
id: "1",
@@ -87,9 +87,7 @@ func TestBloomGatewayClient_GetInstanceWithTokenRange(t *testing.T) {
{Id: "2", Tokens: []uint32{5}},
{Id: "3", Tokens: []uint32{1}},
},
- expected: InstancesWithTokenRange{
- {Instance: ring.InstanceDesc{Id: "1", Tokens: []uint32{3}}, MinToken: math.MaxUint32 / 3, MaxToken: math.MaxUint32/3*2 - 1},
- },
+ expected: v1.NewBounds(math.MaxUint64/3, math.MaxUint64/3*2-1),
},
"last instance includes MaxUint32 token": {
id: "2",
@@ -98,14 +96,13 @@ func TestBloomGatewayClient_GetInstanceWithTokenRange(t *testing.T) {
{Id: "2", Tokens: []uint32{5}},
{Id: "3", Tokens: []uint32{1}},
},
- expected: InstancesWithTokenRange{
- {Instance: ring.InstanceDesc{Id: "2", Tokens: []uint32{5}}, MinToken: math.MaxUint32 / 3 * 2, MaxToken: math.MaxUint32},
- },
+ expected: v1.NewBounds(math.MaxUint64/3*2, math.MaxUint64),
},
} {
tc := tc
t.Run(name, func(t *testing.T) {
- result := GetInstanceWithTokenRange(tc.id, tc.input)
+ result, err := GetInstanceWithTokenRange(tc.id, tc.input)
+ require.NoError(t, err)
require.Equal(t, tc.expected, result)
})
}
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 111d31395688..15ee955355a6 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -75,6 +75,7 @@ import (
util_log "github.com/grafana/loki/pkg/util/log"
"github.com/grafana/loki/pkg/util/querylimits"
lokiring "github.com/grafana/loki/pkg/util/ring"
+ util_ring "github.com/grafana/loki/pkg/util/ring"
serverutil "github.com/grafana/loki/pkg/util/server"
"github.com/grafana/loki/pkg/validation"
)
@@ -1418,7 +1419,7 @@ func (t *Loki) initBloomCompactor() (services.Service, error) {
logger := log.With(util_log.Logger, "component", "bloom-compactor")
- shuffleSharding := bloomcompactor.NewShuffleShardingStrategy(t.bloomCompactorRingManager.Ring, t.bloomCompactorRingManager.RingLifecycler, t.Overrides)
+ shuffleSharding := util_ring.NewTenantShuffleSharding(t.bloomCompactorRingManager.Ring, t.bloomCompactorRingManager.RingLifecycler, t.Overrides.BloomCompactorShardSize)
compactor, err := bloomcompactor.New(
t.Cfg.BloomCompactor,
diff --git a/pkg/util/ring/sharding.go b/pkg/util/ring/sharding.go
index 45a53cf40cfe..460e22d7f4c4 100644
--- a/pkg/util/ring/sharding.go
+++ b/pkg/util/ring/sharding.go
@@ -2,12 +2,10 @@ package ring
import (
"github.com/grafana/dskit/ring"
- "github.com/prometheus/common/model"
)
type TenantSharding interface {
- GetTenantSubRing(tenantID string) ring.ReadRing
- OwnsTenant(tenantID string) bool
+ OwnsTenant(tenantID string) (tenantRing ring.ReadRing, owned bool)
}
type TenantShuffleSharding struct {
@@ -28,60 +26,19 @@ func NewTenantShuffleSharding(
}
}
-func (s *TenantShuffleSharding) GetTenantSubRing(tenantID string) ring.ReadRing {
- shardSize := s.shardSizeForTenant(tenantID)
-
+func (s *TenantShuffleSharding) OwnsTenant(tenantID string) (ring.ReadRing, bool) {
// A shard size of 0 means shuffle sharding is disabled for this specific user,
+ shardSize := s.shardSizeForTenant(tenantID)
if shardSize <= 0 {
- return s.r
+ return s.r, true
}
- return s.r.ShuffleShard(tenantID, shardSize)
-}
-
-func (s *TenantShuffleSharding) OwnsTenant(tenantID string) bool {
- subRing := s.GetTenantSubRing(tenantID)
- return subRing.HasInstance(s.ringLifeCycler.GetInstanceID())
-}
-
-type FingerprintSharding interface {
- OwnsFingerprint(fp model.Fingerprint) (bool, error)
-}
-
-// FingerprintShuffleSharding is not thread-safe.
-type FingerprintShuffleSharding struct {
- r ring.ReadRing
- ringLifeCycler *ring.BasicLifecycler
- ringOp ring.Operation
-
- // Buffers for ring.Get() calls.
- bufDescs []ring.InstanceDesc
- bufHosts, bufZones []string
-}
-
-func NewFingerprintShuffleSharding(
- r ring.ReadRing,
- ringLifeCycler *ring.BasicLifecycler,
- ringOp ring.Operation,
-) *FingerprintShuffleSharding {
- s := FingerprintShuffleSharding{
- r: r,
- ringLifeCycler: ringLifeCycler,
- ringOp: ringOp,
+ subRing := s.r.ShuffleShard(tenantID, shardSize)
+ if subRing.HasInstance(s.ringLifeCycler.GetInstanceID()) {
+ return subRing, true
}
- s.bufDescs, s.bufHosts, s.bufZones = ring.MakeBuffersForGet()
-
- return &s
-}
-
-func (s *FingerprintShuffleSharding) OwnsFingerprint(fp uint64) (bool, error) {
- rs, err := s.r.Get(uint32(fp), s.ringOp, s.bufDescs, s.bufHosts, s.bufZones)
- if err != nil {
- return false, err
- }
-
- return rs.Includes(s.ringLifeCycler.GetInstanceAddr()), nil
+ return nil, false
}
// NoopStrategy is an implementation of the ShardingStrategy that does not
@@ -89,16 +46,6 @@ func (s *FingerprintShuffleSharding) OwnsFingerprint(fp uint64) (bool, error) {
type NoopStrategy struct{}
// OwnsTenant implements TenantShuffleSharding.
-func (s *NoopStrategy) OwnsTenant(_ string) bool {
- return false
-}
-
-// GetTenantSubRing implements TenantShuffleSharding.
-func (s *NoopStrategy) GetTenantSubRing(_ string) ring.ReadRing {
- return nil
-}
-
-// OwnsFingerprint implements FingerprintSharding.
-func (s *NoopStrategy) OwnsFingerprint(_ uint64) (bool, error) {
- return false, nil
+func (s *NoopStrategy) OwnsTenant(_ string) (ring.ReadRing, bool) {
+ return nil, false
}
From d0fae5cd690a6a1523004a4952e1957b2d8c263d Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Wed, 14 Feb 2024 16:21:44 -0800
Subject: [PATCH 068/130] [blooms] Remove unnecessary token sorting (#11958)
---
pkg/bloomutils/ring.go | 12 ++----------
1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/pkg/bloomutils/ring.go b/pkg/bloomutils/ring.go
index d05b91d644df..20bb446ba15d 100644
--- a/pkg/bloomutils/ring.go
+++ b/pkg/bloomutils/ring.go
@@ -45,16 +45,8 @@ func (i InstancesWithTokenRange) Contains(token uint32) bool {
// token.
func GetInstanceWithTokenRange(id string, instances []ring.InstanceDesc) (v1.FingerprintBounds, error) {
- // Sorting the tokens of the instances would not be necessary if there is
- // only a single token per instances, however, since we only assume one
- // token, but don't enforce one token, we keep the sorting.
- for _, inst := range instances {
- sort.Slice(inst.Tokens, func(i, j int) bool {
- return inst.Tokens[i] < inst.Tokens[j]
- })
- }
-
- // Sort instances
+ // Sort instances -- they may not be sorted
+ // because they're usually accessed by looking up the tokens (which are sorted)
sort.Slice(instances, func(i, j int) bool {
return instances[i].Tokens[0] < instances[j].Tokens[0]
})
From 2177037ee3d47bfacc425cae200e60d8b32dbd04 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Thu, 15 Feb 2024 12:34:16 +0100
Subject: [PATCH 069/130] (chore): Simplify implementation of instance sort
iterator (#11959)
Signed-off-by: Christian Haudum
---
pkg/bloomutils/iter.go | 37 --------------------------
pkg/bloomutils/ring.go | 53 +++++++++++++++++++++----------------
pkg/bloomutils/ring_test.go | 5 ++++
3 files changed, 35 insertions(+), 60 deletions(-)
delete mode 100644 pkg/bloomutils/iter.go
diff --git a/pkg/bloomutils/iter.go b/pkg/bloomutils/iter.go
deleted file mode 100644
index fdbe4a5e6258..000000000000
--- a/pkg/bloomutils/iter.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package bloomutils
-
-import (
- "io"
-
- v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
-)
-
-// sortMergeIterator implements v1.Iterator
-type sortMergeIterator[T any, C comparable, R any] struct {
- curr *R
- heap *v1.HeapIterator[v1.IndexedValue[C]]
- items []T
- transform func(T, C, *R) *R
- err error
-}
-
-func (it *sortMergeIterator[T, C, R]) Next() bool {
- ok := it.heap.Next()
- if !ok {
- it.err = io.EOF
- return false
- }
-
- group := it.heap.At()
- it.curr = it.transform(it.items[group.Index()], group.Value(), it.curr)
-
- return true
-}
-
-func (it *sortMergeIterator[T, C, R]) At() R {
- return *it.curr
-}
-
-func (it *sortMergeIterator[T, C, R]) Err() error {
- return it.err
-}
diff --git a/pkg/bloomutils/ring.go b/pkg/bloomutils/ring.go
index 20bb446ba15d..6da275f607c2 100644
--- a/pkg/bloomutils/ring.go
+++ b/pkg/bloomutils/ring.go
@@ -107,31 +107,38 @@ func GetInstancesWithTokenRanges(id string, instances []ring.InstanceDesc) Insta
// NewInstanceSortMergeIterator creates an iterator that yields instanceWithToken elements
// where the tokens of the elements are sorted in ascending order.
func NewInstanceSortMergeIterator(instances []ring.InstanceDesc) v1.Iterator[InstanceWithTokenRange] {
- it := &sortMergeIterator[ring.InstanceDesc, uint32, InstanceWithTokenRange]{
- items: instances,
- transform: func(item ring.InstanceDesc, val uint32, prev *InstanceWithTokenRange) *InstanceWithTokenRange {
- var prevToken uint32
- if prev != nil {
- prevToken = prev.MaxToken + 1
- }
- return &InstanceWithTokenRange{Instance: item, MinToken: prevToken, MaxToken: val}
- },
- }
- sequences := make([]v1.PeekingIterator[v1.IndexedValue[uint32]], 0, len(instances))
- for i := range instances {
- sort.Slice(instances[i].Tokens, func(a, b int) bool {
- return instances[i].Tokens[a] < instances[i].Tokens[b]
- })
- iter := v1.NewIterWithIndex[uint32](v1.NewSliceIter(instances[i].Tokens), i)
- sequences = append(sequences, v1.NewPeekingIter[v1.IndexedValue[uint32]](iter))
+
+ tokenIters := make([]v1.PeekingIterator[v1.IndexedValue[uint32]], 0, len(instances))
+ for i, inst := range instances {
+ sort.Slice(inst.Tokens, func(a, b int) bool { return inst.Tokens[a] < inst.Tokens[b] })
+ itr := v1.NewIterWithIndex(v1.NewSliceIter[uint32](inst.Tokens), i)
+ tokenIters = append(tokenIters, v1.NewPeekingIter[v1.IndexedValue[uint32]](itr))
}
- it.heap = v1.NewHeapIterator(
- func(i, j v1.IndexedValue[uint32]) bool {
- return i.Value() < j.Value()
+
+ heapIter := v1.NewHeapIterator[v1.IndexedValue[uint32]](
+ func(iv1, iv2 v1.IndexedValue[uint32]) bool {
+ return iv1.Value() < iv2.Value()
},
- sequences...,
+ tokenIters...,
)
- it.err = nil
- return it
+ prevToken := -1
+ return v1.NewDedupingIter[v1.IndexedValue[uint32], InstanceWithTokenRange](
+ func(iv v1.IndexedValue[uint32], iwtr InstanceWithTokenRange) bool {
+ return false
+ },
+ func(iv v1.IndexedValue[uint32]) InstanceWithTokenRange {
+ minToken, maxToken := uint32(prevToken+1), iv.Value()
+ prevToken = int(maxToken)
+ return InstanceWithTokenRange{
+ Instance: instances[iv.Index()],
+ MinToken: minToken,
+ MaxToken: maxToken,
+ }
+ },
+ func(iv v1.IndexedValue[uint32], iwtr InstanceWithTokenRange) InstanceWithTokenRange {
+ panic("must not be called, because Eq() is always false")
+ },
+ v1.NewPeekingIter(heapIter),
+ )
}
diff --git a/pkg/bloomutils/ring_test.go b/pkg/bloomutils/ring_test.go
index 1346559372c3..6cac31949eef 100644
--- a/pkg/bloomutils/ring_test.go
+++ b/pkg/bloomutils/ring_test.go
@@ -11,6 +11,11 @@ import (
)
func TestBloomGatewayClient_SortInstancesByToken(t *testing.T) {
+ // | 1 2 3 4 5 6 7 8 9 |
+ // ---------+----------------------------+
+ // ID 1 | * * |
+ // ID 2 | * * |
+ // ID 3 | * |
input := []ring.InstanceDesc{
{Id: "1", Tokens: []uint32{5, 9}},
{Id: "2", Tokens: []uint32{3, 7}},
From 5967ceeb431f515f36bdf4e333cf1cdb23f52193 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Thu, 15 Feb 2024 13:16:17 +0100
Subject: [PATCH 070/130] Replace min/max token with `TokenRange` in bloom ring
utilities (#11960)
This PR replaces the separate min/max token fields with a single `TokenRange` field of type `Range[uint32]`.
The `Range[uint32]` type follows the same semantics as the `FingerprintBounds` type we use for fingerprint ranges.
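For illustration, a minimal self-contained sketch of the new semantics (names mirror `pkg/bloomutils`; the `BoundsCheck` values stand in for `v1.Before`/`v1.Overlap`/`v1.After`, and the union constraint replaces `constraints.Integer` to keep the sketch dependency-free):

```go
package main

import "fmt"

type BoundsCheck int

const (
	Before BoundsCheck = iota
	Overlap
	After
)

// Range mirrors bloomutils.Range: an inclusive [Min, Max] interval.
type Range[T uint32 | uint64] struct {
	Min, Max T
}

// Cmp reports where t falls relative to the inclusive range.
func (r Range[T]) Cmp(t T) BoundsCheck {
	if t < r.Min {
		return Before
	}
	if t > r.Max {
		return After
	}
	return Overlap
}

func main() {
	tr := Range[uint32]{Min: 101, Max: 200} // NewTokenRange(101, 200)
	fmt.Println(tr.Cmp(100) == Before)  // true
	fmt.Println(tr.Cmp(150) == Overlap) // true
	fmt.Println(tr.Cmp(201) == After)   // true
}
```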
Signed-off-by: Christian Haudum
---
pkg/bloomcompactor/bloomcompactor.go | 4 +-
pkg/bloomgateway/client.go | 31 +++-----
pkg/bloomgateway/client_test.go | 39 +++++-----
pkg/bloomutils/ring.go | 106 ++++++++++++++-------------
pkg/bloomutils/ring_test.go | 68 +++++------------
5 files changed, 108 insertions(+), 140 deletions(-)
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index 36dcf36ed1fb..5a579f95fdb7 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -201,11 +201,11 @@ func (c *Compactor) ownsTenant(tenant string) (v1.FingerprintBounds, bool, error
}
- ownershipBounds, err := bloomutils.GetInstanceWithTokenRange(c.cfg.Ring.InstanceID, rs.Instances)
+ keyRange, err := bloomutils.KeyRangeForInstance(c.cfg.Ring.InstanceID, rs.Instances, bloomutils.Uint64Range)
if err != nil {
return v1.FingerprintBounds{}, false, errors.Wrap(err, "getting instance token range")
}
- return ownershipBounds, true, nil
+ return v1.NewBounds(model.Fingerprint(keyRange.Min), model.Fingerprint(keyRange.Max)), true, nil
}
// runs a single round of compaction for all relevant tenants and tables
diff --git a/pkg/bloomgateway/client.go b/pkg/bloomgateway/client.go
index 9a75e4e87c26..28400749404c 100644
--- a/pkg/bloomgateway/client.go
+++ b/pkg/bloomgateway/client.go
@@ -304,25 +304,23 @@ func serverAddressesWithTokenRanges(subRing ring.ReadRing, instances []ring.Inst
for it.Next() {
// We can use one of the tokens from the token range
// to obtain all addresses for that token.
- rs, err := subRing.Get(it.At().MaxToken, BlocksOwnerRead, bufDescs, bufHosts, bufZones)
+ rs, err := subRing.Get(it.At().TokenRange.Max, BlocksOwnerRead, bufDescs, bufHosts, bufZones)
if err != nil {
return nil, errors.Wrap(err, "bloom gateway get ring")
}
servers = append(servers, addrsWithTokenRange{
- id: it.At().Instance.Id,
- addrs: rs.GetAddresses(),
- minToken: it.At().MinToken,
- maxToken: it.At().MaxToken,
+ id: it.At().Instance.Id,
+ addrs: rs.GetAddresses(),
+ tokenRange: it.At().TokenRange,
})
}
- if len(servers) > 0 && servers[len(servers)-1].maxToken < math.MaxUint32 {
+ if len(servers) > 0 && servers[len(servers)-1].tokenRange.Max < math.MaxUint32 {
// append the instance for the token range between the greatest token and MaxUint32
servers = append(servers, addrsWithTokenRange{
- id: servers[0].id,
- addrs: servers[0].addrs,
- minToken: servers[len(servers)-1].maxToken + 1,
- maxToken: math.MaxUint32,
+ id: servers[0].id,
+ addrs: servers[0].addrs,
+ tokenRange: bloomutils.NewTokenRange(servers[len(servers)-1].tokenRange.Max+1, math.MaxUint32),
})
}
return servers, nil
@@ -334,18 +332,13 @@ type instanceWithToken struct {
}
type addrsWithTokenRange struct {
- id string
- addrs []string
- minToken, maxToken uint32
+ id string
+ addrs []string
+ tokenRange bloomutils.Range[uint32]
}
func (s addrsWithTokenRange) cmp(token uint32) v1.BoundsCheck {
- if token < s.minToken {
- return v1.Before
- } else if token > s.maxToken {
- return v1.After
- }
- return v1.Overlap
+ return s.tokenRange.Cmp(token)
}
type instanceWithFingerprints struct {
diff --git a/pkg/bloomgateway/client_test.go b/pkg/bloomgateway/client_test.go
index b1716de8150e..440347d1b248 100644
--- a/pkg/bloomgateway/client_test.go
+++ b/pkg/bloomgateway/client_test.go
@@ -19,6 +19,9 @@ import (
"github.com/grafana/loki/pkg/validation"
)
+// short constructor
+var newTr = bloomutils.NewTokenRange
+
func TestBloomGatewayClient(t *testing.T) {
logger := log.NewNopLogger()
reg := prometheus.NewRegistry()
@@ -53,10 +56,10 @@ func TestBloomGatewayClient_PartitionFingerprintsByAddresses(t *testing.T) {
{Fingerprint: 401}, // out of bounds, will be dismissed
}
servers := []addrsWithTokenRange{
- {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: 0, maxToken: 100},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: 101, maxToken: 200},
- {id: "instance-3", addrs: []string{"10.0.0.3"}, minToken: 201, maxToken: 300},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: 301, maxToken: 400},
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, tokenRange: newTr(0, 100)},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, tokenRange: newTr(101, 200)},
+ {id: "instance-3", addrs: []string{"10.0.0.3"}, tokenRange: newTr(201, 300)},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, tokenRange: newTr(301, 400)},
}
// partition fingerprints
@@ -135,9 +138,9 @@ func TestBloomGatewayClient_PartitionFingerprintsByAddresses(t *testing.T) {
{Fingerprint: 350},
}
servers := []addrsWithTokenRange{
- {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: 0, maxToken: 200},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: 100, maxToken: 300},
- {id: "instance-3", addrs: []string{"10.0.0.3"}, minToken: 200, maxToken: 400},
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, tokenRange: newTr(0, 200)},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, tokenRange: newTr(100, 300)},
+ {id: "instance-3", addrs: []string{"10.0.0.3"}, tokenRange: newTr(200, 400)},
}
// partition fingerprints
@@ -174,10 +177,10 @@ func TestBloomGatewayClient_ServerAddressesWithTokenRanges(t *testing.T) {
{Id: "instance-3", Addr: "10.0.0.3", Tokens: []uint32{math.MaxUint32 / 6 * 5}},
},
expected: []addrsWithTokenRange{
- {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: 0, maxToken: math.MaxUint32 / 6 * 1},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: math.MaxUint32/6*1 + 1, maxToken: math.MaxUint32 / 6 * 3},
- {id: "instance-3", addrs: []string{"10.0.0.3"}, minToken: math.MaxUint32/6*3 + 1, maxToken: math.MaxUint32 / 6 * 5},
- {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: math.MaxUint32/6*5 + 1, maxToken: math.MaxUint32},
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, tokenRange: newTr(0, math.MaxUint32/6*1)},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, tokenRange: newTr(math.MaxUint32/6*1+1, math.MaxUint32/6*3)},
+ {id: "instance-3", addrs: []string{"10.0.0.3"}, tokenRange: newTr(math.MaxUint32/6*3+1, math.MaxUint32/6*5)},
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, tokenRange: newTr(math.MaxUint32/6*5+1, math.MaxUint32)},
},
},
"MinUint32 and MaxUint32 are tokens in the ring": {
@@ -186,10 +189,10 @@ func TestBloomGatewayClient_ServerAddressesWithTokenRanges(t *testing.T) {
{Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{math.MaxUint32 / 3 * 1, math.MaxUint32}},
},
expected: []addrsWithTokenRange{
- {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: 0, maxToken: 0},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: 1, maxToken: math.MaxUint32 / 3},
- {id: "instance-1", addrs: []string{"10.0.0.1"}, minToken: math.MaxUint32/3*1 + 1, maxToken: math.MaxUint32 / 3 * 2},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, minToken: math.MaxUint32/3*2 + 1, maxToken: math.MaxUint32},
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, tokenRange: newTr(0, 0)},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, tokenRange: newTr(1, math.MaxUint32/3)},
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, tokenRange: newTr(math.MaxUint32/3*1+1, math.MaxUint32/3*2)},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, tokenRange: newTr(math.MaxUint32/3*2+1, math.MaxUint32)},
},
},
}
@@ -215,7 +218,7 @@ func TestBloomGatewayClient_GroupFingerprintsByServer(t *testing.T) {
it := bloomutils.NewInstanceSortMergeIterator(instances)
for it.Next() {
- t.Log(it.At().MaxToken, it.At().Instance.Addr)
+ t.Log(it.At().TokenRange.Max, it.At().Instance.Addr)
}
testCases := []struct {
@@ -357,10 +360,10 @@ type mockRing struct {
// Get implements ring.ReadRing.
func (r *mockRing) Get(key uint32, _ ring.Operation, _ []ring.InstanceDesc, _ []string, _ []string) (ring.ReplicationSet, error) {
idx, _ := sort.Find(len(r.ranges), func(i int) int {
- if r.ranges[i].MaxToken < key {
+ if r.ranges[i].TokenRange.Max < key {
return 1
}
- if r.ranges[i].MaxToken > key {
+ if r.ranges[i].TokenRange.Max > key {
return -1
}
return 0
diff --git a/pkg/bloomutils/ring.go b/pkg/bloomutils/ring.go
index 6da275f607c2..102d3ed5e9a5 100644
--- a/pkg/bloomutils/ring.go
+++ b/pkg/bloomutils/ring.go
@@ -1,33 +1,62 @@
// This file contains a bunch of utility functions for bloom components.
-// TODO: Find a better location for this package
package bloomutils
import (
+ "errors"
+ "fmt"
"math"
"sort"
"github.com/grafana/dskit/ring"
- "github.com/prometheus/common/model"
+ "golang.org/x/exp/constraints"
"golang.org/x/exp/slices"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
-type InstanceWithTokenRange struct {
- Instance ring.InstanceDesc
- MinToken, MaxToken uint32
+var (
+ Uint32Range = Range[uint32]{Min: 0, Max: math.MaxUint32}
+ Uint64Range = Range[uint64]{Min: 0, Max: math.MaxUint64}
+)
+
+type Range[T constraints.Integer] struct {
+ Min, Max T
}
-func (i InstanceWithTokenRange) Cmp(token uint32) v1.BoundsCheck {
- if token < i.MinToken {
+func (r Range[T]) String() string {
+ return fmt.Sprintf("%016x-%016x", r.Min, r.Max)
+}
+
+func (r Range[T]) Less(other Range[T]) bool {
+ if r.Min != other.Min {
+ return r.Min < other.Min
+ }
+ return r.Max <= other.Max
+}
+
+func (r Range[T]) Cmp(t T) v1.BoundsCheck {
+ if t < r.Min {
return v1.Before
- } else if token > i.MaxToken {
+ } else if t > r.Max {
return v1.After
}
return v1.Overlap
}
+func NewTokenRange(min, max uint32) Range[uint32] {
+ return Range[uint32]{min, max}
+}
+
+type InstanceWithTokenRange struct {
+ Instance ring.InstanceDesc
+ TokenRange Range[uint32]
+}
+
+func (i InstanceWithTokenRange) Cmp(token uint32) v1.BoundsCheck {
+ return i.TokenRange.Cmp(token)
+}
+
type InstancesWithTokenRange []InstanceWithTokenRange
func (i InstancesWithTokenRange) Contains(token uint32) bool {
@@ -39,11 +68,11 @@ func (i InstancesWithTokenRange) Contains(token uint32) bool {
return false
}
-// GetInstanceWithTokenRange calculates the token range for a specific instance
+// KeyRangeForInstance calculates the token range for a specific instance
// with given id based on the first token in the ring.
// This assumes that each instance in the ring is configured with only a single
// token.
-func GetInstanceWithTokenRange(id string, instances []ring.InstanceDesc) (v1.FingerprintBounds, error) {
+func KeyRangeForInstance[T constraints.Integer](id string, instances []ring.InstanceDesc, keyspace Range[T]) (Range[T], error) {
// Sort instances -- they may not be sorted
// because they're usually accessed by looking up the tokens (which are sorted)
@@ -57,57 +86,31 @@ func GetInstanceWithTokenRange(id string, instances []ring.InstanceDesc) (v1.Fin
// instance with Id == id not found
if idx == -1 {
- return v1.FingerprintBounds{}, ring.ErrInstanceNotFound
+ return Range[T]{}, ring.ErrInstanceNotFound
}
- i := uint64(idx)
- n := uint64(len(instances))
- step := math.MaxUint64 / n
+ diff := keyspace.Max - keyspace.Min
+ i := T(idx)
+ n := T(len(instances))
- minToken := model.Fingerprint(step * i)
- maxToken := model.Fingerprint(step*i + step - 1)
+ if diff < n {
+ return Range[T]{}, errors.New("keyspace is smaller than amount of instances")
+ }
+
+ step := diff / n
+ min := step * i
+ max := step*i + step - 1
if i == n-1 {
// extend the last token range to MaxUint32
- maxToken = math.MaxUint64
+ max = (keyspace.Max - keyspace.Min)
}
- return v1.NewBounds(minToken, maxToken), nil
-}
-
-// GetInstancesWithTokenRanges calculates the token ranges for a specific
-// instance with given id based on all tokens in the ring.
-// If the instances in the ring are configured with a single token, such as the
-// bloom compactor, use GetInstanceWithTokenRange() instead.
-func GetInstancesWithTokenRanges(id string, instances []ring.InstanceDesc) InstancesWithTokenRange {
- servers := make([]InstanceWithTokenRange, 0, len(instances))
- it := NewInstanceSortMergeIterator(instances)
- var firstInst ring.InstanceDesc
- var lastToken uint32
- for it.Next() {
- if firstInst.Id == "" {
- firstInst = it.At().Instance
- }
- if it.At().Instance.Id == id {
- servers = append(servers, it.At())
- }
- lastToken = it.At().MaxToken
- }
- // append token range from lastToken+1 to MaxUint32
- // only if the instance with the first token is the current one
- if len(servers) > 0 && firstInst.Id == id {
- servers = append(servers, InstanceWithTokenRange{
- MinToken: lastToken + 1,
- MaxToken: math.MaxUint32,
- Instance: servers[0].Instance,
- })
- }
- return servers
+ return Range[T]{min, max}, nil
}
// NewInstanceSortMergeIterator creates an iterator that yields instanceWithToken elements
// where the tokens of the elements are sorted in ascending order.
func NewInstanceSortMergeIterator(instances []ring.InstanceDesc) v1.Iterator[InstanceWithTokenRange] {
-
tokenIters := make([]v1.PeekingIterator[v1.IndexedValue[uint32]], 0, len(instances))
for i, inst := range instances {
sort.Slice(inst.Tokens, func(a, b int) bool { return inst.Tokens[a] < inst.Tokens[b] })
@@ -131,9 +134,8 @@ func NewInstanceSortMergeIterator(instances []ring.InstanceDesc) v1.Iterator[Ins
minToken, maxToken := uint32(prevToken+1), iv.Value()
prevToken = int(maxToken)
return InstanceWithTokenRange{
- Instance: instances[iv.Index()],
- MinToken: minToken,
- MaxToken: maxToken,
+ Instance: instances[iv.Index()],
+ TokenRange: NewTokenRange(minToken, maxToken),
}
},
func(iv v1.IndexedValue[uint32], iwtr InstanceWithTokenRange) InstanceWithTokenRange {
diff --git a/pkg/bloomutils/ring_test.go b/pkg/bloomutils/ring_test.go
index 6cac31949eef..c9ff6cf5e1d6 100644
--- a/pkg/bloomutils/ring_test.go
+++ b/pkg/bloomutils/ring_test.go
@@ -6,27 +6,25 @@ import (
"github.com/grafana/dskit/ring"
"github.com/stretchr/testify/require"
-
- v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
-func TestBloomGatewayClient_SortInstancesByToken(t *testing.T) {
- // | 1 2 3 4 5 6 7 8 9 |
- // ---------+----------------------------+
- // ID 1 | * * |
- // ID 2 | * * |
- // ID 3 | * |
+func TestBloomGatewayClient_InstanceSortMergeIterator(t *testing.T) {
+ // | 0 1 2 3 4 5 6 7 8 9 |
+ // ---------+---------------------+
+ // ID 1 | ***o ***o |
+ // ID 2 | ***o ***o |
+ // ID 3 | **o |
input := []ring.InstanceDesc{
{Id: "1", Tokens: []uint32{5, 9}},
{Id: "2", Tokens: []uint32{3, 7}},
{Id: "3", Tokens: []uint32{1}},
}
expected := []InstanceWithTokenRange{
- {Instance: input[2], MinToken: 0, MaxToken: 1},
- {Instance: input[1], MinToken: 2, MaxToken: 3},
- {Instance: input[0], MinToken: 4, MaxToken: 5},
- {Instance: input[1], MinToken: 6, MaxToken: 7},
- {Instance: input[0], MinToken: 8, MaxToken: 9},
+ {Instance: input[2], TokenRange: NewTokenRange(0, 1)},
+ {Instance: input[1], TokenRange: NewTokenRange(2, 3)},
+ {Instance: input[0], TokenRange: NewTokenRange(4, 5)},
+ {Instance: input[1], TokenRange: NewTokenRange(6, 7)},
+ {Instance: input[0], TokenRange: NewTokenRange(8, 9)},
}
var i int
@@ -38,43 +36,15 @@ func TestBloomGatewayClient_SortInstancesByToken(t *testing.T) {
}
}
-func TestBloomGatewayClient_GetInstancesWithTokenRanges(t *testing.T) {
- t.Run("instance does not own first token in the ring", func(t *testing.T) {
- input := []ring.InstanceDesc{
- {Id: "1", Tokens: []uint32{5, 9}},
- {Id: "2", Tokens: []uint32{3, 7}},
- {Id: "3", Tokens: []uint32{1}},
- }
- expected := InstancesWithTokenRange{
- {Instance: input[1], MinToken: 2, MaxToken: 3},
- {Instance: input[1], MinToken: 6, MaxToken: 7},
- }
-
- result := GetInstancesWithTokenRanges("2", input)
- require.Equal(t, expected, result)
- })
-
- t.Run("instance owns first token in the ring", func(t *testing.T) {
- input := []ring.InstanceDesc{
- {Id: "1", Tokens: []uint32{5, 9}},
- {Id: "2", Tokens: []uint32{3, 7}},
- {Id: "3", Tokens: []uint32{1}},
- }
- expected := InstancesWithTokenRange{
- {Instance: input[2], MinToken: 0, MaxToken: 1},
- {Instance: input[2], MinToken: 10, MaxToken: math.MaxUint32},
- }
-
- result := GetInstancesWithTokenRanges("3", input)
- require.Equal(t, expected, result)
- })
+func uint64Range(min, max uint64) Range[uint64] {
+ return Range[uint64]{min, max}
}
-func TestBloomGatewayClient_GetInstanceWithTokenRange(t *testing.T) {
+func TestBloomGatewayClient_KeyRangeForInstance(t *testing.T) {
for name, tc := range map[string]struct {
id string
input []ring.InstanceDesc
- expected v1.FingerprintBounds
+ expected Range[uint64]
}{
"first instance includes 0 token": {
id: "3",
@@ -83,7 +53,7 @@ func TestBloomGatewayClient_GetInstanceWithTokenRange(t *testing.T) {
{Id: "2", Tokens: []uint32{5}},
{Id: "3", Tokens: []uint32{1}},
},
- expected: v1.NewBounds(0, math.MaxUint64/3-1),
+ expected: uint64Range(0, math.MaxUint64/3-1),
},
"middle instance": {
id: "1",
@@ -92,7 +62,7 @@ func TestBloomGatewayClient_GetInstanceWithTokenRange(t *testing.T) {
{Id: "2", Tokens: []uint32{5}},
{Id: "3", Tokens: []uint32{1}},
},
- expected: v1.NewBounds(math.MaxUint64/3, math.MaxUint64/3*2-1),
+ expected: uint64Range(math.MaxUint64/3, math.MaxUint64/3*2-1),
},
"last instance includes MaxUint32 token": {
id: "2",
@@ -101,12 +71,12 @@ func TestBloomGatewayClient_GetInstanceWithTokenRange(t *testing.T) {
{Id: "2", Tokens: []uint32{5}},
{Id: "3", Tokens: []uint32{1}},
},
- expected: v1.NewBounds(math.MaxUint64/3*2, math.MaxUint64),
+ expected: uint64Range(math.MaxUint64/3*2, math.MaxUint64),
},
} {
tc := tc
t.Run(name, func(t *testing.T) {
- result, err := GetInstanceWithTokenRange(tc.id, tc.input)
+ result, err := KeyRangeForInstance(tc.id, tc.input, Uint64Range)
require.NoError(t, err)
require.Equal(t, tc.expected, result)
})
From c328a4f42e8d4296a92df51345a5c02494c2e6fc Mon Sep 17 00:00:00 2001
From: J Stickler
Date: Thu, 15 Feb 2024 09:41:06 -0500
Subject: [PATCH 071/130] Fixing broken links (#11956)
---
docs/sources/release-notes/cadence.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/docs/sources/release-notes/cadence.md b/docs/sources/release-notes/cadence.md
index f13781cf1c5f..ef6fbcaf072f 100644
--- a/docs/sources/release-notes/cadence.md
+++ b/docs/sources/release-notes/cadence.md
@@ -8,7 +8,7 @@ weight: 1
## Stable Releases
-Loki releases (this includes [Promtail](/clients/promtail), [Loki Canary](/operations/loki-canary/), etc) use the following
+Loki releases (this includes [Promtail](https://grafana.com/docs/loki//send-data/promtail/), [Loki Canary](https://grafana.com/docs/loki//operations/loki-canary/), etc.) use the following
naming scheme: `MAJOR`.`MINOR`.`PATCH`.
- `MAJOR` (roughly once a year): these releases include large new features and possible backwards-compatibility breaks.
@@ -18,14 +18,14 @@ naming scheme: `MAJOR`.`MINOR`.`PATCH`.
{{% admonition type="note" %}}
While our naming scheme resembles [Semantic Versioning](https://semver.org/), at this time we do not strictly follow its
guidelines to the letter. Our goal is to provide regular releases that are as stable as possible, and we take backwards-compatibility
-seriously. As with any software, always read the [release notes](/release-notes) and the [upgrade guide](/upgrading) whenever
+seriously. As with any software, always read the [release notes](https://grafana.com/docs/loki//release-notes/) and the [upgrade guide](https://grafana.com/docs/loki//setup/upgrade/) whenever
choosing a new version of Loki to install.
{{% /admonition %}}
New releases are based off a [weekly release](#weekly-releases) which we have vetted for stability over a number of weeks.
We strongly recommend keeping up-to-date with patch releases as they are released. We post updates of new releases in the `#loki` channel
-of our [Slack community](/community/getting-in-touch).
+of our [Slack community](https://grafana.com/docs/loki//community/getting-in-touch/).
You can find all of our releases [on GitHub](https://github.com/grafana/loki/releases) and on [Docker Hub](https://hub.docker.com/r/grafana/loki).
From a955ba93362cd3349d68511c01a26deec8a70f80 Mon Sep 17 00:00:00 2001
From: Zirko <64951262+QuantumEnigmaa@users.noreply.github.com>
Date: Thu, 15 Feb 2024 16:22:18 +0100
Subject: [PATCH 072/130] Helm: fix ciliumNetworkPolicy template in the chart
(#11963)
Signed-off-by: QuantumEnigmaa
---
production/helm/loki/CHANGELOG.md | 4 +++
production/helm/loki/Chart.yaml | 2 +-
production/helm/loki/README.md | 2 +-
.../loki/templates/ciliumnetworkpolicy.yaml | 29 +++++++++++--------
4 files changed, 23 insertions(+), 14 deletions(-)
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 47d8f6333e4e..d232a7d6c7ea 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -14,6 +14,10 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+## 5.43.1
+
+- [BUGFIX] Fix `toPorts` fields in the `ciliumnetworkpolicy` template
+
## 5.43.0
- [ENHANCEMENT] Allow the definition of resources for GrafanaAgent pods
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index ffa62c88d5cd..49d7ca836b8a 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki in simple, scalable mode
type: application
appVersion: 2.9.4
-version: 5.43.0
+version: 5.43.1
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index 5db87e6d801e..a4ef51dd9d5e 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-![Version: 5.43.0](https://img.shields.io/badge/Version-5.43.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.4](https://img.shields.io/badge/AppVersion-2.9.4-informational?style=flat-square)
+![Version: 5.43.1](https://img.shields.io/badge/Version-5.43.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.4](https://img.shields.io/badge/AppVersion-2.9.4-informational?style=flat-square)
Helm chart for Grafana Loki in simple, scalable mode
diff --git a/production/helm/loki/templates/ciliumnetworkpolicy.yaml b/production/helm/loki/templates/ciliumnetworkpolicy.yaml
index ddcef3b61a8e..1eb1644dd850 100644
--- a/production/helm/loki/templates/ciliumnetworkpolicy.yaml
+++ b/production/helm/loki/templates/ciliumnetworkpolicy.yaml
@@ -60,8 +60,9 @@ spec:
{{- include "loki.selectorLabels" . | nindent 6 }}
ingress:
- toPorts:
- - port: http
- protocol: TCP
+ - ports:
+ - port: http
+ protocol: TCP
{{- if .Values.networkPolicy.ingress.namespaceSelector }}
fromEndpoints:
- matchLabels:
@@ -85,8 +86,9 @@ spec:
{{- include "loki.selectorLabels" . | nindent 6 }}
ingress:
- toPorts:
- - port: http-metrics
- protocol: TCP
+ - ports:
+ - port: http-metrics
+ protocol: TCP
{{- if .Values.networkPolicy.metrics.cidrs }}
{{- range $cidr := .Values.networkPolicy.metrics.cidrs }}
toCIDR:
@@ -116,8 +118,9 @@ spec:
{{- include "loki.backendSelectorLabels" . | nindent 6 }}
egress:
- toPorts:
- - port: {{ .Values.networkPolicy.alertmanager.port }}
- protocol: TCP
+ - ports:
+ - port: "{{ .Values.networkPolicy.alertmanager.port }}"
+ protocol: TCP
{{- if .Values.networkPolicy.alertmanager.namespaceSelector }}
toEndpoints:
- matchLabels:
@@ -142,10 +145,11 @@ spec:
{{- include "loki.selectorLabels" . | nindent 6 }}
egress:
- toPorts:
- {{- range $port := .Values.networkPolicy.externalStorage.ports }}
- - port: {{ $port }}
- protocol: TCP
- {{- end }}
+ - ports:
+ {{- range $port := .Values.networkPolicy.externalStorage.ports }}
+ - port: "{{ $port }}"
+ protocol: TCP
+ {{- end }}
{{- if .Values.networkPolicy.externalStorage.cidrs }}
{{- range $cidr := .Values.networkPolicy.externalStorage.cidrs }}
toCIDR:
@@ -171,8 +175,9 @@ spec:
{{- include "loki.selectorLabels" . | nindent 6 }}
egress:
- toPorts:
- - port: {{ .Values.networkPolicy.discovery.port }}
- protocol: TCP
+ - ports:
+ - port: "{{ .Values.networkPolicy.discovery.port }}"
+ protocol: TCP
{{- if .Values.networkPolicy.discovery.namespaceSelector }}
toEndpoints:
- matchLabels:
From cad4b8e749ccc19872473520f0456a43edc23be6 Mon Sep 17 00:00:00 2001
From: JordanRushing
Date: Thu, 15 Feb 2024 11:36:37 -0600
Subject: [PATCH 073/130] Reduce Distributor auto-forget unhealthy cycles from
10->2 (#11935)
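For context: the distributor wires this constant into the ring's auto-forget logic, so an instance that stays unhealthy for `ringAutoForgetUnhealthyPeriods` heartbeat-timeout periods is removed from the ring. A hedged sketch of the arithmetic (the one-minute timeout is an assumed example value, not necessarily the configured default):

```go
package main

import (
	"fmt"
	"time"
)

const ringAutoForgetUnhealthyPeriods = 2

func main() {
	// Assumed example value; the real value comes from the distributor's
	// ring configuration (heartbeat timeout).
	heartbeatTimeout := time.Minute
	forgetAfter := ringAutoForgetUnhealthyPeriods * heartbeatTimeout
	fmt.Println(forgetAfter) // 2m0s, down from 10m0s with the previous value of 10
}
```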
Signed-off-by: JordanRushing
---
pkg/distributor/distributor.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index a5229b0ca149..f47148fa42b0 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -52,7 +52,7 @@ import (
const (
ringKey = "distributor"
- ringAutoForgetUnhealthyPeriods = 10
+ ringAutoForgetUnhealthyPeriods = 2
)
var (
From 4e1b210017a19519631db2c451858782de28835a Mon Sep 17 00:00:00 2001
From: Robert Jacob
Date: Thu, 15 Feb 2024 18:58:53 +0100
Subject: [PATCH 074/130] operator: Provide Azure region for managed
credentials using environment variable (#11964)
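For context, a hedged sketch of how the region is now resolved (the `REGION` variable and the `centralus` fallback are taken from the diff below; the surrounding wiring is simplified):

```go
package main

import (
	"fmt"
	"os"
)

const azureFallbackRegion = "centralus"

func resolveAzureRegion() string {
	// REGION is set on the operator Subscription and may legitimately be empty.
	if region := os.Getenv("REGION"); region != "" {
		return region
	}
	// The region is currently unused when generating the credential secret,
	// so a placeholder is enough to let the CredentialsRequest be created.
	return azureFallbackRegion
}

func main() {
	fmt.Println(resolveAzureRegion())
}
```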
---
operator/CHANGELOG.md | 1 +
operator/internal/config/managed_auth.go | 2 +
...equest_create.go => credentialsrequest.go} | 31 ----------
...ate_test.go => credentialsrequest_test.go} | 61 ++-----------------
.../handlers/internal/storage/secrets.go | 7 ---
.../handlers/internal/storage/secrets_test.go | 21 -------
.../manifests/openshift/credentialsrequest.go | 11 ++++
.../internal/manifests/storage/options.go | 1 -
operator/internal/manifests/storage/var.go | 2 -
9 files changed, 19 insertions(+), 118 deletions(-)
rename operator/internal/handlers/{credentialsrequest_create.go => credentialsrequest.go} (67%)
rename operator/internal/handlers/{credentialsrequest_create_test.go => credentialsrequest_test.go} (71%)
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 59afb2970878..d504e4ee31b5 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [11964](https://github.com/grafana/loki/pull/11964) **xperimental**: Provide Azure region for managed credentials using environment variable
- [11920](https://github.com/grafana/loki/pull/11920) **xperimental**: Refactor handling of credentials in managed-auth mode
- [11869](https://github.com/grafana/loki/pull/11869) **periklis**: Add support for running with Google Workload Identity
- [11868](https://github.com/grafana/loki/pull/11868) **xperimental**: Integrate support for OpenShift-managed credentials in Azure
diff --git a/operator/internal/config/managed_auth.go b/operator/internal/config/managed_auth.go
index 73598e7032f8..76f9d72f3c26 100644
--- a/operator/internal/config/managed_auth.go
+++ b/operator/internal/config/managed_auth.go
@@ -26,6 +26,7 @@ func discoverManagedAuthConfig() *ManagedAuthConfig {
clientID := os.Getenv("CLIENTID")
tenantID := os.Getenv("TENANTID")
subscriptionID := os.Getenv("SUBSCRIPTIONID")
+ region := os.Getenv("REGION")
switch {
case roleARN != "":
@@ -40,6 +41,7 @@ func discoverManagedAuthConfig() *ManagedAuthConfig {
ClientID: clientID,
SubscriptionID: subscriptionID,
TenantID: tenantID,
+ Region: region,
},
}
}
diff --git a/operator/internal/handlers/credentialsrequest_create.go b/operator/internal/handlers/credentialsrequest.go
similarity index 67%
rename from operator/internal/handlers/credentialsrequest_create.go
rename to operator/internal/handlers/credentialsrequest.go
index 50e06375ffd8..0d562332dc9d 100644
--- a/operator/internal/handlers/credentialsrequest_create.go
+++ b/operator/internal/handlers/credentialsrequest.go
@@ -2,12 +2,10 @@ package handlers
import (
"context"
- "errors"
"fmt"
"github.com/ViaQ/logerr/v2/kverrors"
"github.com/go-logr/logr"
- corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
@@ -19,11 +17,8 @@ import (
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/grafana/loki/operator/internal/manifests/openshift"
- "github.com/grafana/loki/operator/internal/manifests/storage"
)
-var errAzureNoRegion = errors.New("can not create CredentialsRequest: missing secret field: region")
-
// CreateCredentialsRequest creates a new CredentialsRequest resource for a Lokistack
// to request a cloud credentials Secret resource from the OpenShift cloud-credentials-operator.
func CreateCredentialsRequest(ctx context.Context, log logr.Logger, scheme *runtime.Scheme, managedAuth *config.ManagedAuthConfig, k k8s.Client, req ctrl.Request) error {
@@ -39,32 +34,6 @@ func CreateCredentialsRequest(ctx context.Context, log logr.Logger, scheme *runt
return kverrors.Wrap(err, "failed to lookup LokiStack", "name", req.String())
}
- if managedAuth.Azure != nil && managedAuth.Azure.Region == "" {
- // Managed environment for Azure does not provide Region, but we need this for the CredentialsRequest.
- // This looks like an oversight when creating the UI in OpenShift, but for now we need to pull this data
- // from somewhere else -> the Azure Storage Secret
- storageSecretName := client.ObjectKey{
- Namespace: stack.Namespace,
- Name: stack.Spec.Storage.Secret.Name,
- }
- storageSecret := &corev1.Secret{}
- if err := k.Get(ctx, storageSecretName, storageSecret); err != nil {
- if apierrors.IsNotFound(err) {
- // Skip this error here as it will be picked up by the LokiStack handler instead
- ll.Error(err, "could not find secret for LokiStack", "name", req.String())
- return nil
- }
- return err
- }
-
- region := storageSecret.Data[storage.KeyAzureRegion]
- if len(region) == 0 {
- return errAzureNoRegion
- }
-
- managedAuth.Azure.Region = string(region)
- }
-
opts := openshift.Options{
BuildOpts: openshift.BuildOptions{
LokiStackName: stack.Name,
diff --git a/operator/internal/handlers/credentialsrequest_create_test.go b/operator/internal/handlers/credentialsrequest_test.go
similarity index 71%
rename from operator/internal/handlers/credentialsrequest_create_test.go
rename to operator/internal/handlers/credentialsrequest_test.go
index 626302a11327..dd6dfb50d77d 100644
--- a/operator/internal/handlers/credentialsrequest_create_test.go
+++ b/operator/internal/handlers/credentialsrequest_test.go
@@ -6,7 +6,6 @@ import (
cloudcredentialv1 "github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1"
"github.com/stretchr/testify/require"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -19,7 +18,7 @@ import (
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
-func credentialsRequestFakeClient(cr *cloudcredentialv1.CredentialsRequest, lokistack *lokiv1.LokiStack, secret *corev1.Secret) *k8sfakes.FakeClient {
+func credentialsRequestFakeClient(cr *cloudcredentialv1.CredentialsRequest, lokistack *lokiv1.LokiStack) *k8sfakes.FakeClient {
k := &k8sfakes.FakeClient{}
k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
switch object.(type) {
@@ -33,11 +32,6 @@ func credentialsRequestFakeClient(cr *cloudcredentialv1.CredentialsRequest, loki
return errors.NewNotFound(schema.GroupResource{}, name.Name)
}
k.SetClientObject(object, lokistack)
- case *corev1.Secret:
- if secret == nil {
- return errors.NewNotFound(schema.GroupResource{}, name.Name)
- }
- k.SetClientObject(object, secret)
}
return nil
}
@@ -58,7 +52,7 @@ func TestCreateCredentialsRequest_CreateNewResource(t *testing.T) {
},
}
- k := credentialsRequestFakeClient(nil, lokistack, nil)
+ k := credentialsRequestFakeClient(nil, lokistack)
req := ctrl.Request{
NamespacedName: client.ObjectKey{Name: "my-stack", Namespace: "ns"},
}
@@ -89,13 +83,8 @@ func TestCreateCredentialsRequest_CreateNewResourceAzure(t *testing.T) {
Namespace: "ns",
},
}
- secret := &corev1.Secret{
- Data: map[string][]byte{
- "region": []byte(wantRegion),
- },
- }
- k := credentialsRequestFakeClient(nil, lokistack, secret)
+ k := credentialsRequestFakeClient(nil, lokistack)
req := ctrl.Request{
NamespacedName: client.ObjectKey{Name: "my-stack", Namespace: "ns"},
}
@@ -105,6 +94,7 @@ func TestCreateCredentialsRequest_CreateNewResourceAzure(t *testing.T) {
ClientID: "test-client-id",
SubscriptionID: "test-tenant-id",
TenantID: "test-subscription-id",
+ Region: "test-region",
},
}
@@ -122,47 +112,6 @@ func TestCreateCredentialsRequest_CreateNewResourceAzure(t *testing.T) {
require.Equal(t, wantRegion, providerSpec.AzureRegion)
}
-func TestCreateCredentialsRequest_CreateNewResourceAzure_Errors(t *testing.T) {
- lokistack := &lokiv1.LokiStack{
- ObjectMeta: metav1.ObjectMeta{
- Name: "my-stack",
- Namespace: "ns",
- },
- }
- req := ctrl.Request{
- NamespacedName: client.ObjectKey{Name: "my-stack", Namespace: "ns"},
- }
-
- tt := []struct {
- secret *corev1.Secret
- wantError string
- }{
- {
- secret: &corev1.Secret{},
- wantError: errAzureNoRegion.Error(),
- },
- }
-
- for _, tc := range tt {
- tc := tc
- t.Run(tc.wantError, func(t *testing.T) {
- t.Parallel()
-
- managedAuth := &config.ManagedAuthConfig{
- Azure: &config.AzureEnvironment{
- ClientID: "test-client-id",
- SubscriptionID: "test-tenant-id",
- TenantID: "test-subscription-id",
- },
- }
- k := credentialsRequestFakeClient(nil, lokistack, tc.secret)
-
- err := CreateCredentialsRequest(context.Background(), logger, scheme, managedAuth, k, req)
- require.EqualError(t, err, tc.wantError)
- })
- }
-}
-
func TestCreateCredentialsRequest_DoNothing_WhenCredentialsRequestExist(t *testing.T) {
req := ctrl.Request{
NamespacedName: client.ObjectKey{Name: "my-stack", Namespace: "ns"},
@@ -187,7 +136,7 @@ func TestCreateCredentialsRequest_DoNothing_WhenCredentialsRequestExist(t *testi
},
}
- k := credentialsRequestFakeClient(cr, lokistack, nil)
+ k := credentialsRequestFakeClient(cr, lokistack)
err := CreateCredentialsRequest(context.Background(), logger, scheme, managedAuth, k, req)
require.NoError(t, err)
diff --git a/operator/internal/handlers/internal/storage/secrets.go b/operator/internal/handlers/internal/storage/secrets.go
index 99bafb911ec2..2492eea4d419 100644
--- a/operator/internal/handlers/internal/storage/secrets.go
+++ b/operator/internal/handlers/internal/storage/secrets.go
@@ -182,18 +182,11 @@ func extractAzureConfigSecret(s *corev1.Secret, fg configv1.FeatureGates) (*stor
// Extract and validate optional fields
endpointSuffix := s.Data[storage.KeyAzureStorageEndpointSuffix]
audience := s.Data[storage.KeyAzureAudience]
- region := s.Data[storage.KeyAzureRegion]
if !workloadIdentity && len(audience) > 0 {
return nil, fmt.Errorf("%w: %s", errSecretFieldNotAllowed, storage.KeyAzureAudience)
}
- if fg.OpenShift.ManagedAuthEnv {
- if len(region) == 0 {
- return nil, fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAzureRegion)
- }
- }
-
return &storage.AzureStorageConfig{
Env: string(env),
Container: string(container),
diff --git a/operator/internal/handlers/internal/storage/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go
index 1363cd4a660a..ca3623b718c1 100644
--- a/operator/internal/handlers/internal/storage/secrets_test.go
+++ b/operator/internal/handlers/internal/storage/secrets_test.go
@@ -156,27 +156,6 @@ func TestAzureExtract(t *testing.T) {
},
wantError: "missing secret field: subscription_id",
},
- {
- name: "managed auth - no region",
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{Name: "test"},
- Data: map[string][]byte{
- "environment": []byte("here"),
- "account_name": []byte("test-account-name"),
- "container": []byte("this,that"),
- },
- },
- managedSecret: &corev1.Secret{
- Data: map[string][]byte{},
- },
- featureGates: configv1.FeatureGates{
- OpenShift: configv1.OpenShiftFeatureGates{
- Enabled: true,
- ManagedAuthEnv: true,
- },
- },
- wantError: "missing secret field: region",
- },
{
name: "managed auth - no auth override",
secret: &corev1.Secret{
diff --git a/operator/internal/manifests/openshift/credentialsrequest.go b/operator/internal/manifests/openshift/credentialsrequest.go
index 0e97dd97c2b1..0c0a19adc98d 100644
--- a/operator/internal/manifests/openshift/credentialsrequest.go
+++ b/operator/internal/manifests/openshift/credentialsrequest.go
@@ -12,6 +12,8 @@ import (
"github.com/grafana/loki/operator/internal/manifests/storage"
)
+const azureFallbackRegion = "centralus"
+
func BuildCredentialsRequest(opts Options) (*cloudcredentialv1.CredentialsRequest, error) {
stack := client.ObjectKey{Name: opts.BuildOpts.LokiStackName, Namespace: opts.BuildOpts.LokiStackNamespace}
@@ -62,6 +64,15 @@ func encodeProviderSpec(env *config.ManagedAuthConfig) (*runtime.RawExtension, e
}
case env.Azure != nil:
azure := env.Azure
+ if azure.Region == "" {
+ // The OpenShift Console currently does not provide a UI to configure the Azure Region
+ // for an operator using managed credentials. Because the CredentialsRequest is currently
+ // not used to create a Managed Identity, the region is actually never used.
+ // We default to the US region if nothing is set, so that the CredentialsRequest can be
+ // created. This should have no effect on the generated credential secret.
+ // The region can be configured by setting an environment variable on the operator Subscription.
+ azure.Region = azureFallbackRegion
+ }
spec = &cloudcredentialv1.AzureProviderSpec{
Permissions: []string{
diff --git a/operator/internal/manifests/storage/options.go b/operator/internal/manifests/storage/options.go
index 6693d2261e97..56e2b8e870df 100644
--- a/operator/internal/manifests/storage/options.go
+++ b/operator/internal/manifests/storage/options.go
@@ -63,7 +63,6 @@ type AzureStorageConfig struct {
Container string
EndpointSuffix string
Audience string
- Region string
WorkloadIdentity bool
}
diff --git a/operator/internal/manifests/storage/var.go b/operator/internal/manifests/storage/var.go
index cbd944a821c3..1f236406bdd0 100644
--- a/operator/internal/manifests/storage/var.go
+++ b/operator/internal/manifests/storage/var.go
@@ -88,8 +88,6 @@ const (
KeyAzureStorageEndpointSuffix = "endpoint_suffix"
// KeyAzureEnvironmentName is the secret data key for the Azure cloud environment name.
KeyAzureEnvironmentName = "environment"
- // KeyAzureRegion is the secret data key for storing the Azure cloud region.
- KeyAzureRegion = "region"
// KeyAzureAudience is the secret data key for customizing the audience used for the ServiceAccount token.
KeyAzureAudience = "audience"
From 443720f47b3e79d608cb4cc42c6f12e108352b29 Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Thu, 15 Feb 2024 11:03:07 -0800
Subject: [PATCH 075/130] tsdb parsing handles uint (#11969)
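To illustrate the bug this fixes: a checksum of `math.MaxUint32` (`ffffffff`) overflows a signed 32-bit parse, so `ParseInt` rejected otherwise valid TSDB paths. Minimal reproduction:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Signed 32-bit parse: max is 0x7fffffff, so this fails.
	_, err := strconv.ParseInt("ffffffff", 16, 32)
	fmt.Println(err) // strconv.ParseInt: parsing "ffffffff": value out of range

	// Unsigned 32-bit parse: the full uint32 range is accepted.
	v, err := strconv.ParseUint("ffffffff", 16, 32)
	fmt.Println(v, err) // 4294967295 <nil>
}
```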
---
.../shipper/indexshipper/tsdb/identifier.go | 2 +-
.../indexshipper/tsdb/identifier_test.go | 27 +++++++++++++------
2 files changed, 20 insertions(+), 9 deletions(-)
diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/identifier.go b/pkg/storage/stores/shipper/indexshipper/tsdb/identifier.go
index 451688d040e3..943127f3e6b6 100644
--- a/pkg/storage/stores/shipper/indexshipper/tsdb/identifier.go
+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/identifier.go
@@ -128,7 +128,7 @@ func ParseSingleTenantTSDBPath(p string) (id SingleTenantTSDBIdentifier, ok bool
return
}
- checksum, err := strconv.ParseInt(elems[4], 16, 32)
+ checksum, err := strconv.ParseUint(elems[4], 16, 32)
if err != nil {
return
}
diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/identifier_test.go b/pkg/storage/stores/shipper/indexshipper/tsdb/identifier_test.go
index 7fcd56f89b0e..b21e8352b7a8 100644
--- a/pkg/storage/stores/shipper/indexshipper/tsdb/identifier_test.go
+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/identifier_test.go
@@ -1,6 +1,8 @@
package tsdb
import (
+ "fmt"
+ "math"
"testing"
"time"
@@ -9,11 +11,10 @@ import (
func TestParseSingleTenantTSDBPath(t *testing.T) {
for _, tc := range []struct {
- desc string
- input string
- id SingleTenantTSDBIdentifier
- parent string
- ok bool
+ desc string
+ input string
+ id SingleTenantTSDBIdentifier
+ ok bool
}{
{
desc: "simple_works",
@@ -24,8 +25,18 @@ func TestParseSingleTenantTSDBPath(t *testing.T) {
Through: 10,
Checksum: 255,
},
- parent: "parent",
- ok: true,
+ ok: true,
+ },
+ {
+ desc: "uint32_max_checksum_works",
+ input: fmt.Sprintf("1-compactor-1-10-%x.tsdb", math.MaxUint32),
+ id: SingleTenantTSDBIdentifier{
+ TS: time.Unix(1, 0),
+ From: 1,
+ Through: 10,
+ Checksum: math.MaxUint32,
+ },
+ ok: true,
},
{
desc: "wrong uploader name",
@@ -45,8 +56,8 @@ func TestParseSingleTenantTSDBPath(t *testing.T) {
} {
t.Run(tc.desc, func(t *testing.T) {
id, ok := ParseSingleTenantTSDBPath(tc.input)
- require.Equal(t, tc.id, id)
require.Equal(t, tc.ok, ok)
+ require.Equal(t, tc.id, id)
})
}
}
From 543aaab0553f3367688415a25b908453041644e4 Mon Sep 17 00:00:00 2001
From: Salva Corts
Date: Thu, 15 Feb 2024 20:21:41 +0100
Subject: [PATCH 076/130] (Blooms) Add metrics to compactor (#11966)
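One building block added here is `CounterIter`, which wraps any iterator and counts successful `Next()` calls so the compactor can observe how many series it consumed. A self-contained sketch (the `Iterator` interface and slice iterator are inlined for runnability; in the tree they live in `pkg/storage/bloom/v1`):

```go
package main

import "fmt"

type Iterator[T any] interface {
	Next() bool
	At() T
}

// sliceIter is a minimal stand-in for v1.SliceIter.
type sliceIter[T any] struct {
	xs []T
	i  int
}

func (s *sliceIter[T]) Next() bool { s.i++; return s.i <= len(s.xs) }
func (s *sliceIter[T]) At() T      { return s.xs[s.i-1] }

// CounterIter counts how often Next() returned true on the wrapped iterator.
type CounterIter[T any] struct {
	Iterator[T]
	count int
}

func (it *CounterIter[T]) Next() bool {
	if it.Iterator.Next() {
		it.count++
		return true
	}
	return false
}

func (it *CounterIter[T]) Count() int { return it.count }

func main() {
	itr := &CounterIter[int]{Iterator: &sliceIter[int]{xs: []int{1, 2, 3}}}
	for itr.Next() {
		_ = itr.At()
	}
	fmt.Println(itr.Count()) // 3
}
```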
---
pkg/bloomcompactor/bloomcompactor.go | 21 ++++-
pkg/bloomcompactor/controller.go | 15 ++--
pkg/bloomcompactor/metrics.go | 113 +++++++++++++--------------
pkg/storage/bloom/v1/util.go | 26 ++++++
pkg/storage/bloom/v1/util_test.go | 26 ++++++
5 files changed, 135 insertions(+), 66 deletions(-)
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index 5a579f95fdb7..e8dc880f9d9d 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -109,9 +109,7 @@ func New(
c.logger,
)
- c.metrics.compactionRunInterval.Set(cfg.CompactionInterval.Seconds())
c.Service = services.NewBasicService(c.starting, c.running, c.stopping)
-
return c, nil
}
@@ -138,11 +136,17 @@ func (c *Compactor) running(ctx context.Context) error {
case <-ctx.Done():
return ctx.Err()
- case <-ticker.C:
+ case start := <-ticker.C:
+ c.metrics.compactionsStarted.Inc()
if err := c.runOne(ctx); err != nil {
- level.Error(c.logger).Log("msg", "compaction iteration failed", "err", err)
+ level.Error(c.logger).Log("msg", "compaction iteration failed", "err", err, "duration", time.Since(start))
+ c.metrics.compactionCompleted.WithLabelValues(statusFailure).Inc()
+ c.metrics.compactionTime.WithLabelValues(statusFailure).Observe(time.Since(start).Seconds())
return err
}
+ level.Info(c.logger).Log("msg", "compaction iteration completed", "duration", time.Since(start))
+ c.metrics.compactionCompleted.WithLabelValues(statusSuccess).Inc()
+ c.metrics.compactionTime.WithLabelValues(statusSuccess).Observe(time.Since(start).Seconds())
}
}
}
@@ -252,14 +256,17 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error {
}
for tenants.Next() && tenants.Err() == nil && ctx.Err() == nil {
+ c.metrics.tenantsDiscovered.Inc()
tenant := tenants.At()
ownershipRange, owns, err := c.ownsTenant(tenant)
if err != nil {
return errors.Wrap(err, "checking tenant ownership")
}
if !owns {
+ c.metrics.tenantsSkipped.Inc()
continue
}
+ c.metrics.tenantsOwned.Inc()
select {
case ch <- tenantTable{tenant: tenant, table: table, ownershipRange: ownershipRange}:
@@ -296,7 +303,11 @@ func (c *Compactor) runWorkers(ctx context.Context, ch <-chan tenantTable) error
return nil
}
+ start := time.Now()
+ c.metrics.tenantsStarted.Inc()
if err := c.compactTenantTable(ctx, tt); err != nil {
+ c.metrics.tenantsCompleted.WithLabelValues(statusFailure).Inc()
+ c.metrics.tenantsCompletedTime.WithLabelValues(statusFailure).Observe(time.Since(start).Seconds())
return errors.Wrapf(
err,
"compacting tenant table (%s) for tenant (%s) with ownership (%s)",
@@ -305,6 +316,8 @@ func (c *Compactor) runWorkers(ctx context.Context, ch <-chan tenantTable) error
tt.ownershipRange,
)
}
+ c.metrics.tenantsCompleted.WithLabelValues(statusSuccess).Inc()
+ c.metrics.tenantsCompletedTime.WithLabelValues(statusSuccess).Observe(time.Since(start).Seconds())
}
}
diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go
index cc801dc27e55..089ab800c7e3 100644
--- a/pkg/bloomcompactor/controller.go
+++ b/pkg/bloomcompactor/controller.go
@@ -25,7 +25,6 @@ type SimpleBloomController struct {
metrics *Metrics
limits Limits
- // TODO(owen-d): add metrics
logger log.Logger
}
@@ -269,6 +268,7 @@ func (s *SimpleBloomController) buildGaps(
maxBlockSize = uint64(s.limits.BloomCompactorMaxBlockSize(tenant))
blockOpts = v1.NewBlockOptions(nGramSize, nGramSkip, maxBlockSize)
created []bloomshipper.Meta
+ totalSeries uint64
)
for _, plan := range work {
@@ -295,10 +295,15 @@ func (s *SimpleBloomController) buildGaps(
return nil, errors.Wrap(err, "failed to get series and blocks")
}
+ // Blocks are built consuming the series iterator. For observability, we wrap the series iterator
+ // with a counter iterator to count the number of times Next() is called on it.
+ // This is used to observe the number of series that are being processed.
+ seriesItrWithCounter := v1.NewCounterIter[*v1.Series](seriesItr)
+
gen := NewSimpleBloomGenerator(
tenant,
blockOpts,
- seriesItr,
+ seriesItrWithCounter,
s.chunkLoader,
blocksIter,
s.rwFn,
@@ -307,9 +312,7 @@ func (s *SimpleBloomController) buildGaps(
)
_, loaded, newBlocks, err := gen.Generate(ctx)
-
if err != nil {
- // TODO(owen-d): metrics
level.Error(logger).Log("msg", "failed to generate bloom", "err", err)
s.closeLoadedBlocks(loaded, blocksIter)
return nil, errors.Wrap(err, "failed to generate bloom")
@@ -338,7 +341,6 @@ func (s *SimpleBloomController) buildGaps(
}
if err := newBlocks.Err(); err != nil {
- // TODO(owen-d): metrics
level.Error(logger).Log("msg", "failed to generate bloom", "err", err)
s.closeLoadedBlocks(loaded, blocksIter)
return nil, errors.Wrap(err, "failed to generate bloom")
@@ -360,9 +362,12 @@ func (s *SimpleBloomController) buildGaps(
return nil, errors.Wrap(err, "failed to write meta")
}
created = append(created, meta)
+
+ totalSeries += uint64(seriesItrWithCounter.Count())
}
}
+ s.metrics.tenantsSeries.Observe(float64(totalSeries))
level.Debug(logger).Log("msg", "finished bloom generation", "blocks", blockCt, "tsdbs", tsdbCt)
return created, nil
}
diff --git a/pkg/bloomcompactor/metrics.go b/pkg/bloomcompactor/metrics.go
index b02ac32aca72..350e3ed7e480 100644
--- a/pkg/bloomcompactor/metrics.go
+++ b/pkg/bloomcompactor/metrics.go
@@ -16,105 +16,104 @@ const (
)
type Metrics struct {
- bloomMetrics *v1.Metrics
- chunkSize prometheus.Histogram // uncompressed size of all chunks summed per series
+ bloomMetrics *v1.Metrics
+ compactorRunning prometheus.Gauge
+ chunkSize prometheus.Histogram // uncompressed size of all chunks summed per series
- compactionRunsStarted prometheus.Counter
- compactionRunsCompleted *prometheus.CounterVec
- compactionRunTime *prometheus.HistogramVec
- compactionRunDiscoveredTenants prometheus.Counter
- compactionRunSkippedTenants prometheus.Counter
- compactionRunTenantsCompleted *prometheus.CounterVec
- compactionRunTenantsTime *prometheus.HistogramVec
- compactionRunJobStarted prometheus.Counter
- compactionRunJobCompleted *prometheus.CounterVec
- compactionRunJobTime *prometheus.HistogramVec
- compactionRunInterval prometheus.Gauge
- compactorRunning prometheus.Gauge
+ compactionsStarted prometheus.Counter
+ compactionCompleted *prometheus.CounterVec
+ compactionTime *prometheus.HistogramVec
+
+ tenantsDiscovered prometheus.Counter
+ tenantsOwned prometheus.Counter
+ tenantsSkipped prometheus.Counter
+ tenantsStarted prometheus.Counter
+ tenantsCompleted *prometheus.CounterVec
+ tenantsCompletedTime *prometheus.HistogramVec
+ tenantsSeries prometheus.Histogram
}
func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
m := Metrics{
bloomMetrics: bloomMetrics,
+ compactorRunning: promauto.With(r).NewGauge(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "running",
+ Help: "Value will be 1 if compactor is currently running on this instance",
+ }),
chunkSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
- Name: "bloom_chunk_series_size",
- Help: "Uncompressed size of chunks in a series",
- Buckets: prometheus.ExponentialBucketsRange(1024, 1073741824, 10),
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "chunk_series_size",
+ Help: "Uncompressed size of chunks in a series",
+ Buckets: prometheus.ExponentialBucketsRange(1024, 1073741824, 10),
}),
- compactionRunsStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
+
+ compactionsStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "runs_started_total",
+ Name: "compactions_started",
Help: "Total number of compactions started",
}),
- compactionRunsCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+ compactionCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "runs_completed_total",
- Help: "Total number of compactions completed successfully",
+ Name: "compactions_completed",
+ Help: "Total number of compactions completed",
}, []string{"status"}),
- compactionRunTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
+ compactionTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "runs_time_seconds",
+ Name: "compactions_time_seconds",
Help: "Time spent during a compaction cycle.",
Buckets: prometheus.DefBuckets,
}, []string{"status"}),
- compactionRunDiscoveredTenants: promauto.With(r).NewCounter(prometheus.CounterOpts{
+
+ tenantsDiscovered: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
Name: "tenants_discovered",
Help: "Number of tenants discovered during the current compaction run",
}),
- compactionRunSkippedTenants: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ tenantsOwned: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "tenants_skipped",
- Help: "Number of tenants skipped during the current compaction run",
+ Name: "tenants_owned",
+ Help: "Number of tenants owned by this instance",
}),
- compactionRunTenantsCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
- Namespace: metricsNamespace,
- Subsystem: metricsSubsystem,
- Name: "tenants_completed",
- Help: "Number of tenants successfully processed during the current compaction run",
- }, []string{"status"}),
- compactionRunTenantsTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
+ tenantsSkipped: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "tenants_time_seconds",
- Help: "Time spent processing tenants.",
- Buckets: prometheus.DefBuckets,
- }, []string{"status"}),
- compactionRunJobStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Name: "tenants_skipped",
+ Help: "Number of tenants skipped since they are not owned by this instance",
+ }),
+ tenantsStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "job_started",
- Help: "Number of jobs started processing during the current compaction run",
+ Name: "tenants_started",
+ Help: "Number of tenants started to process during the current compaction run",
}),
- compactionRunJobCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+ tenantsCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "job_completed",
- Help: "Number of jobs successfully processed during the current compaction run",
+ Name: "tenants_completed",
+ Help: "Number of tenants successfully processed during the current compaction run",
}, []string{"status"}),
- compactionRunJobTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
+ tenantsCompletedTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "job_time_seconds",
- Help: "Time spent processing jobs.",
+ Name: "tenants_time_seconds",
+ Help: "Time spent processing tenants.",
Buckets: prometheus.DefBuckets,
}, []string{"status"}),
- compactionRunInterval: promauto.With(r).NewGauge(prometheus.GaugeOpts{
- Namespace: metricsNamespace,
- Subsystem: metricsSubsystem,
- Name: "compaction_interval_seconds",
- Help: "The configured interval on which compaction is run in seconds",
- }),
- compactorRunning: promauto.With(r).NewGauge(prometheus.GaugeOpts{
+ tenantsSeries: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "running",
- Help: "Value will be 1 if compactor is currently running on this instance",
+ Name: "tenants_series",
+ Help: "Number of series processed per tenant in the owned fingerprint-range.",
+ // Up to 10M series per tenant, way more than what we expect given our max_global_streams_per_user limits
+ Buckets: prometheus.ExponentialBucketsRange(1, 10000000, 10),
}),
}
diff --git a/pkg/storage/bloom/v1/util.go b/pkg/storage/bloom/v1/util.go
index 3b9e0631b715..67c0087a0d4b 100644
--- a/pkg/storage/bloom/v1/util.go
+++ b/pkg/storage/bloom/v1/util.go
@@ -276,3 +276,29 @@ func NewPeekCloseIter[T any](itr CloseableIterator[T]) *PeekCloseIter[T] {
func (it *PeekCloseIter[T]) Close() error {
return it.close()
}
+
+type CounterIterator[T any] interface {
+ Iterator[T]
+ Count() int
+}
+
+type CounterIter[T any] struct {
+ Iterator[T] // the underlying iterator
+ count int
+}
+
+func NewCounterIter[T any](itr Iterator[T]) *CounterIter[T] {
+ return &CounterIter[T]{Iterator: itr}
+}
+
+func (it *CounterIter[T]) Next() bool {
+ if it.Iterator.Next() {
+ it.count++
+ return true
+ }
+ return false
+}
+
+func (it *CounterIter[T]) Count() int {
+ return it.count
+}
diff --git a/pkg/storage/bloom/v1/util_test.go b/pkg/storage/bloom/v1/util_test.go
index ad89a226ec7f..afafa4d05a87 100644
--- a/pkg/storage/bloom/v1/util_test.go
+++ b/pkg/storage/bloom/v1/util_test.go
@@ -26,3 +26,29 @@ func TestPeekingIterator(t *testing.T) {
require.False(t, itr.Next())
}
+
+func TestCounterIter(t *testing.T) {
+ t.Parallel()
+
+ data := []int{1, 2, 3, 4, 5}
+ itr := NewCounterIter[int](NewSliceIter[int](data))
+ peekItr := NewPeekingIter[int](itr)
+
+ // Consume the outer iter and use peek
+ for {
+ if _, ok := peekItr.Peek(); !ok {
+ break
+ }
+ if !peekItr.Next() {
+ break
+ }
+ }
+ // Both iterators should be exhausted
+ require.False(t, itr.Next())
+ require.Nil(t, itr.Err())
+ require.False(t, peekItr.Next())
+ require.Nil(t, peekItr.Err())
+
+ // Assert that the count is correct and peeking hasn't jeopardized the count
+ require.Equal(t, len(data), itr.Count())
+}
From 87ae2efe41b3c9c0e905c13371959dd7a0463f83 Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Thu, 15 Feb 2024 14:11:36 -0800
Subject: [PATCH 077/130] Pull/11950 amendments (#11972)
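Among other changes, this introduces a generic `Fetcher`/`FetchFunc` adapter that the new `batchedLoader` uses so plain closures can serve as chunk or block fetchers. A minimal sketch of the adapter pattern (the doubling fetcher is a hypothetical stand-in for the real fetchers):

```go
package main

import (
	"context"
	"fmt"
)

type Fetcher[A, B any] interface {
	Fetch(ctx context.Context, inputs []A) ([]B, error)
}

// FetchFunc adapts a function to the Fetcher interface, mirroring http.HandlerFunc.
type FetchFunc[A, B any] func(ctx context.Context, inputs []A) ([]B, error)

func (f FetchFunc[A, B]) Fetch(ctx context.Context, inputs []A) ([]B, error) {
	return f(ctx, inputs)
}

func main() {
	// Hypothetical fetcher that "loads" ints by doubling them.
	double := FetchFunc[int, int](func(_ context.Context, xs []int) ([]int, error) {
		out := make([]int, 0, len(xs))
		for _, x := range xs {
			out = append(out, x*2)
		}
		return out, nil
	})

	got, _ := double.Fetch(context.Background(), []int{1, 2, 3})
	fmt.Println(got) // [2 4 6]
}
```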
Signed-off-by: Christian Haudum
Signed-off-by: Owen Diehl
Co-authored-by: Christian Haudum
---
pkg/bloomcompactor/batch.go | 367 +++++++++++++++---
pkg/bloomcompactor/batch_test.go | 211 +++++++++-
pkg/bloomcompactor/controller.go | 44 +--
pkg/bloomcompactor/spec.go | 239 +++---------
pkg/bloomcompactor/spec_test.go | 177 ++-------
pkg/storage/bloom/v1/builder.go | 23 +-
pkg/storage/bloom/v1/builder_test.go | 21 +-
pkg/storage/bloom/v1/util.go | 32 ++
.../stores/shipper/bloomshipper/cache.go | 7 +
9 files changed, 657 insertions(+), 464 deletions(-)
diff --git a/pkg/bloomcompactor/batch.go b/pkg/bloomcompactor/batch.go
index 2d43f83219df..e0787c1f6f1e 100644
--- a/pkg/bloomcompactor/batch.go
+++ b/pkg/bloomcompactor/batch.go
@@ -2,94 +2,359 @@ package bloomcompactor
import (
"context"
+ "io"
+ "math"
+ "time"
"github.com/grafana/dskit/multierror"
+ "golang.org/x/exp/slices"
+ "github.com/grafana/loki/pkg/chunkenc"
+ "github.com/grafana/loki/pkg/logproto"
+ logql_log "github.com/grafana/loki/pkg/logql/log"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/chunk"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
)
-// interface modeled from `pkg/storage/stores/shipper/bloomshipper.Fetcher`
-type blocksFetcher interface {
- FetchBlocks(context.Context, []bloomshipper.BlockRef) ([]*bloomshipper.CloseableBlockQuerier, error)
+type Fetcher[A, B any] interface {
+ Fetch(ctx context.Context, inputs []A) ([]B, error)
}
-func newBatchedBlockLoader(ctx context.Context, fetcher blocksFetcher, blocks []bloomshipper.BlockRef) (*batchedBlockLoader, error) {
- return &batchedBlockLoader{
- ctx: ctx,
- batchSize: 10, // make configurable?
- source: blocks,
- fetcher: fetcher,
- }, nil
+type FetchFunc[A, B any] func(ctx context.Context, inputs []A) ([]B, error)
+
+func (f FetchFunc[A, B]) Fetch(ctx context.Context, inputs []A) ([]B, error) {
+ return f(ctx, inputs)
}
-type batchedBlockLoader struct {
- ctx context.Context
+// batchedLoader implements `v1.Iterator[C]` in batches
+type batchedLoader[A, B, C any] struct {
+ metrics *Metrics
batchSize int
+ ctx context.Context
+ fetchers []Fetcher[A, B]
+ work [][]A
+
+ mapper func(B) (C, error)
+ cur C
+ batch []B
+ err error
+}
+
+const batchedLoaderDefaultBatchSize = 50
+
+func newBatchedLoader[A, B, C any](
+ ctx context.Context,
+ fetchers []Fetcher[A, B],
+ inputs [][]A,
+ mapper func(B) (C, error),
+ batchSize int,
+) *batchedLoader[A, B, C] {
+ return &batchedLoader[A, B, C]{
+ batchSize: max(batchSize, 1),
+ ctx: ctx,
+ fetchers: fetchers,
+ work: inputs,
+ mapper: mapper,
+ }
+}
+
+func (b *batchedLoader[A, B, C]) Next() bool {
+
+	// iterate work until we have a non-empty batch
+ for len(b.batch) == 0 {
+
+ // empty batch + no work remaining = we're done
+ if len(b.work) == 0 {
+ return false
+ }
+
+ // setup next batch
+ next := b.work[0]
+ batchSize := min(b.batchSize, len(next))
+ toFetch := next[:batchSize]
+ fetcher := b.fetchers[0]
+
+ // update work
+ b.work[0] = b.work[0][batchSize:]
+ if len(b.work[0]) == 0 {
+ // if we've exhausted work from this set of inputs,
+ // set pointer to next set of inputs
+ // and their respective fetcher
+ b.work = b.work[1:]
+ b.fetchers = b.fetchers[1:]
+ }
+
+ // there was no work in this batch; continue (should not happen)
+ if len(toFetch) == 0 {
+ continue
+ }
+
+ b.batch, b.err = fetcher.Fetch(b.ctx, toFetch)
+ // error fetching, short-circuit iteration
+ if b.err != nil {
+ return false
+ }
+ }
- source []bloomshipper.BlockRef
- fetcher blocksFetcher
+ return b.prepNext()
+}
- batch []*bloomshipper.CloseableBlockQuerier
- cur *bloomshipper.CloseableBlockQuerier
- err error
+func (b *batchedLoader[_, B, C]) prepNext() bool {
+ b.cur, b.err = b.mapper(b.batch[0])
+ b.batch = b.batch[1:]
+ return b.err == nil
}
-// At implements v1.CloseableIterator.
-func (b *batchedBlockLoader) At() *bloomshipper.CloseableBlockQuerier {
+func (b *batchedLoader[_, _, C]) At() C {
return b.cur
}
-// Close implements v1.CloseableIterator.
-func (b *batchedBlockLoader) Close() error {
- if b.cur != nil {
- return b.cur.Close()
+func (b *batchedLoader[_, _, _]) Err() error {
+ return b.err
+}
+
+// newBatchedChunkLoader batches chunk fetching to ensure memory is bounded while loading chunks
+// TODO(owen-d): testware
+func newBatchedChunkLoader(
+ ctx context.Context,
+ fetchers []Fetcher[chunk.Chunk, chunk.Chunk],
+ inputs [][]chunk.Chunk,
+ metrics *Metrics,
+ batchSize int,
+) *batchedLoader[chunk.Chunk, chunk.Chunk, v1.ChunkRefWithIter] {
+
+ mapper := func(c chunk.Chunk) (v1.ChunkRefWithIter, error) {
+ chk := c.Data.(*chunkenc.Facade).LokiChunk()
+ metrics.chunkSize.Observe(float64(chk.UncompressedSize()))
+ itr, err := chk.Iterator(
+ ctx,
+ time.Unix(0, 0),
+ time.Unix(0, math.MaxInt64),
+ logproto.FORWARD,
+ logql_log.NewNoopPipeline().ForStream(c.Metric),
+ )
+
+ if err != nil {
+ return v1.ChunkRefWithIter{}, err
+ }
+
+ return v1.ChunkRefWithIter{
+ Ref: v1.ChunkRef{
+ Start: c.From,
+ End: c.Through,
+ Checksum: c.Checksum,
+ },
+ Itr: itr,
+ }, nil
}
- return nil
+ return newBatchedLoader(ctx, fetchers, inputs, mapper, batchSize)
}
-// CloseBatch closes the remaining items from the current batch
-func (b *batchedBlockLoader) CloseBatch() error {
- var err multierror.MultiError
- for _, cur := range b.batch {
- err.Add(cur.Close())
+func newBatchedBlockLoader(
+ ctx context.Context,
+ fetcher Fetcher[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier],
+ blocks []bloomshipper.BlockRef,
+ batchSize int,
+) *batchedLoader[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier, *bloomshipper.CloseableBlockQuerier] {
+
+ fetchers := []Fetcher[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier]{fetcher}
+ inputs := [][]bloomshipper.BlockRef{blocks}
+ mapper := func(a *bloomshipper.CloseableBlockQuerier) (*bloomshipper.CloseableBlockQuerier, error) {
+ return a, nil
}
- if len(b.batch) > 0 {
- b.batch = b.batch[:0]
+
+ return newBatchedLoader(ctx, fetchers, inputs, mapper, batchSize)
+}
+
+// compiler checks
+var _ v1.Iterator[*v1.SeriesWithBloom] = &blockLoadingIter{}
+var _ v1.CloseableIterator[*v1.SeriesWithBloom] = &blockLoadingIter{}
+var _ v1.ResettableIterator[*v1.SeriesWithBloom] = &blockLoadingIter{}
+
+// TODO(chaudum): testware
+func newBlockLoadingIter(ctx context.Context, blocks []bloomshipper.BlockRef, fetcher FetchFunc[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier], batchSize int) *blockLoadingIter {
+
+ return &blockLoadingIter{
+ ctx: ctx,
+ fetcher: fetcher,
+ inputs: blocks,
+ batchSize: batchSize,
+ loaded: make(map[io.Closer]struct{}),
+ }
+}
+
+type blockLoadingIter struct {
+ // constructor arguments
+ ctx context.Context
+ fetcher Fetcher[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier]
+ inputs []bloomshipper.BlockRef
+ overlapping v1.Iterator[[]bloomshipper.BlockRef]
+ batchSize int
+ // optional arguments
+ filter func(*bloomshipper.CloseableBlockQuerier) bool
+ // internals
+ initialized bool
+ err error
+ iter v1.Iterator[*v1.SeriesWithBloom]
+ loader *batchedLoader[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier, *bloomshipper.CloseableBlockQuerier]
+ loaded map[io.Closer]struct{}
+}
+
+// At implements v1.Iterator.
+func (i *blockLoadingIter) At() *v1.SeriesWithBloom {
+ if !i.initialized {
+ panic("iterator not initialized")
+ }
+ return i.iter.At()
+}
+
+// Err implements v1.Iterator.
+func (i *blockLoadingIter) Err() error {
+ if !i.initialized {
+ panic("iterator not initialized")
+ }
+ if i.err != nil {
+ return i.err
+ }
+ return i.iter.Err()
+}
+
+// Next implements v1.Iterator.
+func (i *blockLoadingIter) Next() bool {
+ i.init()
+ // next from current batch
+ hasNext := i.iter.Next()
+ if !hasNext && !i.loadNext() {
+ return false
+ }
+ // next from next batch
+ return i.iter.Next()
+}
+
+// Close implements v1.CloseableIterator.
+func (i *blockLoadingIter) Close() error {
+ var err multierror.MultiError
+ for k := range i.loaded {
+ err.Add(k.Close())
}
return err.Err()
}
-// Err implements v1.CloseableIterator.
-func (b *batchedBlockLoader) Err() error {
- return b.err
+// Reset implements v1.ResettableIterator.
+// TODO(chaudum): Cache already fetched blocks to avoid the overhead of
+// creating the reader.
+func (i *blockLoadingIter) Reset() error {
+ if !i.initialized {
+ return nil
+ }
+ // close loaded queriers
+ err := i.Close()
+ i.initialized = false
+ clear(i.loaded)
+ return err
}
-// Next implements v1.CloseableIterator.
-func (b *batchedBlockLoader) Next() bool {
- if len(b.batch) > 0 {
- return b.setNext()
+func (i *blockLoadingIter) init() {
+ if i.initialized {
+ return
}
- if len(b.source) == 0 {
+ // group overlapping blocks
+ i.overlapping = overlappingBlocksIter(i.inputs)
+
+ // set "match all" filter function if not present
+ if i.filter == nil {
+ i.filter = func(cbq *bloomshipper.CloseableBlockQuerier) bool { return true }
+ }
+
+ // load first batch
+ i.loadNext()
+
+ // done
+ i.initialized = true
+}
+
+func (i *blockLoadingIter) Filter(filter func(*bloomshipper.CloseableBlockQuerier) bool) {
+ if i.initialized {
+ panic("iterator already initialized")
+ }
+ i.filter = filter
+}
+
+func (i *blockLoadingIter) loadNext() bool {
+ // check if there are more overlapping groups to load
+ if !i.overlapping.Next() {
+ return false
+ }
+
+ if i.overlapping.Err() != nil {
+ i.err = i.overlapping.Err()
return false
}
- // setup next batch
- batchSize := min(b.batchSize, len(b.source))
- toFetch := b.source[:batchSize]
+ blockRefs := i.overlapping.At()
- // update source
- b.source = b.source[batchSize:]
+ loader := newBatchedBlockLoader(i.ctx, i.fetcher, blockRefs, i.batchSize)
+ filtered := v1.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter)
- b.batch, b.err = b.fetcher.FetchBlocks(b.ctx, toFetch)
- if b.err != nil {
+ iters := make([]v1.PeekingIterator[*v1.SeriesWithBloom], 0, len(blockRefs))
+ for filtered.Next() && filtered.Err() == nil {
+ bq := loader.At()
+ if _, ok := i.loaded[bq]; !ok {
+ i.loaded[bq] = struct{}{}
+ }
+ iter, _ := bq.SeriesIter()
+ iters = append(iters, iter)
+ }
+
+ if loader.Err() != nil {
+ i.err = loader.Err()
return false
}
- return b.setNext()
-}
-func (b *batchedBlockLoader) setNext() bool {
- b.cur, b.err = b.batch[0], nil
- b.batch = b.batch[1:]
+ if len(iters) == 0 {
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+ return true
+ }
+
+ // Turn the list of blocks into a single iterator that returns the next series
+ mergedBlocks := v1.NewHeapIterForSeriesWithBloom(iters...)
+ // two overlapping blocks can conceivably have the same series, so we need to dedupe,
+ // preferring the one with the most chunks already indexed since we'll have
+ // to add fewer chunks to the bloom
+ i.iter = v1.NewDedupingIter[*v1.SeriesWithBloom, *v1.SeriesWithBloom](
+ func(a, b *v1.SeriesWithBloom) bool {
+ return a.Series.Fingerprint == b.Series.Fingerprint
+ },
+ v1.Identity[*v1.SeriesWithBloom],
+ func(a, b *v1.SeriesWithBloom) *v1.SeriesWithBloom {
+ if len(a.Series.Chunks) > len(b.Series.Chunks) {
+ return a
+ }
+ return b
+ },
+ v1.NewPeekingIter(mergedBlocks),
+ )
return true
}
+
+func overlappingBlocksIter(inputs []bloomshipper.BlockRef) v1.Iterator[[]bloomshipper.BlockRef] {
+ // can we assume sorted blocks?
+ peekIter := v1.NewPeekingIter(v1.NewSliceIter(inputs))
+
+ return v1.NewDedupingIter[bloomshipper.BlockRef, []bloomshipper.BlockRef](
+ func(a bloomshipper.BlockRef, b []bloomshipper.BlockRef) bool {
+ minFp := b[0].Bounds.Min
+ maxFp := slices.MaxFunc(b, func(a, b bloomshipper.BlockRef) int { return int(a.Bounds.Max - b.Bounds.Max) }).Bounds.Max
+ return a.Bounds.Overlaps(v1.NewBounds(minFp, maxFp))
+ },
+ func(a bloomshipper.BlockRef) []bloomshipper.BlockRef {
+ return []bloomshipper.BlockRef{a}
+ },
+ func(a bloomshipper.BlockRef, b []bloomshipper.BlockRef) []bloomshipper.BlockRef {
+ return append(b, a)
+ },
+ peekIter,
+ )
+}
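As a reading aid, a hedged sketch of how the generic batchedLoader above is driven; the identity mapper and the FetchFunc literal are illustrative stand-ins for real storage fetchers:

    // Illustrative only: a loader over ints with an identity mapper.
    fetch := FetchFunc[int, int](func(_ context.Context, xs []int) ([]int, error) {
        return xs, nil // a real fetcher would hit object storage here
    })
    loader := newBatchedLoader[int, int, int](
        context.Background(),
        []Fetcher[int, int]{fetch},
        [][]int{{1, 2, 3, 4}}, // one work list per fetcher
        func(x int) (int, error) { return x, nil },
        2, // fetch at most two items per round trip
    )
    for loader.Next() {
        _ = loader.At()
    }
    if err := loader.Err(); err != nil {
        // fetch or mapping errors short-circuit iteration and surface here
    }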
diff --git a/pkg/bloomcompactor/batch_test.go b/pkg/bloomcompactor/batch_test.go
index a1922bf931b8..bd2cb3378cfb 100644
--- a/pkg/bloomcompactor/batch_test.go
+++ b/pkg/bloomcompactor/batch_test.go
@@ -2,36 +2,209 @@ package bloomcompactor
import (
"context"
+ "errors"
"testing"
"github.com/stretchr/testify/require"
- "go.uber.org/atomic"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
)
-type dummyBlocksFetcher struct {
- count *atomic.Int32
-}
+func TestBatchedLoader(t *testing.T) {
+ t.Parallel()
-func (f *dummyBlocksFetcher) FetchBlocks(_ context.Context, blocks []bloomshipper.BlockRef) ([]*bloomshipper.CloseableBlockQuerier, error) {
- f.count.Inc()
- return make([]*bloomshipper.CloseableBlockQuerier, len(blocks)), nil
-}
+ errMapper := func(i int) (int, error) {
+ return 0, errors.New("bzzt")
+ }
+ successMapper := func(i int) (int, error) {
+ return i, nil
+ }
-func TestBatchedBlockLoader(t *testing.T) {
- ctx := context.Background()
- f := &dummyBlocksFetcher{count: atomic.NewInt32(0)}
+ expired, cancel := context.WithCancel(context.Background())
+ cancel()
- blocks := make([]bloomshipper.BlockRef, 25)
- blocksIter, err := newBatchedBlockLoader(ctx, f, blocks)
- require.NoError(t, err)
+ for _, tc := range []struct {
+ desc string
+ ctx context.Context
+ batchSize int
+ mapper func(int) (int, error)
+ err bool
+ inputs [][]int
+ exp []int
+ }{
+ {
+ desc: "OneBatch",
+ ctx: context.Background(),
+ batchSize: 2,
+ mapper: successMapper,
+ err: false,
+ inputs: [][]int{{0, 1}},
+ exp: []int{0, 1},
+ },
+ {
+ desc: "ZeroBatchSizeStillWorks",
+ ctx: context.Background(),
+ batchSize: 0,
+ mapper: successMapper,
+ err: false,
+ inputs: [][]int{{0, 1}},
+ exp: []int{0, 1},
+ },
+ {
+ desc: "OneBatchLessThanFull",
+ ctx: context.Background(),
+ batchSize: 2,
+ mapper: successMapper,
+ err: false,
+ inputs: [][]int{{0}},
+ exp: []int{0},
+ },
+ {
+ desc: "TwoBatches",
+ ctx: context.Background(),
+ batchSize: 2,
+ mapper: successMapper,
+ err: false,
+ inputs: [][]int{{0, 1, 2, 3}},
+ exp: []int{0, 1, 2, 3},
+ },
+ {
+ desc: "MultipleBatchesMultipleLoaders",
+ ctx: context.Background(),
+ batchSize: 2,
+ mapper: successMapper,
+ err: false,
+ inputs: [][]int{{0, 1}, {2}, {3, 4, 5}},
+ exp: []int{0, 1, 2, 3, 4, 5},
+ },
+ {
+ desc: "HandlesEmptyInputs",
+ ctx: context.Background(),
+ batchSize: 2,
+ mapper: successMapper,
+ err: false,
+ inputs: [][]int{{0, 1, 2, 3}, nil, {4}},
+ exp: []int{0, 1, 2, 3, 4},
+ },
+ {
+ desc: "Timeout",
+ ctx: expired,
+ batchSize: 2,
+ mapper: successMapper,
+ err: true,
+ inputs: [][]int{{0}},
+ },
+ {
+ desc: "MappingFailure",
+ ctx: context.Background(),
+ batchSize: 2,
+ mapper: errMapper,
+ err: true,
+ inputs: [][]int{{0}},
+ },
+ } {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ fetchers := make([]Fetcher[int, int], 0, len(tc.inputs))
+ for range tc.inputs {
+ fetchers = append(
+ fetchers,
+ FetchFunc[int, int](func(ctx context.Context, xs []int) ([]int, error) {
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+ return xs, nil
+ }),
+ )
+ }
- var count int
- for blocksIter.Next() && blocksIter.Err() == nil {
- count++
+ loader := newBatchedLoader[int, int, int](
+ tc.ctx,
+ fetchers,
+ tc.inputs,
+ tc.mapper,
+ tc.batchSize,
+ )
+
+ got, err := v1.Collect[int](loader)
+ if tc.err {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ require.Equal(t, tc.exp, got)
+
+ })
}
+}
- require.Equal(t, len(blocks), count)
- require.Equal(t, int32(len(blocks)/blocksIter.batchSize+1), f.count.Load())
+func TestOverlappingBlocksIter(t *testing.T) {
+ t.Parallel()
+ for _, tc := range []struct {
+ desc string
+ inp []bloomshipper.BlockRef
+ exp int // expected groups
+ }{
+ {
+ desc: "Empty",
+ inp: []bloomshipper.BlockRef{},
+ exp: 0,
+ },
+ {
+ desc: "NonOverlapping",
+ inp: []bloomshipper.BlockRef{
+ genBlockRef(0x0000, 0x00ff),
+ genBlockRef(0x0100, 0x01ff),
+ genBlockRef(0x0200, 0x02ff),
+ },
+ exp: 3,
+ },
+ {
+ desc: "AllOverlapping",
+ inp: []bloomshipper.BlockRef{
+ genBlockRef(0x0000, 0x02ff), // |-----------|
+ genBlockRef(0x0100, 0x01ff), // |---|
+ genBlockRef(0x0200, 0x02ff), // |---|
+ },
+ exp: 1,
+ },
+ {
+ desc: "PartialOverlapping",
+ inp: []bloomshipper.BlockRef{
+ genBlockRef(0x0000, 0x01ff), // group 1 |-------|
+ genBlockRef(0x0100, 0x02ff), // group 1 |-------|
+ genBlockRef(0x0200, 0x03ff), // group 1 |-------|
+ genBlockRef(0x0200, 0x02ff), // group 1 |---|
+ },
+ exp: 1,
+ },
+ {
+ desc: "PartialOverlapping",
+ inp: []bloomshipper.BlockRef{
+ genBlockRef(0x0000, 0x01ff), // group 1 |-------|
+ genBlockRef(0x0100, 0x02ff), // group 1 |-------|
+ genBlockRef(0x0100, 0x01ff), // group 1 |---|
+ genBlockRef(0x0300, 0x03ff), // group 2 |---|
+ genBlockRef(0x0310, 0x03ff), // group 2 |-|
+ },
+ exp: 2,
+ },
+ } {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ it := overlappingBlocksIter(tc.inp)
+ var overlapping [][]bloomshipper.BlockRef
+ var i int
+ for it.Next() && it.Err() == nil {
+ require.NotNil(t, it.At())
+ overlapping = append(overlapping, it.At())
+ for _, r := range it.At() {
+ t.Log(i, r)
+ }
+ i++
+ }
+ require.Equal(t, tc.exp, len(overlapping))
+ })
+ }
}
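For intuition, the grouping the cases above exercise can be reproduced in isolation; genBlockRef is the test helper used throughout this file, and the input is assumed to be sorted by fingerprint bounds:

    // Sketch: refs whose fingerprint bounds overlap collapse into one group.
    refs := []bloomshipper.BlockRef{
        genBlockRef(0x0000, 0x01ff),
        genBlockRef(0x0100, 0x02ff), // overlaps the previous ref -> same group
        genBlockRef(0x0300, 0x03ff), // disjoint -> starts a new group
    }
    it := overlappingBlocksIter(refs)
    for it.Next() {
        group := it.At() // a group of two refs, then a group of one
        _ = group
    }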
diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go
index 089ab800c7e3..cee0e6f05820 100644
--- a/pkg/bloomcompactor/controller.go
+++ b/pkg/bloomcompactor/controller.go
@@ -4,12 +4,10 @@ import (
"bytes"
"context"
"fmt"
- "io"
"sort"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
- "github.com/grafana/dskit/multierror"
"github.com/pkg/errors"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
@@ -221,7 +219,7 @@ func (s *SimpleBloomController) loadWorkForGap(
tenant string,
id tsdb.Identifier,
gap gapWithBlocks,
-) (v1.CloseableIterator[*v1.Series], v1.CloseableIterator[*bloomshipper.CloseableBlockQuerier], error) {
+) (v1.CloseableIterator[*v1.Series], v1.CloseableResettableIterator[*v1.SeriesWithBloom], error) {
// load a series iterator for the gap
seriesItr, err := s.tsdbStore.LoadTSDB(ctx, table, tenant, id, gap.bounds)
if err != nil {
@@ -234,10 +232,8 @@ func (s *SimpleBloomController) loadWorkForGap(
return nil, nil, errors.Wrap(err, "failed to get fetcher")
}
- blocksIter, err := newBatchedBlockLoader(ctx, fetcher, gap.blocks)
- if err != nil {
- return nil, nil, errors.Wrap(err, "failed to load blocks")
- }
+ f := FetchFunc[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier](fetcher.FetchBlocks)
+ blocksIter := newBlockLoadingIter(ctx, gap.blocks, f, 10)
return seriesItr, blocksIter, nil
}
@@ -311,10 +307,10 @@ func (s *SimpleBloomController) buildGaps(
log.With(logger, "tsdb", plan.tsdb.Name(), "ownership", gap),
)
- _, loaded, newBlocks, err := gen.Generate(ctx)
+ newBlocks := gen.Generate(ctx)
if err != nil {
level.Error(logger).Log("msg", "failed to generate bloom", "err", err)
- s.closeLoadedBlocks(loaded, blocksIter)
+ blocksIter.Close()
return nil, errors.Wrap(err, "failed to generate bloom")
}
@@ -325,6 +321,7 @@ func (s *SimpleBloomController) buildGaps(
built, err := bloomshipper.BlockFrom(tenant, table.Addr(), blk)
if err != nil {
level.Error(logger).Log("msg", "failed to build block", "err", err)
+ blocksIter.Close()
return nil, errors.Wrap(err, "failed to build block")
}
@@ -333,7 +330,7 @@ func (s *SimpleBloomController) buildGaps(
built,
); err != nil {
level.Error(logger).Log("msg", "failed to write block", "err", err)
- s.closeLoadedBlocks(loaded, blocksIter)
+ blocksIter.Close()
return nil, errors.Wrap(err, "failed to write block")
}
@@ -342,12 +339,11 @@ func (s *SimpleBloomController) buildGaps(
if err := newBlocks.Err(); err != nil {
level.Error(logger).Log("msg", "failed to generate bloom", "err", err)
- s.closeLoadedBlocks(loaded, blocksIter)
return nil, errors.Wrap(err, "failed to generate bloom")
}
// Close pre-existing blocks
- s.closeLoadedBlocks(loaded, blocksIter)
+ blocksIter.Close()
// Write the new meta
ref, err := bloomshipper.MetaRefFrom(tenant, table.Addr(), gap.bounds, meta.Sources, meta.Blocks)
@@ -485,30 +481,6 @@ func tsdbsStrictlyNewer(as, bs []tsdb.SingleTenantTSDBIdentifier) bool {
return true
}
-func (s *SimpleBloomController) closeLoadedBlocks(toClose []io.Closer, it v1.CloseableIterator[*bloomshipper.CloseableBlockQuerier]) {
- // close loaded blocks
- var err multierror.MultiError
- for _, closer := range toClose {
- err.Add(closer.Close())
- }
-
- switch itr := it.(type) {
- case *batchedBlockLoader:
- // close remaining loaded blocks from batch
- err.Add(itr.CloseBatch())
- default:
- // close remaining loaded blocks
- for itr.Next() && itr.Err() == nil {
- err.Add(itr.At().Close())
- }
- }
-
- // log error
- if err.Err() != nil {
- level.Error(s.logger).Log("msg", "failed to close blocks", "err", err)
- }
-}
-
type gapWithBlocks struct {
bounds v1.FingerprintBounds
blocks []bloomshipper.BlockRef
diff --git a/pkg/bloomcompactor/spec.go b/pkg/bloomcompactor/spec.go
index 4a1125082ca5..67d41b650e37 100644
--- a/pkg/bloomcompactor/spec.go
+++ b/pkg/bloomcompactor/spec.go
@@ -4,17 +4,13 @@ import (
"context"
"fmt"
"io"
- "math"
- "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
- "github.com/grafana/loki/pkg/chunkenc"
"github.com/grafana/loki/pkg/logproto"
- logql_log "github.com/grafana/loki/pkg/logql/log"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/chunk"
"github.com/grafana/loki/pkg/storage/chunk/fetcher"
@@ -48,7 +44,8 @@ type SimpleBloomGenerator struct {
userID string
store v1.Iterator[*v1.Series]
chunkLoader ChunkLoader
- blocksIter v1.CloseableIterator[*bloomshipper.CloseableBlockQuerier]
+ blocksIter v1.ResettableIterator[*v1.SeriesWithBloom]
+ skipped []v1.BlockMetadata
// options to build blocks with
opts v1.BlockOptions
@@ -70,7 +67,7 @@ func NewSimpleBloomGenerator(
opts v1.BlockOptions,
store v1.Iterator[*v1.Series],
chunkLoader ChunkLoader,
- blocksIter v1.CloseableIterator[*bloomshipper.CloseableBlockQuerier],
+ blocksIter v1.ResettableIterator[*v1.SeriesWithBloom],
readWriterFn func() (v1.BlockWriter, v1.BlockReader),
metrics *Metrics,
logger log.Logger,
@@ -107,44 +104,41 @@ func (s *SimpleBloomGenerator) populator(ctx context.Context) func(series *v1.Se
}
-func (s *SimpleBloomGenerator) Generate(ctx context.Context) ([]v1.BlockMetadata, []io.Closer, v1.Iterator[*v1.Block], error) {
- skippedBlocks := make([]v1.BlockMetadata, 0)
- toClose := make([]io.Closer, 0)
- blocksMatchingSchema := make([]*bloomshipper.CloseableBlockQuerier, 0)
+func (s *SimpleBloomGenerator) Generate(ctx context.Context) v1.Iterator[*v1.Block] {
+ level.Debug(s.logger).Log("msg", "generating bloom filters for blocks", "schema", fmt.Sprintf("%+v", s.opts.Schema))
- for s.blocksIter.Next() && s.blocksIter.Err() == nil {
- block := s.blocksIter.At()
- toClose = append(toClose, block)
-
- logger := log.With(s.logger, "block", block.BlockRef)
- md, err := block.Metadata()
- schema := md.Options.Schema
- if err != nil {
- level.Warn(logger).Log("msg", "failed to get schema for block", "err", err)
- skippedBlocks = append(skippedBlocks, md)
- continue
- }
-
- if !s.opts.Schema.Compatible(schema) {
- level.Warn(logger).Log("msg", "block schema incompatible with options", "generator_schema", fmt.Sprintf("%+v", s.opts.Schema), "block_schema", fmt.Sprintf("%+v", schema))
- skippedBlocks = append(skippedBlocks, md)
- continue
- }
-
- level.Debug(logger).Log("msg", "adding compatible block to bloom generation inputs")
- blocksMatchingSchema = append(blocksMatchingSchema, block)
- }
+ series := v1.NewPeekingIter(s.store)
- if s.blocksIter.Err() != nil {
- // should we ignore the error and continue with the blocks we got?
- return skippedBlocks, toClose, v1.NewSliceIter([]*v1.Block{}), s.blocksIter.Err()
+ // TODO: Use interface
+ impl, ok := s.blocksIter.(*blockLoadingIter)
+ if ok {
+ impl.Filter(
+ func(bq *bloomshipper.CloseableBlockQuerier) bool {
+
+ logger := log.With(s.logger, "block", bq.BlockRef)
+ md, err := bq.Metadata()
+ schema := md.Options.Schema
+ if err != nil {
+ level.Warn(logger).Log("msg", "failed to get schema for block", "err", err)
+ s.skipped = append(s.skipped, md)
+ bq.Close() // close unused querier
+ return false
+ }
+
+ if !s.opts.Schema.Compatible(schema) {
+ level.Warn(logger).Log("msg", "block schema incompatible with options", "generator_schema", fmt.Sprintf("%+v", s.opts.Schema), "block_schema", fmt.Sprintf("%+v", schema))
+ s.skipped = append(s.skipped, md)
+ bq.Close() // close unused querier
+ return false
+ }
+
+ level.Debug(logger).Log("msg", "adding compatible block to bloom generation inputs")
+ return true
+ },
+ )
}
- level.Debug(s.logger).Log("msg", "generating bloom filters for blocks", "num_blocks", len(blocksMatchingSchema), "skipped_blocks", len(skippedBlocks), "schema", fmt.Sprintf("%+v", s.opts.Schema))
-
- series := v1.NewPeekingIter(s.store)
- blockIter := NewLazyBlockBuilderIterator(ctx, s.opts, s.populator(ctx), s.readWriterFn, series, blocksMatchingSchema)
- return skippedBlocks, toClose, blockIter, nil
+ return NewLazyBlockBuilderIterator(ctx, s.opts, s.populator(ctx), s.readWriterFn, series, s.blocksIter)
}
// LazyBlockBuilderIterator is a lazy iterator over blocks that builds
@@ -155,11 +149,10 @@ type LazyBlockBuilderIterator struct {
populate func(*v1.Series, *v1.Bloom) error
readWriterFn func() (v1.BlockWriter, v1.BlockReader)
series v1.PeekingIterator[*v1.Series]
- blocks []*bloomshipper.CloseableBlockQuerier
+ blocks v1.ResettableIterator[*v1.SeriesWithBloom]
- blocksAsPeekingIter []v1.PeekingIterator[*v1.SeriesWithBloom]
- curr *v1.Block
- err error
+ curr *v1.Block
+ err error
}
func NewLazyBlockBuilderIterator(
@@ -168,20 +161,16 @@ func NewLazyBlockBuilderIterator(
populate func(*v1.Series, *v1.Bloom) error,
readWriterFn func() (v1.BlockWriter, v1.BlockReader),
series v1.PeekingIterator[*v1.Series],
- blocks []*bloomshipper.CloseableBlockQuerier,
+ blocks v1.ResettableIterator[*v1.SeriesWithBloom],
) *LazyBlockBuilderIterator {
- it := &LazyBlockBuilderIterator{
+ return &LazyBlockBuilderIterator{
ctx: ctx,
opts: opts,
populate: populate,
readWriterFn: readWriterFn,
series: series,
blocks: blocks,
-
- blocksAsPeekingIter: make([]v1.PeekingIterator[*v1.SeriesWithBloom], len(blocks)),
}
-
- return it
}
func (b *LazyBlockBuilderIterator) Next() bool {
@@ -190,21 +179,17 @@ func (b *LazyBlockBuilderIterator) Next() bool {
return false
}
- // reset all the blocks to the start
- for i, block := range b.blocks {
- if err := block.Reset(); err != nil {
- b.err = errors.Wrapf(err, "failed to reset block iterator %d", i)
- return false
- }
- b.blocksAsPeekingIter[i] = v1.NewPeekingIter[*v1.SeriesWithBloom](block)
- }
-
if err := b.ctx.Err(); err != nil {
b.err = errors.Wrap(err, "context canceled")
return false
}
- mergeBuilder := v1.NewMergeBuilder(b.blocksAsPeekingIter, b.series, b.populate)
+ if err := b.blocks.Reset(); err != nil {
+ b.err = errors.Wrap(err, "reset blocks iterator")
+ return false
+ }
+
+ mergeBuilder := v1.NewMergeBuilder(b.blocks, b.series, b.populate)
writer, reader := b.readWriterFn()
blockBuilder, err := v1.NewBlockBuilder(b.opts, writer)
if err != nil {
@@ -292,137 +277,3 @@ func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.S
itr: newBatchedChunkLoader(ctx, fetchers, inputs, s.metrics, batchedLoaderDefaultBatchSize),
}, nil
}
-
-type Fetcher[A, B any] interface {
- Fetch(ctx context.Context, inputs []A) ([]B, error)
-}
-
-type FetchFunc[A, B any] func(ctx context.Context, inputs []A) ([]B, error)
-
-func (f FetchFunc[A, B]) Fetch(ctx context.Context, inputs []A) ([]B, error) {
- return f(ctx, inputs)
-}
-
-// batchedLoader implements `v1.Iterator[v1.ChunkRefWithIter]` in batches
-// to ensure memory is bounded while loading chunks
-// TODO(owen-d): testware
-type batchedLoader[A, B, C any] struct {
- metrics *Metrics
- batchSize int
- ctx context.Context
- fetchers []Fetcher[A, B]
- work [][]A
-
- mapper func(B) (C, error)
- cur C
- batch []B
- err error
-}
-
-const batchedLoaderDefaultBatchSize = 50
-
-func newBatchedLoader[A, B, C any](
- ctx context.Context,
- fetchers []Fetcher[A, B],
- inputs [][]A,
- mapper func(B) (C, error),
- batchSize int,
-) *batchedLoader[A, B, C] {
- return &batchedLoader[A, B, C]{
- batchSize: max(batchSize, 1),
- ctx: ctx,
- fetchers: fetchers,
- work: inputs,
- mapper: mapper,
- }
-}
-
-func (b *batchedLoader[A, B, C]) Next() bool {
-
- // iterate work until we have non-zero length batch
- for len(b.batch) == 0 {
-
- // empty batch + no work remaining = we're done
- if len(b.work) == 0 {
- return false
- }
-
- // setup next batch
- next := b.work[0]
- batchSize := min(b.batchSize, len(next))
- toFetch := next[:batchSize]
- fetcher := b.fetchers[0]
-
- // update work
- b.work[0] = b.work[0][batchSize:]
- if len(b.work[0]) == 0 {
- // if we've exhausted work from this set of inputs,
- // set pointer to next set of inputs
- // and their respective fetcher
- b.work = b.work[1:]
- b.fetchers = b.fetchers[1:]
- }
-
- // there was no work in this batch; continue (should not happen)
- if len(toFetch) == 0 {
- continue
- }
-
- b.batch, b.err = fetcher.Fetch(b.ctx, toFetch)
- // error fetching, short-circuit iteration
- if b.err != nil {
- return false
- }
- }
-
- return b.prepNext()
-}
-
-func (b *batchedLoader[_, B, C]) prepNext() bool {
- b.cur, b.err = b.mapper(b.batch[0])
- b.batch = b.batch[1:]
- return b.err == nil
-}
-
-func newBatchedChunkLoader(
- ctx context.Context,
- fetchers []Fetcher[chunk.Chunk, chunk.Chunk],
- inputs [][]chunk.Chunk,
- metrics *Metrics,
- batchSize int,
-) *batchedLoader[chunk.Chunk, chunk.Chunk, v1.ChunkRefWithIter] {
-
- mapper := func(c chunk.Chunk) (v1.ChunkRefWithIter, error) {
- chk := c.Data.(*chunkenc.Facade).LokiChunk()
- metrics.chunkSize.Observe(float64(chk.UncompressedSize()))
- itr, err := chk.Iterator(
- ctx,
- time.Unix(0, 0),
- time.Unix(0, math.MaxInt64),
- logproto.FORWARD,
- logql_log.NewNoopPipeline().ForStream(c.Metric),
- )
-
- if err != nil {
- return v1.ChunkRefWithIter{}, err
- }
-
- return v1.ChunkRefWithIter{
- Ref: v1.ChunkRef{
- Start: c.From,
- End: c.Through,
- Checksum: c.Checksum,
- },
- Itr: itr,
- }, nil
- }
- return newBatchedLoader(ctx, fetchers, inputs, mapper, batchSize)
-}
-
-func (b *batchedLoader[_, _, C]) At() C {
- return b.cur
-}
-
-func (b *batchedLoader[_, _, _]) Err() error {
- return b.err
-}
diff --git a/pkg/bloomcompactor/spec_test.go b/pkg/bloomcompactor/spec_test.go
index bb4fde6cc235..f278948fed7a 100644
--- a/pkg/bloomcompactor/spec_test.go
+++ b/pkg/bloomcompactor/spec_test.go
@@ -3,7 +3,6 @@ package bloomcompactor
import (
"bytes"
"context"
- "errors"
"testing"
"github.com/go-kit/log"
@@ -14,20 +13,19 @@ import (
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
)
-func blocksFromSchema(t *testing.T, n int, options v1.BlockOptions) (res []*v1.Block, data []v1.SeriesWithBloom) {
+func blocksFromSchema(t *testing.T, n int, options v1.BlockOptions) (res []*v1.Block, data []v1.SeriesWithBloom, refs []bloomshipper.BlockRef) {
return blocksFromSchemaWithRange(t, n, options, 0, 0xffff)
}
// splits 100 series across `n` non-overlapping blocks.
// uses options to build blocks with.
-func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fromFP, throughFp model.Fingerprint) (res []*v1.Block, data []v1.SeriesWithBloom) {
+func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fromFP, throughFp model.Fingerprint) (res []*v1.Block, data []v1.SeriesWithBloom, refs []bloomshipper.BlockRef) {
if 100%n != 0 {
panic("100 series must be evenly divisible by n")
}
numSeries := 100
- numKeysPerSeries := 10000
- data, _ = v1.MkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, fromFP, throughFp, 0, 10000)
+ data, _ = v1.MkBasicSeriesWithBlooms(numSeries, 0, fromFP, throughFp, 0, 10000)
seriesPerBlock := numSeries / n
@@ -44,14 +42,19 @@ func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fro
)
require.Nil(t, err)
- itr := v1.NewSliceIter[v1.SeriesWithBloom](data[i*seriesPerBlock : (i+1)*seriesPerBlock])
+ minIdx, maxIdx := i*seriesPerBlock, (i+1)*seriesPerBlock
+
+ itr := v1.NewSliceIter[v1.SeriesWithBloom](data[minIdx:maxIdx])
_, err = builder.BuildFrom(itr)
require.Nil(t, err)
res = append(res, v1.NewBlock(reader))
+ ref := genBlockRef(data[minIdx].Series.Fingerprint, data[maxIdx-1].Series.Fingerprint)
+ t.Log("create block", ref)
+ refs = append(refs, ref)
}
- return res, data
+ return res, data, refs
}
// doesn't actually load any chunks
@@ -64,14 +67,30 @@ func (dummyChunkLoader) Load(_ context.Context, _ string, series *v1.Series) (*C
}, nil
}
-func dummyBloomGen(opts v1.BlockOptions, store v1.Iterator[*v1.Series], blocks []*v1.Block) *SimpleBloomGenerator {
+func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v1.Iterator[*v1.Series], blocks []*v1.Block, refs []bloomshipper.BlockRef) *SimpleBloomGenerator {
bqs := make([]*bloomshipper.CloseableBlockQuerier, 0, len(blocks))
- for _, b := range blocks {
+ for i, b := range blocks {
bqs = append(bqs, &bloomshipper.CloseableBlockQuerier{
+ BlockRef: refs[i],
BlockQuerier: v1.NewBlockQuerier(b),
})
}
- blocksIter := v1.NewCloseableIterator(v1.NewSliceIter(bqs))
+
+ fetcher := func(_ context.Context, refs []bloomshipper.BlockRef) ([]*bloomshipper.CloseableBlockQuerier, error) {
+ res := make([]*bloomshipper.CloseableBlockQuerier, 0, len(refs))
+ for _, ref := range refs {
+ for _, bq := range bqs {
+ if ref.Bounds.Equal(bq.Bounds) {
+ res = append(res, bq)
+ }
+ }
+ }
+ t.Log("req", refs)
+ t.Log("res", res)
+ return res, nil
+ }
+
+ blocksIter := newBlockLoadingIter(context.Background(), refs, FetchFunc[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier](fetcher), 1)
return NewSimpleBloomGenerator(
"fake",
@@ -95,6 +114,7 @@ func TestSimpleBloomGenerator(t *testing.T) {
desc string
fromSchema, toSchema v1.BlockOptions
sourceBlocks, numSkipped, outputBlocks int
+ overlapping bool
}{
{
desc: "SkipsIncompatibleSchemas",
@@ -118,11 +138,11 @@ func TestSimpleBloomGenerator(t *testing.T) {
toSchema: v1.NewBlockOptions(4, 0, 1<<10), // 1KB
sourceBlocks: 2,
numSkipped: 0,
- outputBlocks: 3,
+ outputBlocks: 6,
},
} {
t.Run(tc.desc, func(t *testing.T) {
- sourceBlocks, data := blocksFromSchema(t, tc.sourceBlocks, tc.fromSchema)
+ sourceBlocks, data, refs := blocksFromSchemaWithRange(t, tc.sourceBlocks, tc.fromSchema, 0x00000, 0x6ffff)
storeItr := v1.NewMapIter[v1.SeriesWithBloom, *v1.Series](
v1.NewSliceIter[v1.SeriesWithBloom](data),
func(swb v1.SeriesWithBloom) *v1.Series {
@@ -130,16 +150,15 @@ func TestSimpleBloomGenerator(t *testing.T) {
},
)
- gen := dummyBloomGen(tc.toSchema, storeItr, sourceBlocks)
- skipped, _, results, err := gen.Generate(context.Background())
- require.Nil(t, err)
- require.Equal(t, tc.numSkipped, len(skipped))
+ gen := dummyBloomGen(t, tc.toSchema, storeItr, sourceBlocks, refs)
+ results := gen.Generate(context.Background())
var outputBlocks []*v1.Block
for results.Next() {
outputBlocks = append(outputBlocks, results.At())
}
require.Equal(t, tc.outputBlocks, len(outputBlocks))
+ require.Equal(t, tc.numSkipped, len(gen.skipped))
// Check all the input series are present in the output blocks.
expectedRefs := v1.PointerSlice(data)
@@ -157,129 +176,3 @@ func TestSimpleBloomGenerator(t *testing.T) {
})
}
}
-
-func TestBatchedLoader(t *testing.T) {
- errMapper := func(i int) (int, error) {
- return 0, errors.New("bzzt")
- }
- successMapper := func(i int) (int, error) {
- return i, nil
- }
-
- expired, cancel := context.WithCancel(context.Background())
- cancel()
-
- for _, tc := range []struct {
- desc string
- ctx context.Context
- batchSize int
- mapper func(int) (int, error)
- err bool
- inputs [][]int
- exp []int
- }{
- {
- desc: "OneBatch",
- ctx: context.Background(),
- batchSize: 2,
- mapper: successMapper,
- err: false,
- inputs: [][]int{{0, 1}},
- exp: []int{0, 1},
- },
- {
- desc: "ZeroBatchSizeStillWorks",
- ctx: context.Background(),
- batchSize: 0,
- mapper: successMapper,
- err: false,
- inputs: [][]int{{0, 1}},
- exp: []int{0, 1},
- },
- {
- desc: "OneBatchLessThanFull",
- ctx: context.Background(),
- batchSize: 2,
- mapper: successMapper,
- err: false,
- inputs: [][]int{{0}},
- exp: []int{0},
- },
- {
- desc: "TwoBatches",
- ctx: context.Background(),
- batchSize: 2,
- mapper: successMapper,
- err: false,
- inputs: [][]int{{0, 1, 2, 3}},
- exp: []int{0, 1, 2, 3},
- },
- {
- desc: "MultipleBatchesMultipleLoaders",
- ctx: context.Background(),
- batchSize: 2,
- mapper: successMapper,
- err: false,
- inputs: [][]int{{0, 1}, {2}, {3, 4, 5}},
- exp: []int{0, 1, 2, 3, 4, 5},
- },
- {
- desc: "HandlesEmptyInputs",
- ctx: context.Background(),
- batchSize: 2,
- mapper: successMapper,
- err: false,
- inputs: [][]int{{0, 1, 2, 3}, nil, {4}},
- exp: []int{0, 1, 2, 3, 4},
- },
- {
- desc: "Timeout",
- ctx: expired,
- batchSize: 2,
- mapper: successMapper,
- err: true,
- inputs: [][]int{{0}},
- },
- {
- desc: "MappingFailure",
- ctx: context.Background(),
- batchSize: 2,
- mapper: errMapper,
- err: true,
- inputs: [][]int{{0}},
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- fetchers := make([]Fetcher[int, int], 0, len(tc.inputs))
- for range tc.inputs {
- fetchers = append(
- fetchers,
- FetchFunc[int, int](func(ctx context.Context, xs []int) ([]int, error) {
- if ctx.Err() != nil {
- return nil, ctx.Err()
- }
- return xs, nil
- }),
- )
- }
-
- loader := newBatchedLoader[int, int, int](
- tc.ctx,
- fetchers,
- tc.inputs,
- tc.mapper,
- tc.batchSize,
- )
-
- got, err := v1.Collect[int](loader)
- if tc.err {
- require.Error(t, err)
- return
- }
- require.NoError(t, err)
- require.Equal(t, tc.exp, got)
-
- })
- }
-
-}
diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go
index 8821816958b6..262d2da291cd 100644
--- a/pkg/storage/bloom/v1/builder.go
+++ b/pkg/storage/bloom/v1/builder.go
@@ -521,7 +521,7 @@ func (b *IndexBuilder) Close() (uint32, error) {
// from a list of blocks and a store of series.
type MergeBuilder struct {
// existing blocks
- blocks []PeekingIterator[*SeriesWithBloom]
+ blocks Iterator[*SeriesWithBloom]
// store
store Iterator[*Series]
// Add chunks to a bloom
@@ -533,7 +533,7 @@ type MergeBuilder struct {
// i) When two blocks have the same series, it will prefer the one with the most chunks already indexed
// 2. iterates through the store, adding chunks to the relevant blooms via the `populate` argument
func NewMergeBuilder(
- blocks []PeekingIterator[*SeriesWithBloom],
+ blocks Iterator[*SeriesWithBloom],
store Iterator[*Series],
populate func(*Series, *Bloom) error,
) *MergeBuilder {
@@ -549,24 +549,7 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
nextInBlocks *SeriesWithBloom
)
- // Turn the list of blocks into a single iterator that returns the next series
- mergedBlocks := NewPeekingIter[*SeriesWithBloom](NewHeapIterForSeriesWithBloom(mb.blocks...))
- // two overlapping blocks can conceivably have the same series, so we need to dedupe,
- // preferring the one with the most chunks already indexed since we'll have
- // to add fewer chunks to the bloom
- deduped := NewDedupingIter[*SeriesWithBloom](
- func(a, b *SeriesWithBloom) bool {
- return a.Series.Fingerprint == b.Series.Fingerprint
- },
- Identity[*SeriesWithBloom],
- func(a, b *SeriesWithBloom) *SeriesWithBloom {
- if len(a.Series.Chunks) > len(b.Series.Chunks) {
- return a
- }
- return b
- },
- mergedBlocks,
- )
+ deduped := mb.blocks
for mb.store.Next() {
nextInStore := mb.store.At()
diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go
index ac38cdeec427..0122a35f7751 100644
--- a/pkg/storage/bloom/v1/builder_test.go
+++ b/pkg/storage/bloom/v1/builder_test.go
@@ -150,6 +150,23 @@ func TestBlockBuilderRoundTrip(t *testing.T) {
}
}
+func dedupedBlocks(blocks []PeekingIterator[*SeriesWithBloom]) Iterator[*SeriesWithBloom] {
+ orderedBlocks := NewHeapIterForSeriesWithBloom(blocks...)
+ return NewDedupingIter[*SeriesWithBloom](
+ func(a *SeriesWithBloom, b *SeriesWithBloom) bool {
+ return a.Series.Fingerprint == b.Series.Fingerprint
+ },
+ Identity[*SeriesWithBloom],
+ func(a *SeriesWithBloom, b *SeriesWithBloom) *SeriesWithBloom {
+ if len(a.Series.Chunks) > len(b.Series.Chunks) {
+ return a
+ }
+ return b
+ },
+ NewPeekingIter[*SeriesWithBloom](orderedBlocks),
+ )
+}
+
func TestMergeBuilder(t *testing.T) {
t.Parallel()
@@ -209,7 +226,7 @@ func TestMergeBuilder(t *testing.T) {
)
// Ensure that the merge builder combines all the blocks correctly
- mergeBuilder := NewMergeBuilder(blocks, storeItr, pop)
+ mergeBuilder := NewMergeBuilder(dedupedBlocks(blocks), storeItr, pop)
indexBuf := bytes.NewBuffer(nil)
bloomsBuf := bytes.NewBuffer(nil)
writer := NewMemoryBlockWriter(indexBuf, bloomsBuf)
@@ -377,7 +394,7 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
writer := NewMemoryBlockWriter(indexBuf, bloomBuf)
reader := NewByteReader(indexBuf, bloomBuf)
mb := NewMergeBuilder(
- blocks,
+ dedupedBlocks(blocks),
dedupedStore,
func(s *Series, b *Bloom) error {
// We're not actually indexing new data in this test
diff --git a/pkg/storage/bloom/v1/util.go b/pkg/storage/bloom/v1/util.go
index 67c0087a0d4b..14c2ea2a03f8 100644
--- a/pkg/storage/bloom/v1/util.go
+++ b/pkg/storage/bloom/v1/util.go
@@ -277,6 +277,38 @@ func (it *PeekCloseIter[T]) Close() error {
return it.close()
}
+type ResettableIterator[T any] interface {
+ Reset() error
+ Iterator[T]
+}
+
+type CloseableResettableIterator[T any] interface {
+ CloseableIterator[T]
+ ResettableIterator[T]
+}
+
+type Predicate[T any] func(T) bool
+
+func NewFilterIter[T any](it Iterator[T], p Predicate[T]) *FilterIter[T] {
+ return &FilterIter[T]{
+ Iterator: it,
+ match: p,
+ }
+}
+
+type FilterIter[T any] struct {
+ Iterator[T]
+ match Predicate[T]
+}
+
+func (i *FilterIter[T]) Next() bool {
+ hasNext := i.Iterator.Next()
+ for hasNext && !i.match(i.Iterator.At()) {
+ hasNext = i.Iterator.Next()
+ }
+ return hasNext
+}
+
type CounterIterator[T any] interface {
Iterator[T]
Count() int
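A minimal usage sketch of the new FilterIter, assuming this package's NewSliceIter helper; Next skips ahead until the predicate matches, so At only ever returns matching elements:

    // Illustrative only: keep even numbers.
    evens := NewFilterIter[int](
        NewSliceIter[int]([]int{1, 2, 3, 4, 5, 6}),
        func(x int) bool { return x%2 == 0 },
    )
    for evens.Next() {
        _ = evens.At() // yields 2, 4, 6
    }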
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache.go b/pkg/storage/stores/shipper/bloomshipper/cache.go
index f5ceb930017f..d249cb68ce56 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache.go
@@ -29,6 +29,13 @@ func (c *CloseableBlockQuerier) Close() error {
return nil
}
+func (c *CloseableBlockQuerier) SeriesIter() (v1.PeekingIterator[*v1.SeriesWithBloom], error) {
+ if err := c.Reset(); err != nil {
+ return nil, err
+ }
+ return v1.NewPeekingIter[*v1.SeriesWithBloom](c.BlockQuerier), nil
+}
+
func NewBlocksCache(cfg cache.EmbeddedCacheConfig, reg prometheus.Registerer, logger log.Logger) *cache.EmbeddedCache[string, BlockDirectory] {
return cache.NewTypedEmbeddedCache[string, BlockDirectory](
"bloom-blocks-cache",
From f158b5bc3e94ce9c32aaa91ca259c9b074620284 Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Thu, 15 Feb 2024 15:39:29 -0800
Subject: [PATCH 078/130] correctly set block iter when no overlapping blocks
are found (#11973)
---
pkg/bloomcompactor/batch.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/pkg/bloomcompactor/batch.go b/pkg/bloomcompactor/batch.go
index e0787c1f6f1e..bed0834a86b7 100644
--- a/pkg/bloomcompactor/batch.go
+++ b/pkg/bloomcompactor/batch.go
@@ -285,6 +285,7 @@ func (i *blockLoadingIter) Filter(filter func(*bloomshipper.CloseableBlockQuerie
func (i *blockLoadingIter) loadNext() bool {
// check if there are more overlapping groups to load
if !i.overlapping.Next() {
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
return false
}
From 31711d555842a6bb748dbc748e4ae629bbe12cf4 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Fri, 16 Feb 2024 12:01:47 +0100
Subject: [PATCH 079/130] (chore) Bloom gateway: Improve fingerprint
partitioning in client (#11971)
Signed-off-by: Christian Haudum
---
pkg/bloomgateway/client.go | 26 +++++++++++++++-----------
pkg/bloomgateway/client_test.go | 28 ++++++++++++++++++++++++++++
pkg/bloomutils/ring.go | 4 ++--
3 files changed, 45 insertions(+), 13 deletions(-)
diff --git a/pkg/bloomgateway/client.go b/pkg/bloomgateway/client.go
index 28400749404c..e5fd35d884fb 100644
--- a/pkg/bloomgateway/client.go
+++ b/pkg/bloomgateway/client.go
@@ -7,7 +7,6 @@ import (
"io"
"math"
"math/rand"
- "sort"
"sync"
"github.com/go-kit/log"
@@ -20,6 +19,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
+ "golang.org/x/exp/slices"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
@@ -326,11 +326,6 @@ func serverAddressesWithTokenRanges(subRing ring.ReadRing, instances []ring.Inst
return servers, nil
}
-type instanceWithToken struct {
- instance ring.InstanceDesc
- token uint32
-}
-
type addrsWithTokenRange struct {
id string
addrs []string
@@ -348,13 +343,22 @@ type instanceWithFingerprints struct {
func partitionFingerprintsByAddresses(fingerprints []*logproto.GroupedChunkRefs, addresses []addrsWithTokenRange) (result []instanceWithFingerprints) {
for _, instance := range addresses {
-
- min := sort.Search(len(fingerprints), func(i int) bool {
- return instance.cmp(uint32(fingerprints[i].Fingerprint)) > v1.Before
+ min, _ := slices.BinarySearchFunc(fingerprints, instance.tokenRange, func(g *logproto.GroupedChunkRefs, r bloomutils.Range[uint32]) int {
+ if uint32(g.Fingerprint) < r.Min {
+ return -1
+ } else if uint32(g.Fingerprint) > r.Min {
+ return 1
+ }
+ return 0
})
- max := sort.Search(len(fingerprints), func(i int) bool {
- return instance.cmp(uint32(fingerprints[i].Fingerprint)) == v1.After
+ max, _ := slices.BinarySearchFunc(fingerprints, instance.tokenRange, func(g *logproto.GroupedChunkRefs, r bloomutils.Range[uint32]) int {
+ if uint32(g.Fingerprint) <= r.Max {
+ return -1
+ } else if uint32(g.Fingerprint) > r.Max {
+ return 1
+ }
+ return 0
})
// fingerprint is out of boundaries
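The two searches above compute a half-open index range [min, max) of fingerprints falling inside an instance's token range. A standalone sketch of the same slices.BinarySearchFunc idiom, with illustrative values only:

    fps := []uint32{10, 20, 30, 40, 50}
    lo, hi := uint32(15), uint32(45)
    // first index with fps[i] >= lo
    min, _ := slices.BinarySearchFunc(fps, lo, func(fp, t uint32) int {
        if fp < t {
            return -1
        } else if fp > t {
            return 1
        }
        return 0
    })
    // first index with fps[i] > hi; this comparator never returns 0,
    // so the reported insertion point lands just past hi
    max, _ := slices.BinarySearchFunc(fps, hi, func(fp, t uint32) int {
        if fp <= t {
            return -1
        }
        return 1
    })
    _ = fps[min:max] // [20 30 40]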
diff --git a/pkg/bloomgateway/client_test.go b/pkg/bloomgateway/client_test.go
index 440347d1b248..8a9a3d35646c 100644
--- a/pkg/bloomgateway/client_test.go
+++ b/pkg/bloomgateway/client_test.go
@@ -2,6 +2,7 @@ package bloomgateway
import (
"context"
+ "fmt"
"math"
"sort"
"testing"
@@ -165,6 +166,33 @@ func TestBloomGatewayClient_PartitionFingerprintsByAddresses(t *testing.T) {
})
}
+func BenchmarkPartitionFingerprintsByAddresses(b *testing.B) {
+ numFp := 100000
+ fpStep := math.MaxUint64 / uint64(numFp)
+
+ groups := make([]*logproto.GroupedChunkRefs, 0, numFp)
+ for i := uint64(0); i < math.MaxUint64-fpStep; i += fpStep {
+ groups = append(groups, &logproto.GroupedChunkRefs{Fingerprint: i})
+ }
+
+ numServers := 100
+ tokenStep := math.MaxUint32 / uint32(numServers)
+ servers := make([]addrsWithTokenRange, 0, numServers)
+ for i := uint32(0); i < math.MaxUint32-tokenStep; i += tokenStep {
+ servers = append(servers, addrsWithTokenRange{
+ id: fmt.Sprintf("instance-%x", i),
+ addrs: []string{fmt.Sprintf("%d", i)},
+ tokenRange: newTr(i, i+tokenStep),
+ })
+ }
+
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ _ = partitionFingerprintsByAddresses(groups, servers)
+ }
+}
+
func TestBloomGatewayClient_ServerAddressesWithTokenRanges(t *testing.T) {
testCases := map[string]struct {
instances []ring.InstanceDesc
diff --git a/pkg/bloomutils/ring.go b/pkg/bloomutils/ring.go
index 102d3ed5e9a5..d2aebe5b88a3 100644
--- a/pkg/bloomutils/ring.go
+++ b/pkg/bloomutils/ring.go
@@ -20,7 +20,7 @@ var (
Uint64Range = Range[uint64]{Min: 0, Max: math.MaxUint64}
)
-type Range[T constraints.Integer] struct {
+type Range[T constraints.Unsigned] struct {
Min, Max T
}
@@ -72,7 +72,7 @@ func (i InstancesWithTokenRange) Contains(token uint32) bool {
// with given id based on the first token in the ring.
// This assumes that each instance in the ring is configured with only a single
// token.
-func KeyRangeForInstance[T constraints.Integer](id string, instances []ring.InstanceDesc, keyspace Range[T]) (Range[T], error) {
+func KeyRangeForInstance[T constraints.Unsigned](id string, instances []ring.InstanceDesc, keyspace Range[T]) (Range[T], error) {
// Sort instances -- they may not be sorted
// because they're usually accessed by looking up the tokens (which are sorted)
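The tightened bound means Range and KeyRangeForInstance now accept only unsigned integer types; a minimal sketch of what does and does not compile under constraints.Unsigned:

    // Still valid: unsigned element types.
    r32 := Range[uint32]{Min: 0, Max: math.MaxUint32}
    r64 := Range[uint64]{Min: 0, Max: math.MaxUint64}
    _, _ = r32, r64

    // No longer compiles: Range[int64]{Min: 0, Max: 1}
    // (int64 does not satisfy constraints.Unsigned)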
From 5d1798f16d24b12e53bebb5a02eae33a160e7b61 Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Fri, 16 Feb 2024 03:14:21 -0800
Subject: [PATCH 080/130] Blooms/misc fixes (#11974)
---
docs/sources/configure/_index.md | 2 +-
pkg/bloomcompactor/bloomcompactor.go | 2 +-
pkg/storage/bloom/v1/builder.go | 2 +-
pkg/validation/limits.go | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md
index c30f8da01fa2..d3c5593b4da2 100644
--- a/docs/sources/configure/_index.md
+++ b/docs/sources/configure/_index.md
@@ -3155,7 +3155,7 @@ shard_streams:
# Skip factor for the n-grams created when computing blooms from log lines.
# CLI flag: -bloom-compactor.ngram-skip
-[bloom_ngram_skip: <int> | default = 0]
+[bloom_ngram_skip: <int> | default = 1]
# Scalable Bloom Filter desired false-positive rate.
# CLI flag: -bloom-compactor.false-positive-rate
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index e8dc880f9d9d..cc752c2224a6 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -92,7 +92,7 @@ func New(
c.bloomStore = bloomStore
// initialize metrics
- c.btMetrics = v1.NewMetrics(prometheus.WrapRegistererWithPrefix("loki_bloom_tokenizer", r))
+ c.btMetrics = v1.NewMetrics(prometheus.WrapRegistererWithPrefix("loki_bloom_tokenizer_", r))
c.metrics = NewMetrics(r, c.btMetrics)
chunkLoader := NewStoreChunkLoader(
diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go
index 262d2da291cd..d2d51b557e5d 100644
--- a/pkg/storage/bloom/v1/builder.go
+++ b/pkg/storage/bloom/v1/builder.go
@@ -15,7 +15,7 @@ import (
)
var (
- DefaultBlockOptions = NewBlockOptions(4, 0, 50<<20) // 50MB
+ DefaultBlockOptions = NewBlockOptions(4, 1, 50<<20) // 50MB
)
type BlockOptions struct {
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index 13885c0fcb52..9627718aa8ec 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -334,7 +334,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&l.BloomCompactorEnabled, "bloom-compactor.enable-compaction", false, "Whether to compact chunks into bloom filters.")
f.IntVar(&l.BloomCompactorChunksBatchSize, "bloom-compactor.chunks-batch-size", 100, "The batch size of the chunks the bloom-compactor downloads at once.")
f.IntVar(&l.BloomNGramLength, "bloom-compactor.ngram-length", 4, "Length of the n-grams created when computing blooms from log lines.")
- f.IntVar(&l.BloomNGramSkip, "bloom-compactor.ngram-skip", 0, "Skip factor for the n-grams created when computing blooms from log lines.")
+ f.IntVar(&l.BloomNGramSkip, "bloom-compactor.ngram-skip", 1, "Skip factor for the n-grams created when computing blooms from log lines.")
f.Float64Var(&l.BloomFalsePositiveRate, "bloom-compactor.false-positive-rate", 0.01, "Scalable Bloom Filter desired false-positive rate.")
f.IntVar(&l.BloomGatewayBlocksDownloadingParallelism, "bloom-gateway.blocks-downloading-parallelism", 50, "Maximum number of blocks will be downloaded in parallel by the Bloom Gateway.")
f.DurationVar(&l.BloomGatewayCacheKeyInterval, "bloom-gateway.cache-key-interval", 15*time.Minute, "Interval for computing the cache key in the Bloom Gateway.")
From ef40136715d0bf6cbeeac7ca4e70632b1343278d Mon Sep 17 00:00:00 2001
From: Salva Corts
Date: Fri, 16 Feb 2024 17:08:03 +0100
Subject: [PATCH 081/130] Use TSDB index prefix on blooms directory (#11977)
---
pkg/bloomcompactor/bloomcompactor.go | 38 +++++++++++++++++++-------
pkg/bloomcompactor/controller.go | 12 ++++----
pkg/bloomcompactor/tsdb.go | 24 ++++++++--------
pkg/bloomgateway/util_test.go | 2 +-
pkg/storage/config/schema_config.go | 41 ++++++++++++++++------------
5 files changed, 71 insertions(+), 46 deletions(-)
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index cc752c2224a6..3bb1c815e829 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -179,11 +179,11 @@ func runWithRetries(
type tenantTable struct {
tenant string
- table config.DayTime
+ table config.DayTable
ownershipRange v1.FingerprintBounds
}
-func (c *Compactor) tenants(ctx context.Context, table config.DayTime) (v1.Iterator[string], error) {
+func (c *Compactor) tenants(ctx context.Context, table config.DayTable) (v1.Iterator[string], error) {
tenants, err := c.tsdbStore.UsersForPeriod(ctx, table)
if err != nil {
return nil, errors.Wrap(err, "getting tenants")
@@ -241,15 +241,15 @@ func (c *Compactor) tables(ts time.Time) *dayRangeIterator {
fromDay := config.NewDayTime(model.TimeFromUnixNano(from))
throughDay := config.NewDayTime(model.TimeFromUnixNano(through))
- return newDayRangeIterator(fromDay, throughDay)
+ return newDayRangeIterator(fromDay, throughDay, c.schemaCfg)
}
func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error {
tables := c.tables(time.Now())
for tables.Next() && tables.Err() == nil && ctx.Err() == nil {
-
table := tables.At()
+
tenants, err := c.tenants(ctx, table)
if err != nil {
return errors.Wrap(err, "getting tenants")
@@ -269,7 +269,11 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error {
c.metrics.tenantsOwned.Inc()
select {
- case ch <- tenantTable{tenant: tenant, table: table, ownershipRange: ownershipRange}:
+ case ch <- tenantTable{
+ tenant: tenant,
+ table: table,
+ ownershipRange: ownershipRange,
+ }:
case <-ctx.Done():
return ctx.Err()
}
@@ -332,19 +336,33 @@ func (c *Compactor) compactTenantTable(ctx context.Context, tt tenantTable) erro
type dayRangeIterator struct {
min, max, cur config.DayTime
+ curPeriod config.PeriodConfig
+ schemaCfg config.SchemaConfig
+ err error
}
-func newDayRangeIterator(min, max config.DayTime) *dayRangeIterator {
- return &dayRangeIterator{min: min, max: max, cur: min.Dec()}
+func newDayRangeIterator(min, max config.DayTime, schemaCfg config.SchemaConfig) *dayRangeIterator {
+ return &dayRangeIterator{min: min, max: max, cur: min.Dec(), schemaCfg: schemaCfg}
}
func (r *dayRangeIterator) Next() bool {
r.cur = r.cur.Inc()
- return r.cur.Before(r.max)
+ if !r.cur.Before(r.max) {
+ return false
+ }
+
+ period, err := r.schemaCfg.SchemaForTime(r.cur.ModelTime())
+ if err != nil {
+ r.err = errors.Wrapf(err, "getting schema for time (%s)", r.cur)
+ return false
+ }
+ r.curPeriod = period
+
+ return true
}
-func (r *dayRangeIterator) At() config.DayTime {
- return r.cur
+func (r *dayRangeIterator) At() config.DayTable {
+ return config.NewDayTable(r.cur, r.curPeriod.IndexTables.Prefix)
}
func (r *dayRangeIterator) Err() error {
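A hedged sketch of the iterator's new contract: each day now resolves to a config.DayTable carrying the period's index-table prefix. fromDay, throughDay, and schemaCfg are assumed to be in scope:

    it := newDayRangeIterator(fromDay, throughDay, schemaCfg)
    for it.Next() {
        table := it.At() // config.DayTable
        _ = table.Addr() // prefixed table address for the day
    }
    if err := it.Err(); err != nil {
        // no schema period covered one of the days in the range
    }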
diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go
index cee0e6f05820..ef41ec2d8efb 100644
--- a/pkg/bloomcompactor/controller.go
+++ b/pkg/bloomcompactor/controller.go
@@ -66,15 +66,15 @@ Compaction works as follows, split across many functions for clarity:
*/
func (s *SimpleBloomController) compactTenant(
ctx context.Context,
- table config.DayTime,
+ table config.DayTable,
tenant string,
ownershipRange v1.FingerprintBounds,
) error {
- logger := log.With(s.logger, "ownership", ownershipRange, "org_id", tenant, "table", table)
+ logger := log.With(s.logger, "ownership", ownershipRange, "org_id", tenant, "table", table.Addr())
client, err := s.bloomStore.Client(table.ModelTime())
if err != nil {
- level.Error(logger).Log("msg", "failed to get client", "err", err, "table", table.Addr())
+ level.Error(logger).Log("msg", "failed to get client", "err", err)
return errors.Wrap(err, "failed to get client")
}
@@ -175,7 +175,7 @@ func (s *SimpleBloomController) compactTenant(
func (s *SimpleBloomController) findOutdatedGaps(
ctx context.Context,
tenant string,
- table config.DayTime,
+ table config.DayTable,
ownershipRange v1.FingerprintBounds,
metas []bloomshipper.Meta,
logger log.Logger,
@@ -215,7 +215,7 @@ func (s *SimpleBloomController) findOutdatedGaps(
func (s *SimpleBloomController) loadWorkForGap(
ctx context.Context,
- table config.DayTime,
+ table config.DayTable,
tenant string,
id tsdb.Identifier,
gap gapWithBlocks,
@@ -241,7 +241,7 @@ func (s *SimpleBloomController) loadWorkForGap(
func (s *SimpleBloomController) buildGaps(
ctx context.Context,
tenant string,
- table config.DayTime,
+ table config.DayTable,
client bloomshipper.Client,
work []blockPlan,
logger log.Logger,
diff --git a/pkg/bloomcompactor/tsdb.go b/pkg/bloomcompactor/tsdb.go
index d19e185a9275..6159ce02a804 100644
--- a/pkg/bloomcompactor/tsdb.go
+++ b/pkg/bloomcompactor/tsdb.go
@@ -26,11 +26,11 @@ const (
)
type TSDBStore interface {
- UsersForPeriod(ctx context.Context, table config.DayTime) ([]string, error)
- ResolveTSDBs(ctx context.Context, table config.DayTime, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error)
+ UsersForPeriod(ctx context.Context, table config.DayTable) ([]string, error)
+ ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error)
LoadTSDB(
ctx context.Context,
- table config.DayTime,
+ table config.DayTable,
tenant string,
id tsdb.Identifier,
bounds v1.FingerprintBounds,
@@ -49,12 +49,12 @@ func NewBloomTSDBStore(storage storage.Client) *BloomTSDBStore {
}
}
-func (b *BloomTSDBStore) UsersForPeriod(ctx context.Context, table config.DayTime) ([]string, error) {
+func (b *BloomTSDBStore) UsersForPeriod(ctx context.Context, table config.DayTable) ([]string, error) {
_, users, err := b.storage.ListFiles(ctx, table.Addr(), true) // bypass cache for ease of testing
return users, err
}
-func (b *BloomTSDBStore) ResolveTSDBs(ctx context.Context, table config.DayTime, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) {
+func (b *BloomTSDBStore) ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) {
indices, err := b.storage.ListUserFiles(ctx, table.Addr(), tenant, true) // bypass cache for ease of testing
if err != nil {
return nil, errors.Wrap(err, "failed to list user files")
@@ -80,7 +80,7 @@ func (b *BloomTSDBStore) ResolveTSDBs(ctx context.Context, table config.DayTime,
func (b *BloomTSDBStore) LoadTSDB(
ctx context.Context,
- table config.DayTime,
+ table config.DayTable,
tenant string,
id tsdb.Identifier,
bounds v1.FingerprintBounds,
@@ -272,8 +272,8 @@ func (s *TSDBStores) storeForPeriod(table config.DayTime) (TSDBStore, error) {
)
}
-func (s *TSDBStores) UsersForPeriod(ctx context.Context, table config.DayTime) ([]string, error) {
- store, err := s.storeForPeriod(table)
+func (s *TSDBStores) UsersForPeriod(ctx context.Context, table config.DayTable) ([]string, error) {
+ store, err := s.storeForPeriod(table.DayTime)
if err != nil {
return nil, err
}
@@ -281,8 +281,8 @@ func (s *TSDBStores) UsersForPeriod(ctx context.Context, table config.DayTime) (
return store.UsersForPeriod(ctx, table)
}
-func (s *TSDBStores) ResolveTSDBs(ctx context.Context, table config.DayTime, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) {
- store, err := s.storeForPeriod(table)
+func (s *TSDBStores) ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) {
+ store, err := s.storeForPeriod(table.DayTime)
if err != nil {
return nil, err
}
@@ -292,12 +292,12 @@ func (s *TSDBStores) ResolveTSDBs(ctx context.Context, table config.DayTime, ten
func (s *TSDBStores) LoadTSDB(
ctx context.Context,
- table config.DayTime,
+ table config.DayTable,
tenant string,
id tsdb.Identifier,
bounds v1.FingerprintBounds,
) (v1.CloseableIterator[*v1.Series], error) {
- store, err := s.storeForPeriod(table)
+ store, err := s.storeForPeriod(table.DayTime)
if err != nil {
return nil, err
}
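
`storeForPeriod(table.DayTime)` above resolves which configured schema period covers a given day before delegating to the per-period store. A sketch of that kind of lookup, assuming period configs are sorted by ascending start time (the `period` type and `schemaForTime` helper are illustrative, not the package's actual API):

```go
package main

import (
	"errors"
	"fmt"
	"sort"
	"time"
)

type period struct {
	from   time.Time
	prefix string
}

// schemaForTime picks the latest period whose start is <= t.
func schemaForTime(periods []period, t time.Time) (period, error) {
	i := sort.Search(len(periods), func(i int) bool { return periods[i].from.After(t) })
	if i == 0 {
		return period{}, errors.New("no period config covers the given time")
	}
	return periods[i-1], nil
}

func main() {
	periods := []period{
		{time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC), "loki_v12_"},
		{time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), "loki_v13_"},
	}
	p, err := schemaForTime(periods, time.Date(2024, 2, 16, 0, 0, 0, 0, time.UTC))
	fmt.Println(p.prefix, err) // loki_v13_ <nil>
}
```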
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
index 9b5ce6e897bb..281feba4b29a 100644
--- a/pkg/bloomgateway/util_test.go
+++ b/pkg/bloomgateway/util_test.go
@@ -311,7 +311,7 @@ func createBlocks(t *testing.T, tenant string, n int, from, through model.Time,
}
ref := bloomshipper.Ref{
TenantID: tenant,
- TableName: config.NewDayTime(truncateDay(from)).Addr(),
+ TableName: config.NewDayTable(config.NewDayTime(truncateDay(from)), "").Addr(),
Bounds: v1.NewBounds(fromFp, throughFp),
StartTimestamp: from,
EndTimestamp: through,
diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go
index b7c92c62c3d9..968ca87e609b 100644
--- a/pkg/storage/config/schema_config.go
+++ b/pkg/storage/config/schema_config.go
@@ -200,10 +200,6 @@ func (cfg *PeriodConfig) GetIndexTableNumberRange(schemaEndDate DayTime) TableRa
}
}
-func (cfg *PeriodConfig) GetFullTableName(t model.Time) string {
- return NewDayTime(t).TableWithPrefix(cfg)
-}
-
func NewDayTime(d model.Time) DayTime {
return DayTime{d}
}
@@ -237,19 +233,6 @@ func (d DayTime) String() string {
return d.Time.Time().UTC().Format("2006-01-02")
}
-// Addr returns the unix day offset as a string, which is used
-// as the address for the index table in storage.
-func (d DayTime) Addr() string {
- return fmt.Sprintf("%d",
- d.ModelTime().Time().UnixNano()/int64(ObjectStorageIndexRequiredPeriod))
-}
-
-func (d DayTime) TableWithPrefix(cfg *PeriodConfig) string {
- return fmt.Sprintf("%s%d",
- cfg.IndexTables.Prefix,
- d.ModelTime().Time().UnixNano()/int64(ObjectStorageIndexRequiredPeriod))
-}
-
func (d DayTime) Inc() DayTime {
return DayTime{d.Add(ObjectStorageIndexRequiredPeriod)}
}
@@ -274,6 +257,30 @@ func (d DayTime) Bounds() (model.Time, model.Time) {
return d.Time, d.Inc().Time
}
+type DayTable struct {
+ DayTime
+ Prefix string
+}
+
+func (d DayTable) String() string {
+ return d.Addr()
+}
+
+func NewDayTable(d DayTime, prefix string) DayTable {
+ return DayTable{
+ DayTime: d,
+ Prefix: prefix,
+ }
+}
+
+// Addr returns the prefix (if any) and the unix day offset as a string, which is used
+// as the address for the index table in storage.
+func (d DayTable) Addr() string {
+ return fmt.Sprintf("%s%d",
+ d.Prefix,
+ d.ModelTime().Time().UnixNano()/int64(ObjectStorageIndexRequiredPeriod))
+}
+
// SchemaConfig contains the config for our chunk index schemas
type SchemaConfig struct {
Configs []PeriodConfig `yaml:"configs"`
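
The address arithmetic in `DayTable.Addr()` boils down to prefix plus unix-day offset. A minimal standalone sketch, assuming `ObjectStorageIndexRequiredPeriod` is 24 hours (the constant itself is defined elsewhere in this package; the prefix value below is illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// assumed to mirror the package's ObjectStorageIndexRequiredPeriod
const objectStorageIndexRequiredPeriod = 24 * time.Hour

// addr reproduces the DayTable.Addr() computation: prefix + unix day offset.
func addr(prefix string, t time.Time) string {
	return fmt.Sprintf("%s%d", prefix, t.UnixNano()/int64(objectStorageIndexRequiredPeriod))
}

func main() {
	day := time.Date(2024, 2, 16, 0, 0, 0, 0, time.UTC)
	fmt.Println(addr("loki_index_", day)) // loki_index_19769
}
```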
From fb728a6781f3b6739270960a43298c2a98eb1c79 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Fri, 16 Feb 2024 17:12:55 +0100
Subject: [PATCH 082/130] (chore) Cleanup duplicate functions/structs in
bloomgateway package (#11978)
Signed-off-by: Christian Haudum
---
pkg/bloomgateway/processor.go | 48 +++--------------------------------
pkg/bloomgateway/util.go | 14 +++++-----
pkg/bloomgateway/util_test.go | 6 ++---
3 files changed, 15 insertions(+), 53 deletions(-)
diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go
index 5eab7a858c74..7d1d68785397 100644
--- a/pkg/bloomgateway/processor.go
+++ b/pkg/bloomgateway/processor.go
@@ -3,7 +3,6 @@ package bloomgateway
import (
"context"
"math"
- "sort"
"time"
"github.com/go-kit/log"
@@ -13,11 +12,6 @@ import (
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
)
-type tasksForBlock struct {
- blockRef bloomshipper.BlockRef
- tasks []Task
-}
-
func newProcessor(id string, store bloomshipper.Store, logger log.Logger, metrics *workerMetrics) *processor {
return &processor{
id: id,
@@ -66,13 +60,13 @@ func (p *processor) processTasks(ctx context.Context, tenant string, day config.
p.metrics.metasFetched.WithLabelValues(p.id).Observe(float64(len(metas)))
blocksRefs := bloomshipper.BlocksForMetas(metas, interval, keyspaces)
- return p.processBlocks(ctx, partition(tasks, blocksRefs))
+ return p.processBlocks(ctx, partitionTasks(tasks, blocksRefs))
}
-func (p *processor) processBlocks(ctx context.Context, data []tasksForBlock) error {
+func (p *processor) processBlocks(ctx context.Context, data []blockWithTasks) error {
refs := make([]bloomshipper.BlockRef, len(data))
for _, block := range data {
- refs = append(refs, block.blockRef)
+ refs = append(refs, block.ref)
}
bqs, err := p.store.FetchBlocks(ctx, refs)
@@ -87,7 +81,7 @@ outer:
for blockIter.Next() {
bq := blockIter.At()
for i, block := range data {
- if block.blockRef.Bounds.Equal(bq.Bounds) {
+ if block.ref.Bounds.Equal(bq.Bounds) {
err := p.processBlock(ctx, bq.BlockQuerier, block.tasks)
bq.Close()
if err != nil {
@@ -146,37 +140,3 @@ func group[K comparable, V any, S ~[]V](s S, f func(v V) K) map[K]S {
}
return m
}
-
-func partition(tasks []Task, blocks []bloomshipper.BlockRef) []tasksForBlock {
- result := make([]tasksForBlock, 0, len(blocks))
-
- for _, block := range blocks {
- bounded := tasksForBlock{
- blockRef: block,
- }
-
- for _, task := range tasks {
- refs := task.series
- min := sort.Search(len(refs), func(i int) bool {
- return block.Cmp(refs[i].Fingerprint) > v1.Before
- })
-
- max := sort.Search(len(refs), func(i int) bool {
- return block.Cmp(refs[i].Fingerprint) == v1.After
- })
-
- // All fingerprints fall outside of the consumer's range
- if min == len(refs) || max == 0 {
- continue
- }
-
- bounded.tasks = append(bounded.tasks, task.Copy(refs[min:max]))
- }
-
- if len(bounded.tasks) > 0 {
- result = append(result, bounded)
- }
-
- }
- return result
-}
diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go
index 3793076f7c38..3ab234aaa8ae 100644
--- a/pkg/bloomgateway/util.go
+++ b/pkg/bloomgateway/util.go
@@ -83,15 +83,17 @@ func convertToChunkRefs(refs []*logproto.ShortRef) v1.ChunkRefs {
return result
}
-type boundedTasks struct {
- blockRef bloomshipper.BlockRef
- tasks []Task
+type blockWithTasks struct {
+ ref bloomshipper.BlockRef
+ tasks []Task
}
-func partitionFingerprintRange(tasks []Task, blocks []bloomshipper.BlockRef) (result []boundedTasks) {
+func partitionTasks(tasks []Task, blocks []bloomshipper.BlockRef) []blockWithTasks {
+ result := make([]blockWithTasks, 0, len(blocks))
+
for _, block := range blocks {
- bounded := boundedTasks{
- blockRef: block,
+ bounded := blockWithTasks{
+ ref: block,
}
for _, task := range tasks {
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
index 281feba4b29a..6bc43cf79434 100644
--- a/pkg/bloomgateway/util_test.go
+++ b/pkg/bloomgateway/util_test.go
@@ -73,7 +73,7 @@ func mkBlockRef(minFp, maxFp uint64) bloomshipper.BlockRef {
}
}
-func TestPartitionFingerprintRange(t *testing.T) {
+func TestPartitionTasks(t *testing.T) {
t.Run("consecutive block ranges", func(t *testing.T) {
bounds := []bloomshipper.BlockRef{
@@ -93,7 +93,7 @@ func TestPartitionFingerprintRange(t *testing.T) {
tasks[i%nTasks].series = append(tasks[i%nTasks].series, &logproto.GroupedChunkRefs{Fingerprint: uint64(i)})
}
- results := partitionFingerprintRange(tasks, bounds)
+ results := partitionTasks(tasks, bounds)
require.Equal(t, 3, len(results)) // ensure we only return bounds in range
actualFingerprints := make([]*logproto.GroupedChunkRefs, 0, nSeries)
@@ -128,7 +128,7 @@ func TestPartitionFingerprintRange(t *testing.T) {
task.series = append(task.series, &logproto.GroupedChunkRefs{Fingerprint: uint64(i)})
}
- results := partitionFingerprintRange([]Task{task}, bounds)
+ results := partitionTasks([]Task{task}, bounds)
require.Equal(t, 3, len(results)) // ensure we only return bounds in range
for _, res := range results {
// ensure we have the right number of tasks per bound
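
The surviving `partitionTasks` narrows each task's sorted series refs to a block's fingerprint bounds with two `sort.Search` calls, one for the lower bound and one for the upper. A self-contained sketch of that binary-search partitioning (the `bounds` type and plain `uint64` fingerprints are illustrative stand-ins for `BlockRef` and `GroupedChunkRefs`):

```go
package main

import (
	"fmt"
	"sort"
)

type bounds struct{ min, max uint64 } // inclusive fingerprint range

// cmp reports where fp falls relative to b: -1 before, 0 within, 1 after.
func (b bounds) cmp(fp uint64) int {
	switch {
	case fp < b.min:
		return -1
	case fp > b.max:
		return 1
	default:
		return 0
	}
}

func partition(refs []uint64, blocks []bounds) [][]uint64 {
	result := make([][]uint64, 0, len(blocks))
	for _, b := range blocks {
		lo := sort.Search(len(refs), func(i int) bool { return b.cmp(refs[i]) >= 0 })
		hi := sort.Search(len(refs), func(i int) bool { return b.cmp(refs[i]) > 0 })
		if lo == len(refs) || hi == 0 {
			continue // all fingerprints fall outside this block's range
		}
		result = append(result, refs[lo:hi])
	}
	return result
}

func main() {
	refs := []uint64{1, 5, 9, 12, 20} // sorted by fingerprint
	fmt.Println(partition(refs, []bounds{{0, 9}, {10, 19}, {30, 40}}))
	// [[1 5 9] [12]]; the {30,40} block matches nothing and is skipped
}
```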
From 918e7365dbbe4ac33ec02b803c6cf7155eb4a9e1 Mon Sep 17 00:00:00 2001
From: Travis Patterson
Date: Fri, 16 Feb 2024 09:22:45 -0700
Subject: [PATCH 083/130] Set compactor memory limits (#11970)
---
CHANGELOG.md | 1 +
production/ksonnet/loki/shipper.libsonnet | 1 +
2 files changed, 2 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ca4560401243..8abd9a846458 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -57,6 +57,7 @@
* [11499](https://github.com/grafana/loki/pull/11284) **jmichalek132** Config: Adds `frontend.log-query-request-headers` to enable logging of request headers in query logs.
* [11817](https://github.com/grafana/loki/pull/11817) **ashwanthgoli** Ruler: Add support for filtering results of `/prometheus/api/v1/rules` endpoint by rule_name, rule_group, file and type.
* [11897](https://github.com/grafana/loki/pull/11897) **ashwanthgoli** Metadata: Introduces a separate split interval of `split_recent_metadata_queries_by_interval` for `recent_metadata_query_window` to help with caching recent metadata query results.
+* [11970](https://github.com/grafana/loki/pull/11970) **masslessparticle** Ksonnet: Introduces memory limits to the compactor configuration to avoid unbounded memory usage.
##### Fixes
* [11074](https://github.com/grafana/loki/pull/11074) **hainenber** Fix panic in lambda-promtail due to mishandling of empty DROP_LABELS env var.
diff --git a/production/ksonnet/loki/shipper.libsonnet b/production/ksonnet/loki/shipper.libsonnet
index 18f38f3ab89e..5fe8320af8ae 100644
--- a/production/ksonnet/loki/shipper.libsonnet
+++ b/production/ksonnet/loki/shipper.libsonnet
@@ -62,6 +62,7 @@
container.mixin.readinessProbe.httpGet.withPort($._config.http_listen_port) +
container.mixin.readinessProbe.withTimeoutSeconds(1) +
k.util.resourcesRequests('4', '2Gi') +
+ k.util.resourcesLimits(null, '4Gi') +
container.withEnvMixin($._config.commonEnvs)
else {},
From 9f86473b0a2dedb302c2dcc93e123ea3cde62303 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Mon, 19 Feb 2024 10:04:14 +0100
Subject: [PATCH 084/130] Various bloom gateway instrumentation changes
(#11983)
See individual commit messages for details.
* Improve help text for chunk removals metric
* End counter metric with _total
* Fix incorrect observation of failed tasks
* Observe chunk removals from block querier
* Move chunks/series metrics into bloom querier
* Observe total/filtered series and total/filtered chunks in bloom gateway
Signed-off-by: Christian Haudum
---
pkg/bloomgateway/bloomgateway.go | 82 ++++++++++++---------------
pkg/bloomgateway/bloomgateway_test.go | 9 ++-
pkg/bloomgateway/querier.go | 55 +++++++++++++++++-
pkg/bloomgateway/worker.go | 2 +-
4 files changed, 98 insertions(+), 50 deletions(-)
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index 58f709f0be2f..4e36e5ce3018 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -82,10 +82,9 @@ var (
)
type metrics struct {
- queueDuration prometheus.Histogram
- inflightRequests prometheus.Summary
- chunkRefsUnfiltered prometheus.Counter
- chunkRefsFiltered prometheus.Counter
+ queueDuration prometheus.Histogram
+ inflightRequests prometheus.Summary
+ chunkRemovals *prometheus.CounterVec
}
func newMetrics(registerer prometheus.Registerer, namespace, subsystem string) *metrics {
@@ -106,29 +105,15 @@ func newMetrics(registerer prometheus.Registerer, namespace, subsystem string) *
MaxAge: time.Minute,
AgeBuckets: 6,
}),
- chunkRefsUnfiltered: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
+ chunkRemovals: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
- Name: "chunkrefs_pre_filtering",
- Help: "Total amount of chunk refs pre filtering. Does not count chunk refs in failed requests.",
- }),
- chunkRefsFiltered: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: "chunkrefs_post_filtering",
- Help: "Total amount of chunk refs post filtering.",
- }),
+ Name: "chunk_removals_total",
+ Help: "Total amount of removals received from the block querier partitioned by state. The state 'accepted' means that the removals are processed, the state 'dropped' means that the removals were received after the task context was done (e.g. client timeout, etc).",
+ }, []string{"state"}),
}
}
-func (m *metrics) addUnfilteredCount(n int) {
- m.chunkRefsUnfiltered.Add(float64(n))
-}
-
-func (m *metrics) addFilteredCount(n int) {
- m.chunkRefsFiltered.Add(float64(n))
-}
-
// SyncMap is a map structure which can be synchronized using the RWMutex
type SyncMap[k comparable, v any] struct {
sync.RWMutex
@@ -324,12 +309,8 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
return nil, errors.New("from time must not be after through time")
}
- numChunksUnfiltered := len(req.Refs)
-
// Shortcut if request does not contain filters
if len(req.Filters) == 0 {
- g.metrics.addUnfilteredCount(numChunksUnfiltered)
- g.metrics.addFilteredCount(len(req.Refs))
return &logproto.FilterChunkRefResponse{
ChunkRefs: req.Refs,
}, nil
@@ -374,15 +355,14 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
// When enqueuing, we also add the task to the pending tasks
g.pendingTasks.Add(task.ID, task)
})
- go consumeTask(ctx, task, tasksCh, logger)
+ go g.consumeTask(ctx, task, tasksCh)
}
responses := responsesPool.Get(numSeries)
defer responsesPool.Put(responses)
remaining := len(tasks)
-outer:
- for {
+ for remaining > 0 {
select {
case <-ctx.Done():
return nil, errors.Wrap(ctx.Err(), "request failed")
@@ -393,23 +373,17 @@ outer:
}
responses = append(responses, task.responses...)
remaining--
- if remaining == 0 {
- break outer
- }
}
}
- for _, o := range responses {
- if o.Removals.Len() == 0 {
- continue
- }
- removeNotMatchingChunks(req, o, g.logger)
- }
+ preFilterSeries := len(req.Refs)
- g.metrics.addUnfilteredCount(numChunksUnfiltered)
- g.metrics.addFilteredCount(len(req.Refs))
+ // TODO(chaudum): Don't wait for all responses before starting to filter chunks.
+ filtered := g.processResponses(req, responses)
- level.Info(logger).Log("msg", "return filtered chunk refs", "unfiltered", numChunksUnfiltered, "filtered", len(req.Refs))
+ postFilterSeries := len(req.Refs)
+
+ level.Info(logger).Log("msg", "return filtered chunk refs", "pre_filter_series", preFilterSeries, "post_filter_series", postFilterSeries, "filtered_chunks", filtered)
return &logproto.FilterChunkRefResponse{ChunkRefs: req.Refs}, nil
}
@@ -419,16 +393,18 @@ outer:
// task is closed by the worker.
// Once the tasks is closed, it will send the task with the results from the
// block querier to the supplied task channel.
-func consumeTask(ctx context.Context, task Task, tasksCh chan<- Task, logger log.Logger) {
- logger = log.With(logger, "task", task.ID)
+func (g *Gateway) consumeTask(ctx context.Context, task Task, tasksCh chan<- Task) {
+ logger := log.With(g.logger, "task", task.ID)
for res := range task.resCh {
select {
case <-ctx.Done():
level.Debug(logger).Log("msg", "drop partial result", "fp_int", uint64(res.Fp), "fp_hex", res.Fp, "chunks_to_remove", res.Removals.Len())
+ g.metrics.chunkRemovals.WithLabelValues("dropped").Add(float64(res.Removals.Len()))
default:
level.Debug(logger).Log("msg", "accept partial result", "fp_int", uint64(res.Fp), "fp_hex", res.Fp, "chunks_to_remove", res.Removals.Len())
task.responses = append(task.responses, res)
+ g.metrics.chunkRemovals.WithLabelValues("accepted").Add(float64(res.Removals.Len()))
}
}
@@ -441,7 +417,18 @@ func consumeTask(ctx context.Context, task Task, tasksCh chan<- Task, logger log
}
}
-func removeNotMatchingChunks(req *logproto.FilterChunkRefRequest, res v1.Output, logger log.Logger) {
+func (g *Gateway) processResponses(req *logproto.FilterChunkRefRequest, responses []v1.Output) (filtered int) {
+ for _, o := range responses {
+ if o.Removals.Len() == 0 {
+ continue
+ }
+ filtered += g.removeNotMatchingChunks(req, o)
+ }
+ return
+}
+
+func (g *Gateway) removeNotMatchingChunks(req *logproto.FilterChunkRefRequest, res v1.Output) (filtered int) {
+
// binary search index of fingerprint
idx := sort.Search(len(req.Refs), func(i int) bool {
return req.Refs[i].Fingerprint >= uint64(res.Fp)
@@ -449,13 +436,15 @@ func removeNotMatchingChunks(req *logproto.FilterChunkRefRequest, res v1.Output,
// fingerprint not found
if idx >= len(req.Refs) {
- level.Error(logger).Log("msg", "index out of range", "idx", idx, "len", len(req.Refs), "fp", uint64(res.Fp))
+ level.Error(g.logger).Log("msg", "index out of range", "idx", idx, "len", len(req.Refs), "fp", uint64(res.Fp))
return
}
// if all chunks of a fingerprint are removed
// then remove the whole group from the response
if len(req.Refs[idx].Refs) == res.Removals.Len() {
+ filtered += len(req.Refs[idx].Refs)
+
req.Refs[idx] = nil // avoid leaking pointer
req.Refs = append(req.Refs[:idx], req.Refs[idx+1:]...)
return
@@ -465,10 +454,13 @@ func removeNotMatchingChunks(req *logproto.FilterChunkRefRequest, res v1.Output,
toRemove := res.Removals[i]
for j := 0; j < len(req.Refs[idx].Refs); j++ {
if toRemove.Checksum == req.Refs[idx].Refs[j].Checksum {
+ filtered += 1
+
req.Refs[idx].Refs[j] = nil // avoid leaking pointer
req.Refs[idx].Refs = append(req.Refs[idx].Refs[:j], req.Refs[idx].Refs[j+1:]...)
j-- // since we removed the current item at index, we have to redo the same index
}
}
}
+ return
}
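
`removeNotMatchingChunks` locates the fingerprint group by binary search and then deletes matching chunk refs in place, nil-ing each vacated slot so the slice's backing array does not keep the pointer alive. A minimal sketch of that removal pattern (the `chunk` type and checksums are illustrative):

```go
package main

import "fmt"

type chunk struct{ checksum uint32 }

// removeByChecksum deletes matching refs in place and returns the
// shrunken slice plus the number of removed elements.
func removeByChecksum(refs []*chunk, removals []uint32) ([]*chunk, int) {
	filtered := 0
	for _, rm := range removals {
		for j := 0; j < len(refs); j++ {
			if refs[j].checksum == rm {
				filtered++
				refs[j] = nil // avoid leaking the pointer
				refs = append(refs[:j], refs[j+1:]...)
				j-- // re-inspect the index we just compacted into
			}
		}
	}
	return refs, filtered
}

func main() {
	refs := []*chunk{{1}, {2}, {3}}
	refs, n := removeByChecksum(refs, []uint32{2})
	fmt.Println(len(refs), n) // 2 1
}
```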
diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go
index f07e014b84dc..fede86484a96 100644
--- a/pkg/bloomgateway/bloomgateway_test.go
+++ b/pkg/bloomgateway/bloomgateway_test.go
@@ -423,6 +423,9 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
}
func TestBloomGateway_RemoveNotMatchingChunks(t *testing.T) {
+ g := &Gateway{
+ logger: log.NewNopLogger(),
+ }
t.Run("removing chunks partially", func(t *testing.T) {
req := &logproto.FilterChunkRefRequest{
Refs: []*logproto.GroupedChunkRefs{
@@ -450,7 +453,8 @@ func TestBloomGateway_RemoveNotMatchingChunks(t *testing.T) {
}},
},
}
- removeNotMatchingChunks(req, res, log.NewNopLogger())
+ n := g.removeNotMatchingChunks(req, res)
+ require.Equal(t, 2, n)
require.Equal(t, expected, req)
})
@@ -474,7 +478,8 @@ func TestBloomGateway_RemoveNotMatchingChunks(t *testing.T) {
expected := &logproto.FilterChunkRefRequest{
Refs: []*logproto.GroupedChunkRefs{},
}
- removeNotMatchingChunks(req, res, log.NewNopLogger())
+ n := g.removeNotMatchingChunks(req, res)
+ require.Equal(t, 3, n)
require.Equal(t, expected, req)
})
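
The reworked `consumeTask` decides per partial result whether the task deadline has already expired: a non-blocking `select` on the context accepts results while it is live and counts late arrivals as dropped. A runnable sketch of that pattern (channel payloads and timings are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// consume drains results; each one is accepted while ctx is live and
// counted as dropped once the deadline has passed.
func consume(ctx context.Context, results <-chan int) (accepted, dropped int) {
	for res := range results {
		select {
		case <-ctx.Done():
			dropped += res
		default:
			accepted += res
		}
	}
	return accepted, dropped
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	ch := make(chan int)
	go func() {
		defer close(ch)
		for i := 0; i < 3; i++ {
			ch <- 5
			time.Sleep(8 * time.Millisecond)
		}
	}()

	a, d := consume(ctx, ch)
	fmt.Println("accepted:", a, "dropped:", d) // output depends on timing
}
```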
diff --git a/pkg/bloomgateway/querier.go b/pkg/bloomgateway/querier.go
index 4b2366e83f28..02608bfdf71c 100644
--- a/pkg/bloomgateway/querier.go
+++ b/pkg/bloomgateway/querier.go
@@ -5,17 +5,56 @@ import (
"sort"
"github.com/go-kit/log"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
)
+type querierMetrics struct {
+ chunksTotal prometheus.Counter
+ chunksFiltered prometheus.Counter
+ seriesTotal prometheus.Counter
+ seriesFiltered prometheus.Counter
+}
+
+func newQuerierMetrics(registerer prometheus.Registerer, namespace, subsystem string) *querierMetrics {
+ return &querierMetrics{
+ chunksTotal: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "chunks_total",
+ Help: "Total amount of chunks pre filtering. Does not count chunks in failed requests.",
+ }),
+ chunksFiltered: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "chunks_filtered_total",
+ Help: "Total amount of chunks that have been filtered out. Does not count chunks in failed requests.",
+ }),
+ seriesTotal: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "series_total",
+ Help: "Total amount of series pre filtering. Does not count series in failed requests.",
+ }),
+ seriesFiltered: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "series_filtered_total",
+ Help: "Total amount of series that have been filtered out. Does not count series in failed requests.",
+ }),
+ }
+}
+
// BloomQuerier is a store-level abstraction on top of Client
// It is used by the index gateway to filter ChunkRefs based on a given line filter expression.
type BloomQuerier struct {
- c Client
- logger log.Logger
+ c Client
+ logger log.Logger
+ metrics *querierMetrics
}
func NewQuerier(c Client, logger log.Logger) *BloomQuerier {
@@ -37,6 +76,9 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
defer groupedChunksRefPool.Put(grouped)
grouped = groupChunkRefs(chunkRefs, grouped)
+ preFilterChunks := len(chunkRefs)
+ preFilterSeries := len(grouped)
+
refs, err := bq.c.FilterChunks(ctx, tenant, from, through, grouped, filters...)
if err != nil {
return nil, err
@@ -55,6 +97,15 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
})
}
}
+
+ postFilterChunks := len(result)
+ postFilterSeries := len(refs)
+
+ bq.metrics.chunksTotal.Add(float64(preFilterChunks))
+ bq.metrics.chunksFiltered.Add(float64(preFilterChunks - postFilterChunks))
+ bq.metrics.seriesTotal.Add(float64(preFilterSeries))
+ bq.metrics.seriesFiltered.Add(float64(preFilterSeries - postFilterSeries))
+
return result, nil
}
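
The new querier metrics record totals before filtering and the filtered delta afterwards, rather than instrumenting inside the gateway. A compact sketch of that accounting with prometheus/client_golang (the registry and metric names here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()
	chunksTotal := promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "chunks_total", Help: "Chunks before filtering.",
	})
	chunksFiltered := promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "chunks_filtered_total", Help: "Chunks filtered out.",
	})

	// e.g. len(chunkRefs) before and len(result) after FilterChunks
	pre, post := 100, 60
	chunksTotal.Add(float64(pre))
	chunksFiltered.Add(float64(pre - post))

	fmt.Println("filtered:", pre-post) // filtered: 40
}
```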
diff --git a/pkg/bloomgateway/worker.go b/pkg/bloomgateway/worker.go
index 5c57c0a2e495..ec44081c1b30 100644
--- a/pkg/bloomgateway/worker.go
+++ b/pkg/bloomgateway/worker.go
@@ -163,7 +163,7 @@ func (w *worker) running(_ context.Context) error {
err = p.run(taskCtx, tasks)
if err != nil {
- w.metrics.processDuration.WithLabelValues(w.id, labelSuccess).Observe(time.Since(start).Seconds())
+ w.metrics.processDuration.WithLabelValues(w.id, labelFailure).Observe(time.Since(start).Seconds())
w.metrics.tasksProcessed.WithLabelValues(w.id, labelFailure).Add(float64(len(tasks)))
level.Error(w.logger).Log("msg", "failed to process tasks", "err", err)
} else {
From 50b51dd6cae5d5d61d23d311c9db4a472f085b85 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alexander=20Lindesk=C3=A4r?=
Date: Mon, 19 Feb 2024 10:44:45 +0100
Subject: [PATCH 085/130] Helm: Add alibabacloud to isUsingObjectStorage.
(#11946)
Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com>
---
production/helm/loki/CHANGELOG.md | 3 +++
production/helm/loki/Chart.yaml | 2 +-
production/helm/loki/README.md | 2 +-
production/helm/loki/templates/_helpers.tpl | 2 +-
4 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index d232a7d6c7ea..59487c984d6e 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,9 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries below this line.)
+## 5.43.2
+
+- [BUGFIX] Added `alibabacloud` to `isUsingObjectStorage` check.
## 5.43.1
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 49d7ca836b8a..c20abdba25d8 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki in simple, scalable mode
type: application
appVersion: 2.9.4
-version: 5.43.1
+version: 5.43.2
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index a4ef51dd9d5e..86a13201c2eb 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-![Version: 5.43.1](https://img.shields.io/badge/Version-5.43.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.4](https://img.shields.io/badge/AppVersion-2.9.4-informational?style=flat-square)
+![Version: 5.43.2](https://img.shields.io/badge/Version-5.43.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.4](https://img.shields.io/badge/AppVersion-2.9.4-informational?style=flat-square)
Helm chart for Grafana Loki in simple, scalable mode
diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl
index 9dd70123189e..502c7650010b 100644
--- a/production/helm/loki/templates/_helpers.tpl
+++ b/production/helm/loki/templates/_helpers.tpl
@@ -597,7 +597,7 @@ Create the service endpoint including port for MinIO.
{{/* Determine if deployment is using object storage */}}
{{- define "loki.isUsingObjectStorage" -}}
-{{- or (eq .Values.loki.storage.type "gcs") (eq .Values.loki.storage.type "s3") (eq .Values.loki.storage.type "azure") (eq .Values.loki.storage.type "swift") -}}
+{{- or (eq .Values.loki.storage.type "gcs") (eq .Values.loki.storage.type "s3") (eq .Values.loki.storage.type "azure") (eq .Values.loki.storage.type "swift") (eq .Values.loki.storage.type "alibabacloud") -}}
{{- end -}}
{{/* Configure the correct name for the memberlist service */}}
From ffc61fbbf4990ccb00aaed107327fd5ff4d66e82 Mon Sep 17 00:00:00 2001
From: Salva Corts
Date: Mon, 19 Feb 2024 12:01:24 +0100
Subject: [PATCH 086/130] Remove blooms jsonnet components (#11984)
---
.../ksonnet/loki/bloom-compactor.libsonnet | 125 -------------
.../ksonnet/loki/bloom-gateway.libsonnet | 170 ------------------
.../ksonnet/loki/bloomfilters.libsonnet | 8 -
production/ksonnet/loki/loki.libsonnet | 3 -
4 files changed, 306 deletions(-)
delete mode 100644 production/ksonnet/loki/bloom-compactor.libsonnet
delete mode 100644 production/ksonnet/loki/bloom-gateway.libsonnet
delete mode 100644 production/ksonnet/loki/bloomfilters.libsonnet
diff --git a/production/ksonnet/loki/bloom-compactor.libsonnet b/production/ksonnet/loki/bloom-compactor.libsonnet
deleted file mode 100644
index d8c5e862fa10..000000000000
--- a/production/ksonnet/loki/bloom-compactor.libsonnet
+++ /dev/null
@@ -1,125 +0,0 @@
-{
- local k = import 'ksonnet-util/kausal.libsonnet',
- local container = k.core.v1.container,
- local containerPort = k.core.v1.containerPort,
- local pvc = k.core.v1.persistentVolumeClaim,
- local service = k.core.v1.service,
- local statefulSet = k.apps.v1.statefulSet,
- local volume = k.core.v1.volume,
- local volumeMount = k.core.v1.volumeMount,
-
- local name = 'bloom-compactor',
-
- _config+:: {
- bloom_compactor+: {
- // number of replicas
- replicas: if $._config.use_bloom_filters then 3 else 0,
- // PVC config
- pvc_size: if $._config.use_bloom_filters then error 'bloom_compactor.pvc_size needs to be defined' else '',
- pvc_class: if $._config.use_bloom_filters then error 'bloom_compactor.pvc_class needs to be defined' else '',
- },
- loki+:
- if $._config.use_bloom_filters
- then
- {
- bloom_compactor: {
- enabled: true,
- working_directory: '/data/blooms',
- compaction_interval: '15m',
- max_compaction_parallelism: 1,
- },
- }
- else {},
- },
-
- local cfg = self._config.bloom_compactor,
-
- local volumeName = name + '-data',
- local volumeMounts = [volumeMount.new(volumeName, '/data')],
-
- bloom_compactor_args::
- if $._config.use_bloom_filters
- then
- $._config.commonArgs {
- target: 'bloom-compactor',
- }
- else {},
-
- bloom_compactor_ports:: [
- containerPort.new(name='http-metrics', port=$._config.http_listen_port),
- containerPort.new(name='grpc', port=9095),
- ],
-
- bloom_compactor_data_pvc::
- if $._config.use_bloom_filters
- then
- pvc.new(volumeName)
- // set disk size
- + pvc.mixin.spec.resources.withRequests({ storage: $._config.bloom_compactor.pvc_size })
- // mount the volume as read-write by a single node
- + pvc.mixin.spec.withAccessModes(['ReadWriteOnce'])
- // set persistent volume storage class
- + pvc.mixin.spec.withStorageClassName($._config.bloom_compactor.pvc_class)
- else {},
-
-
- bloom_compactor_container::
- if $._config.use_bloom_filters
- then
- container.new(name, $._images.bloom_compactor)
- // add default ports
- + container.withPorts($.bloom_compactor_ports)
- // add target specific CLI arguments
- + container.withArgsMixin(k.util.mapToFlags($.bloom_compactor_args))
- // mount the data pvc at given mountpoint
- + container.withVolumeMountsMixin(volumeMounts)
- // add globale environment variables
- + container.withEnvMixin($._config.commonEnvs)
- // add HTTP readiness probe
- + container.mixin.readinessProbe.httpGet.withPath('/ready')
- + container.mixin.readinessProbe.httpGet.withPort($._config.http_listen_port)
- + container.mixin.readinessProbe.withTimeoutSeconds(1)
- // define container resource requests
- + k.util.resourcesRequests('2', '4Gi')
- // define container resource limits
- + k.util.resourcesLimits(null, '8Gi')
- else {},
-
- bloom_compactor_statefulset:
- if $._config.use_bloom_filters
- then
- statefulSet.new(name, cfg.replicas, [$.bloom_compactor_container], $.bloom_compactor_data_pvc)
- // add clusterIP service
- + statefulSet.mixin.spec.withServiceName(name)
- // perform rolling update when statefulset configuration changes
- + statefulSet.mixin.spec.updateStrategy.withType('RollingUpdate')
- // launch or terminate pods in parallel, *does not* affect upgrades
- + statefulSet.mixin.spec.withPodManagementPolicy('Parallel')
- // 10001 is the user/group ID assigned to Loki in the Dockerfile
- + statefulSet.mixin.spec.template.spec.securityContext.withRunAsUser(10001)
- + statefulSet.mixin.spec.template.spec.securityContext.withRunAsGroup(10001)
- + statefulSet.mixin.spec.template.spec.securityContext.withFsGroup(10001)
- // ensure statefulset is updated when loki config changes
- + $.config_hash_mixin
- // ensure no other workloads are scheduled
- + k.util.antiAffinity
- // mount the loki config.yaml
- + k.util.configVolumeMount('loki', '/etc/loki/config')
- // mount the runtime overrides.yaml
- + k.util.configVolumeMount('overrides', '/etc/loki/overrides')
- else {},
-
- bloom_compactor_service:
- if $._config.use_bloom_filters
- then
- k.util.serviceFor($.bloom_compactor_statefulset, $._config.service_ignored_labels)
- else {},
-
- bloom_compactor_headless_service:
- if $._config.use_bloom_filters
- then
- k.util.serviceFor($.bloom_compactor_statefulset, $._config.service_ignored_labels)
- + service.mixin.metadata.withName(name + '-headless')
- + service.mixin.spec.withClusterIp('None')
- else {},
-}
diff --git a/production/ksonnet/loki/bloom-gateway.libsonnet b/production/ksonnet/loki/bloom-gateway.libsonnet
deleted file mode 100644
index 18e50c7e0d91..000000000000
--- a/production/ksonnet/loki/bloom-gateway.libsonnet
+++ /dev/null
@@ -1,170 +0,0 @@
-{
- local k = import 'ksonnet-util/kausal.libsonnet',
- local container = k.core.v1.container,
- local containerPort = k.core.v1.containerPort,
- local pvc = k.core.v1.persistentVolumeClaim,
- local service = k.core.v1.service,
- local statefulSet = k.apps.v1.statefulSet,
- local volume = k.core.v1.volume,
- local volumeMount = k.core.v1.volumeMount,
-
- local name = 'bloom-gateway',
-
- _config+:: {
- bloom_gateway+: {
- // number of replicas
- replicas: if $._config.use_bloom_filters then 3 else 0,
- // if true, the host needs to have local SSD disks mounted, otherwise PVCs are used
- use_local_ssd: false,
- // PVC config
- pvc_size: if !self.use_local_ssd then error 'bloom_gateway.pvc_size needs to be defined when using PVC' else '',
- pvc_class: if !self.use_local_ssd then error 'bloom_gateway.pvc_class needs to be defined when using PVC' else '',
- // local SSD config
- hostpath: if self.use_local_ssd then error 'bloom_gateway.hostpath needs to be defined when using local SSDs' else '',
- node_selector: if self.use_local_ssd then error 'bloom_gateway.node_selector needs to be defined when using local SSDs' else {},
- tolerations: if self.use_local_ssd then error 'bloom_gateway.tolerations needs to be defined when using local SSDs' else [],
- },
- loki+:
- if $._config.use_bloom_filters
- then
- {
- bloom_gateway+: {
- enabled: true,
- worker_concurrency: 8,
- ring: {
- replication_factor: 3,
- },
- client: {
- cache_results: false,
- },
- },
- storage_config+: {
- bloom_shipper+: {
- working_directory: '/data/blooms',
- blocks_downloading_queue: {
- workers_count: 10,
- },
- blocks_cache: {
- enabled: true,
- max_size_mb: error 'set bloom_shipper.blocks_cache.max_size_mb to ~80% of available disk size',
- ttl: '24h',
- },
- },
- },
- }
- else {},
- },
-
- local cfg = self._config.bloom_gateway,
-
- local volumeName = name + '-data',
-
- local volumes =
- if cfg.use_local_ssd
- then [volume.fromHostPath(volumeName, cfg.hostpath)]
- else [],
-
- local volumeMounts = [
- volumeMount.new(volumeName, '/data'),
- ],
-
- bloom_gateway_args::
- if $._config.use_bloom_filters
- then
- $._config.commonArgs {
- target: 'bloom-gateway',
- }
- else {},
-
- bloom_gateway_ports:: [
- containerPort.new(name='http-metrics', port=$._config.http_listen_port),
- containerPort.new(name='grpc', port=9095),
- ],
-
- bloom_gateway_data_pvc::
- if $._config.use_bloom_filters && !cfg.use_local_ssd
- then
- pvc.new(volumeName)
- // set disk size
- + pvc.mixin.spec.resources.withRequests({ storage: $._config.bloom_gateway.pvc_size })
- // mount the volume as read-write by a single node
- + pvc.mixin.spec.withAccessModes(['ReadWriteOnce'])
- // set persistent volume storage class
- + pvc.mixin.spec.withStorageClassName($._config.bloom_compactor.pvc_class)
- else
- null,
-
- bloom_gateway_container::
- if $._config.use_bloom_filters
- then
- container.new(name, $._images.bloom_gateway)
- // add default ports
- + container.withPorts($.bloom_gateway_ports)
- // add target specific CLI arguments
- + container.withArgsMixin(k.util.mapToFlags($.bloom_gateway_args))
- // mount local SSD or PVC
- + container.withVolumeMountsMixin(volumeMounts)
- // add globale environment variables
- + container.withEnvMixin($._config.commonEnvs)
- // add HTTP readiness probe
- + container.mixin.readinessProbe.httpGet.withPath('/ready')
- + container.mixin.readinessProbe.httpGet.withPort($._config.http_listen_port)
- + container.mixin.readinessProbe.withTimeoutSeconds(1)
- // define container resource requests
- + k.util.resourcesRequests('2', '4Gi')
- // define container resource limits
- + k.util.resourcesLimits(null, '8Gi')
- else {},
-
- bloom_gateway_statefulset:
- if $._config.use_bloom_filters
- then
- statefulSet.new(name, cfg.replicas, [$.bloom_gateway_container])
- // add clusterIP service
- + statefulSet.mixin.spec.withServiceName(name)
- // perform rolling update when statefulset configuration changes
- + statefulSet.mixin.spec.updateStrategy.withType('RollingUpdate')
- // launch or terminate pods in parallel, *does not* affect upgrades
- + statefulSet.mixin.spec.withPodManagementPolicy('Parallel')
- // 10001 is the user/group ID assigned to Loki in the Dockerfile
- + statefulSet.mixin.spec.template.spec.securityContext.withRunAsUser(10001)
- + statefulSet.mixin.spec.template.spec.securityContext.withRunAsGroup(10001)
- + statefulSet.mixin.spec.template.spec.securityContext.withFsGroup(10001)
- // ensure statefulset is updated when loki config changes
- + $.config_hash_mixin
- // ensure no other workloads are scheduled
- + k.util.antiAffinity
- // mount the loki config.yaml
- + k.util.configVolumeMount('loki', '/etc/loki/config')
- // mount the runtime overrides.yaml
- + k.util.configVolumeMount('overrides', '/etc/loki/overrides')
- // configuration specific to SSD/PVC usage
- + (
- if cfg.use_local_ssd
- then
- // ensure the pod is scheduled on a node with local SSDs if needed
- statefulSet.mixin.spec.template.spec.withNodeSelector(cfg.node_selector)
- // tolerate the local-ssd taint
- + statefulSet.mixin.spec.template.spec.withTolerationsMixin(cfg.tolerations)
- // mount the local SSDs
- + statefulSet.mixin.spec.template.spec.withVolumesMixin(volumes)
- else
- // create persistent volume claim
- statefulSet.mixin.spec.withVolumeClaimTemplates([$.bloom_gateway_data_pvc])
- )
- else {},
-
- bloom_gateway_service:
- if $._config.use_bloom_filters
- then
- k.util.serviceFor($.bloom_gateway_statefulset, $._config.service_ignored_labels)
- else {},
-
- bloom_gateway_headless_service:
- if $._config.use_bloom_filters
- then
- k.util.serviceFor($.bloom_gateway_statefulset, $._config.service_ignored_labels)
- + service.mixin.metadata.withName(name + '-headless')
- + service.mixin.spec.withClusterIp('None')
- else {},
-}
diff --git a/production/ksonnet/loki/bloomfilters.libsonnet b/production/ksonnet/loki/bloomfilters.libsonnet
deleted file mode 100644
index 78231a808e1a..000000000000
--- a/production/ksonnet/loki/bloomfilters.libsonnet
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- _config+:: {
- // globally enable/disable bloom gateway and bloom compactor
- use_bloom_filters: false,
- },
-}
-+ (import 'bloom-compactor.libsonnet')
-+ (import 'bloom-gateway.libsonnet')
diff --git a/production/ksonnet/loki/loki.libsonnet b/production/ksonnet/loki/loki.libsonnet
index 871a68025e99..ad0489a69cd3 100644
--- a/production/ksonnet/loki/loki.libsonnet
+++ b/production/ksonnet/loki/loki.libsonnet
@@ -26,9 +26,6 @@
// BoltDB and TSDB Shipper support. Anything that modifies the compactor must be imported after this.
(import 'shipper.libsonnet') +
-// Accelerated search using bloom filters
-(import 'bloomfilters.libsonnet') +
-
(import 'table-manager.libsonnet') +
// Multi-zone ingester related config
From 85f7baaeda326c1f2df228c871f28cde9a4386cc Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Mon, 19 Feb 2024 09:01:59 -0800
Subject: [PATCH 087/130] Blooms/integration fixes (#11979)
---
pkg/bloomcompactor/batch.go | 14 +-
pkg/bloomcompactor/bloomcompactor.go | 15 +-
pkg/bloomcompactor/controller.go | 148 +++++++++++++-----
pkg/bloomcompactor/metrics.go | 41 ++++-
pkg/bloomcompactor/spec.go | 7 +-
pkg/bloomcompactor/tsdb.go | 3 +-
pkg/bloomgateway/util_test.go | 3 +-
pkg/storage/bloom/v1/builder.go | 15 ++
pkg/storage/bloom/v1/builder_test.go | 3 +-
pkg/storage/bloom/v1/index.go | 4 +-
pkg/storage/bloom/v1/metrics.go | 10 +-
.../stores/shipper/bloomshipper/client.go | 4 -
.../shipper/bloomshipper/client_test.go | 6 +-
.../shipper/bloomshipper/fetcher_test.go | 3 +-
.../stores/shipper/bloomshipper/resolver.go | 10 +-
.../stores/shipper/bloomshipper/shipper.go | 21 +--
.../shipper/bloomshipper/shipper_test.go | 43 -----
.../stores/shipper/bloomshipper/store_test.go | 3 +-
pkg/validation/limits.go | 7 +-
19 files changed, 222 insertions(+), 138 deletions(-)
diff --git a/pkg/bloomcompactor/batch.go b/pkg/bloomcompactor/batch.go
index bed0834a86b7..920bff1decc8 100644
--- a/pkg/bloomcompactor/batch.go
+++ b/pkg/bloomcompactor/batch.go
@@ -286,11 +286,10 @@ func (i *blockLoadingIter) loadNext() bool {
// check if there are more overlapping groups to load
if !i.overlapping.Next() {
i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
- return false
- }
+ if i.overlapping.Err() != nil {
+ i.err = i.overlapping.Err()
+ }
- if i.overlapping.Err() != nil {
- i.err = i.overlapping.Err()
return false
}
@@ -300,7 +299,7 @@ func (i *blockLoadingIter) loadNext() bool {
filtered := v1.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter)
iters := make([]v1.PeekingIterator[*v1.SeriesWithBloom], 0, len(blockRefs))
- for filtered.Next() && filtered.Err() == nil {
+ for filtered.Next() {
bq := loader.At()
if _, ok := i.loaded[bq]; !ok {
i.loaded[bq] = struct{}{}
@@ -309,8 +308,9 @@ func (i *blockLoadingIter) loadNext() bool {
iters = append(iters, iter)
}
- if loader.Err() != nil {
- i.err = loader.Err()
+ if err := filtered.Err(); err != nil {
+ i.err = err
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
return false
}
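
The batch.go fix restructures the loop so the iterator error is checked once after `Next()` returns false, instead of being folded into the loop condition where a final error could be skipped. A generic sketch of that Next/At/Err contract (the interface shape mirrors the v1 iterators; the slice-backed implementation is illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

type iter[T any] interface {
	Next() bool
	At() T
	Err() error
}

type sliceIter struct {
	xs  []int
	i   int
	err error
}

func (s *sliceIter) Next() bool {
	if s.i >= len(s.xs) {
		return false
	}
	if s.xs[s.i] < 0 {
		s.err = errors.New("negative element") // simulated failure mid-iteration
		return false
	}
	s.i++
	return true
}

func (s *sliceIter) At() int    { return s.xs[s.i-1] }
func (s *sliceIter) Err() error { return s.err }

func collect(it iter[int]) ([]int, error) {
	var out []int
	for it.Next() {
		out = append(out, it.At())
	}
	return out, it.Err() // check the error once the iterator is exhausted
}

func main() {
	got, err := collect(&sliceIter{xs: []int{1, 2, -3, 4}})
	fmt.Println(got, err) // [1 2] negative element
}
```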
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index 3bb1c815e829..cc96cc7219e8 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -214,6 +214,7 @@ func (c *Compactor) ownsTenant(tenant string) (v1.FingerprintBounds, bool, error
// runs a single round of compaction for all relevant tenants and tables
func (c *Compactor) runOne(ctx context.Context) error {
+ level.Info(c.logger).Log("msg", "running bloom compaction", "workers", c.cfg.WorkerParallelism)
var workersErr error
var wg sync.WaitGroup
ch := make(chan tenantTable)
@@ -226,7 +227,11 @@ func (c *Compactor) runOne(ctx context.Context) error {
err := c.loadWork(ctx, ch)
wg.Wait()
- return multierror.New(workersErr, err, ctx.Err()).Err()
+ err = multierror.New(workersErr, err, ctx.Err()).Err()
+ if err != nil {
+ level.Error(c.logger).Log("msg", "compaction iteration failed", "err", err)
+ }
+ return err
}
func (c *Compactor) tables(ts time.Time) *dayRangeIterator {
@@ -241,6 +246,7 @@ func (c *Compactor) tables(ts time.Time) *dayRangeIterator {
fromDay := config.NewDayTime(model.TimeFromUnixNano(from))
throughDay := config.NewDayTime(model.TimeFromUnixNano(through))
+ level.Debug(c.logger).Log("msg", "loaded tables for compaction", "from", fromDay, "through", throughDay)
return newDayRangeIterator(fromDay, throughDay, c.schemaCfg)
}
@@ -250,6 +256,8 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error {
for tables.Next() && tables.Err() == nil && ctx.Err() == nil {
table := tables.At()
+ level.Debug(c.logger).Log("msg", "loading work for table", "table", table)
+
tenants, err := c.tenants(ctx, table)
if err != nil {
return errors.Wrap(err, "getting tenants")
@@ -262,6 +270,7 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error {
if err != nil {
return errors.Wrap(err, "checking tenant ownership")
}
+ level.Debug(c.logger).Log("msg", "enqueueing work for tenant", "tenant", tenant, "table", table, "ownership", ownershipRange.String(), "owns", owns)
if !owns {
c.metrics.tenantsSkipped.Inc()
continue
@@ -280,12 +289,14 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error {
}
if err := tenants.Err(); err != nil {
+ level.Error(c.logger).Log("msg", "error iterating tenants", "err", err)
return errors.Wrap(err, "iterating tenants")
}
}
if err := tables.Err(); err != nil {
+ level.Error(c.logger).Log("msg", "error iterating tables", "err", err)
return errors.Wrap(err, "iterating tables")
}
@@ -330,7 +341,7 @@ func (c *Compactor) runWorkers(ctx context.Context, ch <-chan tenantTable) error
}
func (c *Compactor) compactTenantTable(ctx context.Context, tt tenantTable) error {
- level.Info(c.logger).Log("msg", "compacting", "org_id", tt.tenant, "table", tt.table, "ownership", tt.ownershipRange)
+ level.Info(c.logger).Log("msg", "compacting", "org_id", tt.tenant, "table", tt.table, "ownership", tt.ownershipRange.String())
return c.controller.compactTenant(ctx, tt.table, tt.tenant, tt.ownershipRange)
}
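
`runOne` fans work out to parallel workers, waits for them, and now logs the aggregated error once per compaction iteration. A sketch of that fan-out/aggregate shape using the standard library's `errors.Join` in place of Loki's multierror helper (worker count and work items are illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

func runOne(ctx context.Context, work []int) error {
	var (
		mu         sync.Mutex
		workersErr error
		wg         sync.WaitGroup
	)
	ch := make(chan int)

	for i := 0; i < 2; i++ { // WorkerParallelism stand-in
		wg.Add(1)
		go func() {
			defer wg.Done()
			for w := range ch {
				if w < 0 { // simulated per-item failure
					mu.Lock()
					workersErr = errors.Join(workersErr, fmt.Errorf("bad work item %d", w))
					mu.Unlock()
				}
			}
		}()
	}

	for _, w := range work {
		ch <- w
	}
	close(ch)
	wg.Wait()

	// aggregate worker and context errors, log once on failure
	if err := errors.Join(workersErr, ctx.Err()); err != nil {
		fmt.Println("compaction iteration failed:", err)
		return err
	}
	return nil
}

func main() {
	_ = runOne(context.Background(), []int{1, -2, 3})
}
```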
diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go
index ef41ec2d8efb..2a4ff6cd4524 100644
--- a/pkg/bloomcompactor/controller.go
+++ b/pkg/bloomcompactor/controller.go
@@ -70,7 +70,7 @@ func (s *SimpleBloomController) compactTenant(
tenant string,
ownershipRange v1.FingerprintBounds,
) error {
- logger := log.With(s.logger, "ownership", ownershipRange, "org_id", tenant, "table", table.Addr())
+ logger := log.With(s.logger, "org_id", tenant, "table", table.Addr(), "ownership", ownershipRange.String())
client, err := s.bloomStore.Client(table.ModelTime())
if err != nil {
@@ -92,6 +92,15 @@ func (s *SimpleBloomController) compactTenant(
return errors.Wrap(err, "failed to get metas")
}
+ level.Debug(logger).Log("msg", "found relevant metas", "metas", len(metas))
+
+ // fetch all metas overlapping our ownership range so we can safely
+ // check which metas can be deleted even if they only partially overlap our ownership range
+ superset, err := s.fetchSuperSet(ctx, tenant, table, ownershipRange, metas, logger)
+ if err != nil {
+ return errors.Wrap(err, "failed to fetch superset")
+ }
+
// build compaction plans
work, err := s.findOutdatedGaps(ctx, tenant, table, ownershipRange, metas, logger)
if err != nil {
@@ -104,6 +113,63 @@ func (s *SimpleBloomController) compactTenant(
return errors.Wrap(err, "failed to build gaps")
}
+ // combine built and superset metas
+ // in preparation for removing outdated ones
+ combined := append(superset, built...)
+
+ outdated := outdatedMetas(combined)
+ level.Debug(logger).Log("msg", "found outdated metas", "outdated", len(outdated))
+
+ var (
+ deletedMetas int
+ deletedBlocks int
+ )
+ defer func() {
+ s.metrics.metasDeleted.Add(float64(deletedMetas))
+ s.metrics.blocksDeleted.Add(float64(deletedBlocks))
+ }()
+
+ for _, meta := range outdated {
+ for _, block := range meta.Blocks {
+ err := client.DeleteBlocks(ctx, []bloomshipper.BlockRef{block})
+ if err != nil {
+ if client.IsObjectNotFoundErr(err) {
+ level.Debug(logger).Log("msg", "block not found while attempting delete, continuing", "block", block.String())
+ } else {
+ level.Error(logger).Log("msg", "failed to delete block", "err", err, "block", block.String())
+ return errors.Wrap(err, "failed to delete block")
+ }
+ }
+ deletedBlocks++
+ level.Debug(logger).Log("msg", "removed outdated block", "block", block.String())
+ }
+
+ err = client.DeleteMetas(ctx, []bloomshipper.MetaRef{meta.MetaRef})
+ if err != nil {
+ if client.IsObjectNotFoundErr(err) {
+ level.Debug(logger).Log("msg", "meta not found while attempting delete, continuing", "meta", meta.MetaRef.String())
+ } else {
+ level.Error(logger).Log("msg", "failed to delete meta", "err", err, "meta", meta.MetaRef.String())
+ return errors.Wrap(err, "failed to delete meta")
+ }
+ }
+ deletedMetas++
+ level.Debug(logger).Log("msg", "removed outdated meta", "meta", meta.MetaRef.String())
+ }
+
+ level.Debug(logger).Log("msg", "finished compaction")
+ return nil
+}
+
+// fetchSuperSet fetches all metas which overlap the ownership range of the first set of metas we've resolved
+func (s *SimpleBloomController) fetchSuperSet(
+ ctx context.Context,
+ tenant string,
+ table config.DayTable,
+ ownershipRange v1.FingerprintBounds,
+ metas []bloomshipper.Meta,
+ logger log.Logger,
+) ([]bloomshipper.Meta, error) {
// in order to delete outdated metas which only partially fall within the ownership range,
// we need to fetch all metas in the entire bound range of the first set of metas we've resolved
/*
@@ -121,12 +187,28 @@ func (s *SimpleBloomController) compactTenant(
union := superset.Union(meta.Bounds)
if len(union) > 1 {
level.Error(logger).Log("msg", "meta bounds union is not a single range", "union", union)
- return errors.New("meta bounds union is not a single range")
+ return nil, errors.New("meta bounds union is not a single range")
}
superset = union[0]
}
- metas, err = s.bloomStore.FetchMetas(
+ within := superset.Within(ownershipRange)
+ level.Debug(logger).Log(
+ "msg", "looking for superset metas",
+ "superset", superset.String(),
+ "superset_within", within,
+ )
+
+ if within {
+ // we don't need to fetch any more metas
+ // NB(owen-d): here we copy metas into the output. This is slightly inefficient, but
+ // helps prevent mutability bugs that returning the input slice itself could cause.
+ results := make([]bloomshipper.Meta, len(metas))
+ copy(results, metas)
+ return results, nil
+ }
+
+ supersetMetas, err := s.bloomStore.FetchMetas(
ctx,
bloomshipper.MetaSearchParams{
TenantID: tenant,
@@ -134,42 +216,20 @@ func (s *SimpleBloomController) compactTenant(
Keyspace: superset,
},
)
+
if err != nil {
level.Error(logger).Log("msg", "failed to get meta superset range", "err", err, "superset", superset)
- return errors.Wrap(err, "failed to get meta supseret range")
+ return nil, errors.Wrap(err, "failed to get meta supseret range")
}
- // combine built and pre-existing metas
- // in preparation for removing outdated metas
- metas = append(metas, built...)
-
- outdated := outdatedMetas(metas)
- for _, meta := range outdated {
- for _, block := range meta.Blocks {
- if err := client.DeleteBlocks(ctx, []bloomshipper.BlockRef{block}); err != nil {
- if client.IsObjectNotFoundErr(err) {
- level.Debug(logger).Log("msg", "block not found while attempting delete, continuing", "block", block)
- continue
- }
-
- level.Error(logger).Log("msg", "failed to delete blocks", "err", err)
- return errors.Wrap(err, "failed to delete blocks")
- }
- }
-
- if err := client.DeleteMetas(ctx, []bloomshipper.MetaRef{meta.MetaRef}); err != nil {
- if client.IsObjectNotFoundErr(err) {
- level.Debug(logger).Log("msg", "meta not found while attempting delete, continuing", "meta", meta.MetaRef)
- } else {
- level.Error(logger).Log("msg", "failed to delete metas", "err", err)
- return errors.Wrap(err, "failed to delete metas")
- }
- }
- }
-
- level.Debug(logger).Log("msg", "finished compaction")
- return nil
+ level.Debug(logger).Log(
+ "msg", "found superset metas",
+ "metas", len(metas),
+ "fresh_metas", len(supersetMetas),
+ "delta", len(supersetMetas)-len(metas),
+ )
+ return supersetMetas, nil
}
func (s *SimpleBloomController) findOutdatedGaps(
@@ -271,6 +331,7 @@ func (s *SimpleBloomController) buildGaps(
for i := range plan.gaps {
gap := plan.gaps[i]
+ logger := log.With(logger, "gap", gap.bounds.String(), "tsdb", plan.tsdb.Name())
meta := bloomshipper.Meta{
MetaRef: bloomshipper.MetaRef{
@@ -304,9 +365,11 @@ func (s *SimpleBloomController) buildGaps(
blocksIter,
s.rwFn,
s.metrics,
- log.With(logger, "tsdb", plan.tsdb.Name(), "ownership", gap),
+ logger,
)
+ level.Debug(logger).Log("msg", "generating blocks", "overlapping_blocks", len(gap.blocks))
+
newBlocks := gen.Generate(ctx)
if err != nil {
level.Error(logger).Log("msg", "failed to generate bloom", "err", err)
@@ -333,6 +396,16 @@ func (s *SimpleBloomController) buildGaps(
blocksIter.Close()
return nil, errors.Wrap(err, "failed to write block")
}
+ s.metrics.blocksCreated.Inc()
+
+ totalGapKeyspace := (gap.bounds.Max - gap.bounds.Min)
+ progress := (built.Bounds.Max - gap.bounds.Min)
+ pct := float64(progress) / float64(totalGapKeyspace) * 100
+ level.Debug(logger).Log(
+ "msg", "uploaded block",
+ "block", built.BlockRef.String(),
+ "progress_pct", fmt.Sprintf("%.2f", pct),
+ )
meta.Blocks = append(meta.Blocks, built.BlockRef)
}
@@ -346,6 +419,7 @@ func (s *SimpleBloomController) buildGaps(
blocksIter.Close()
// Write the new meta
+ // TODO(owen-d): put total size in log, total time in metrics+log
ref, err := bloomshipper.MetaRefFrom(tenant, table.Addr(), gap.bounds, meta.Sources, meta.Blocks)
if err != nil {
level.Error(logger).Log("msg", "failed to checksum meta", "err", err)
@@ -357,8 +431,10 @@ func (s *SimpleBloomController) buildGaps(
level.Error(logger).Log("msg", "failed to write meta", "err", err)
return nil, errors.Wrap(err, "failed to write meta")
}
- created = append(created, meta)
+ s.metrics.metasCreated.Inc()
+ level.Debug(logger).Log("msg", "uploaded meta", "meta", meta.MetaRef.String())
+ created = append(created, meta)
totalSeries += uint64(seriesItrWithCounter.Count())
}
}
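
`fetchSuperSet` unions the bounds of the resolved metas and skips the second store fetch when that union already lies within the ownership range. A small sketch of the union/within checks (the `bounds` type is an illustrative stand-in for `v1.FingerprintBounds`; the real code also guards against a non-contiguous union):

```go
package main

import "fmt"

type bounds struct{ min, max uint64 }

func union(a, b bounds) bounds {
	if b.min < a.min {
		a.min = b.min
	}
	if b.max > a.max {
		a.max = b.max
	}
	return a
}

func within(inner, outer bounds) bool {
	return inner.min >= outer.min && inner.max <= outer.max
}

func main() {
	ownership := bounds{100, 200}
	metas := []bounds{{120, 150}, {140, 260}}

	superset := metas[0]
	for _, m := range metas[1:] {
		superset = union(superset, m)
	}

	if within(superset, ownership) {
		fmt.Println("no superset fetch needed")
	} else {
		fmt.Printf("fetch metas covering %+v\n", superset) // {min:120 max:260}
	}
}
```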
diff --git a/pkg/bloomcompactor/metrics.go b/pkg/bloomcompactor/metrics.go
index 350e3ed7e480..74378cb78642 100644
--- a/pkg/bloomcompactor/metrics.go
+++ b/pkg/bloomcompactor/metrics.go
@@ -31,6 +31,11 @@ type Metrics struct {
tenantsCompleted *prometheus.CounterVec
tenantsCompletedTime *prometheus.HistogramVec
tenantsSeries prometheus.Histogram
+
+ blocksCreated prometheus.Counter
+ blocksDeleted prometheus.Counter
+ metasCreated prometheus.Counter
+ metasDeleted prometheus.Counter
}
func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
@@ -53,13 +58,13 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
compactionsStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "compactions_started",
+ Name: "compactions_started_total",
Help: "Total number of compactions started",
}),
compactionCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "compactions_completed",
+ Name: "compactions_completed_total",
Help: "Total number of compactions completed",
}, []string{"status"}),
compactionTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
@@ -73,7 +78,7 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
tenantsDiscovered: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "tenants_discovered",
+ Name: "tenants_discovered_total",
Help: "Number of tenants discovered during the current compaction run",
}),
tenantsOwned: promauto.With(r).NewCounter(prometheus.CounterOpts{
@@ -85,19 +90,19 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
tenantsSkipped: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "tenants_skipped",
+ Name: "tenants_skipped_total",
Help: "Number of tenants skipped since they are not owned by this instance",
}),
tenantsStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "tenants_started",
+ Name: "tenants_started_total",
Help: "Number of tenants started to process during the current compaction run",
}),
tenantsCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "tenants_completed",
+ Name: "tenants_completed_total",
Help: "Number of tenants successfully processed during the current compaction run",
}, []string{"status"}),
tenantsCompletedTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
@@ -115,6 +120,30 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
// Up to 10M series per tenant, way more than what we expect given our max_global_streams_per_user limits
Buckets: prometheus.ExponentialBucketsRange(1, 10000000, 10),
}),
+ blocksCreated: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "blocks_created_total",
+ Help: "Number of blocks created",
+ }),
+ blocksDeleted: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "blocks_deleted_total",
+ Help: "Number of blocks deleted",
+ }),
+ metasCreated: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "metas_created_total",
+ Help: "Number of metas created",
+ }),
+ metasDeleted: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "metas_deleted_total",
+ Help: "Number of metas deleted",
+ }),
}
return &m
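
The renames above align these counters with the Prometheus convention that counter names end in `_total`. A minimal sketch of the pattern, independent of Loki's wiring (the registry and metric names here are illustrative):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	// The client library only enforces name uniqueness, not the suffix;
	// the `_total` convention is applied by the caller.
	compactions := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{
		Namespace: "loki",
		Subsystem: "bloomcompactor",
		Name:      "compactions_started_total",
		Help:      "Total number of compactions started",
	})
	compactions.Inc()
}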
diff --git a/pkg/bloomcompactor/spec.go b/pkg/bloomcompactor/spec.go
index 67d41b650e37..cb030dfb5913 100644
--- a/pkg/bloomcompactor/spec.go
+++ b/pkg/bloomcompactor/spec.go
@@ -138,7 +138,7 @@ func (s *SimpleBloomGenerator) Generate(ctx context.Context) v1.Iterator[*v1.Blo
)
}
- return NewLazyBlockBuilderIterator(ctx, s.opts, s.populator(ctx), s.readWriterFn, series, s.blocksIter)
+ return NewLazyBlockBuilderIterator(ctx, s.opts, s.metrics, s.populator(ctx), s.readWriterFn, series, s.blocksIter)
}
// LazyBlockBuilderIterator is a lazy iterator over blocks that builds
@@ -146,6 +146,7 @@ func (s *SimpleBloomGenerator) Generate(ctx context.Context) v1.Iterator[*v1.Blo
type LazyBlockBuilderIterator struct {
ctx context.Context
opts v1.BlockOptions
+ metrics *Metrics
populate func(*v1.Series, *v1.Bloom) error
readWriterFn func() (v1.BlockWriter, v1.BlockReader)
series v1.PeekingIterator[*v1.Series]
@@ -158,6 +159,7 @@ type LazyBlockBuilderIterator struct {
func NewLazyBlockBuilderIterator(
ctx context.Context,
opts v1.BlockOptions,
+ metrics *Metrics,
populate func(*v1.Series, *v1.Bloom) error,
readWriterFn func() (v1.BlockWriter, v1.BlockReader),
series v1.PeekingIterator[*v1.Series],
@@ -166,6 +168,7 @@ func NewLazyBlockBuilderIterator(
return &LazyBlockBuilderIterator{
ctx: ctx,
opts: opts,
+ metrics: metrics,
populate: populate,
readWriterFn: readWriterFn,
series: series,
@@ -189,7 +192,7 @@ func (b *LazyBlockBuilderIterator) Next() bool {
return false
}
- mergeBuilder := v1.NewMergeBuilder(b.blocks, b.series, b.populate)
+ mergeBuilder := v1.NewMergeBuilder(b.blocks, b.series, b.populate, b.metrics.bloomMetrics)
writer, reader := b.readWriterFn()
blockBuilder, err := v1.NewBlockBuilder(b.opts, writer)
if err != nil {
diff --git a/pkg/bloomcompactor/tsdb.go b/pkg/bloomcompactor/tsdb.go
index 6159ce02a804..7f5ec5eab81a 100644
--- a/pkg/bloomcompactor/tsdb.go
+++ b/pkg/bloomcompactor/tsdb.go
@@ -236,8 +236,7 @@ func NewTSDBStores(
if err != nil {
return nil, errors.Wrap(err, "failed to create object client")
}
- prefix := path.Join(cfg.IndexTables.PathPrefix, cfg.IndexTables.Prefix)
- res.stores[i] = NewBloomTSDBStore(storage.NewIndexStorageClient(c, prefix))
+ res.stores[i] = NewBloomTSDBStore(storage.NewIndexStorageClient(c, cfg.IndexTables.PathPrefix))
}
}
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
index 6bc43cf79434..e9776dfef78f 100644
--- a/pkg/bloomgateway/util_test.go
+++ b/pkg/bloomgateway/util_test.go
@@ -323,8 +323,7 @@ func createBlocks(t *testing.T, tenant string, n int, from, through model.Time,
MetaRef: bloomshipper.MetaRef{
Ref: ref,
},
- BlockTombstones: []bloomshipper.BlockRef{},
- Blocks: []bloomshipper.BlockRef{blockRef},
+ Blocks: []bloomshipper.BlockRef{blockRef},
}
block, data, _ := v1.MakeBlock(t, n, fromFp, throughFp, from, through)
// Printing fingerprints and the log lines of its chunks comes handy for debugging...
diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go
index d2d51b557e5d..b094b847f2ef 100644
--- a/pkg/storage/bloom/v1/builder.go
+++ b/pkg/storage/bloom/v1/builder.go
@@ -526,6 +526,7 @@ type MergeBuilder struct {
store Iterator[*Series]
// Add chunks to a bloom
populate func(*Series, *Bloom) error
+ metrics *Metrics
}
// NewMergeBuilder is a specific builder which does the following:
@@ -536,11 +537,13 @@ func NewMergeBuilder(
blocks Iterator[*SeriesWithBloom],
store Iterator[*Series],
populate func(*Series, *Bloom) error,
+ metrics *Metrics,
) *MergeBuilder {
return &MergeBuilder{
blocks: blocks,
store: store,
populate: populate,
+ metrics: metrics,
}
}
@@ -568,6 +571,8 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
nextInBlocks = deduped.At()
}
+ var chunksIndexed, chunksCopied int
+
cur := nextInBlocks
chunksToAdd := nextInStore.Chunks
// The next series from the store doesn't exist in the blocks, so we add it
@@ -583,8 +588,11 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
} else {
// if the series already exists in the block, we only need to add the new chunks
chunksToAdd = nextInStore.Chunks.Unless(nextInBlocks.Series.Chunks)
+ chunksCopied = len(nextInStore.Chunks) - len(chunksToAdd)
}
+ chunksIndexed = len(chunksToAdd)
+
if len(chunksToAdd) > 0 {
if err := mb.populate(
&Series{
@@ -597,6 +605,9 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
}
}
+ mb.metrics.chunksIndexed.WithLabelValues(chunkIndexedTypeIterated).Add(float64(chunksIndexed))
+ mb.metrics.chunksIndexed.WithLabelValues(chunkIndexedTypeCopied).Add(float64(chunksCopied))
+
blockFull, err := builder.AddSeries(*cur)
if err != nil {
return 0, errors.Wrap(err, "adding series to block")
@@ -606,6 +617,10 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
}
}
+ if err := mb.store.Err(); err != nil {
+ return 0, errors.Wrap(err, "iterating store")
+ }
+
checksum, err := builder.Close()
if err != nil {
return 0, errors.Wrap(err, "closing block")
diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go
index 0122a35f7751..0013ad874457 100644
--- a/pkg/storage/bloom/v1/builder_test.go
+++ b/pkg/storage/bloom/v1/builder_test.go
@@ -226,7 +226,7 @@ func TestMergeBuilder(t *testing.T) {
)
// Ensure that the merge builder combines all the blocks correctly
- mergeBuilder := NewMergeBuilder(dedupedBlocks(blocks), storeItr, pop)
+ mergeBuilder := NewMergeBuilder(dedupedBlocks(blocks), storeItr, pop, NewMetrics(nil))
indexBuf := bytes.NewBuffer(nil)
bloomsBuf := bytes.NewBuffer(nil)
writer := NewMemoryBlockWriter(indexBuf, bloomsBuf)
@@ -400,6 +400,7 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
// We're not actually indexing new data in this test
return nil
},
+ NewMetrics(nil),
)
builder, err := NewBlockBuilder(DefaultBlockOptions, writer)
require.Nil(t, err)
diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go
index e3a14dc5453e..58d43b8cd0ac 100644
--- a/pkg/storage/bloom/v1/index.go
+++ b/pkg/storage/bloom/v1/index.go
@@ -234,8 +234,8 @@ func aggregateHeaders(xs []SeriesHeader) SeriesHeader {
Bounds: NewBounds(fromFp, throughFP),
}
- for _, x := range xs {
- if x.FromTs < res.FromTs {
+ for i, x := range xs {
+ if i == 0 || x.FromTs < res.FromTs {
res.FromTs = x.FromTs
}
if x.ThroughTs > res.ThroughTs {
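
The fix above addresses a zero-value seeding bug: `res.FromTs` starts at its zero value, so `x.FromTs < res.FromTs` could never fire and the minimum was never taken. Seeding from the first element restores the usual min-over-slice pattern; a minimal standalone sketch of the same idea:

package main

import "fmt"

func main() {
	xs := []int{7, 3, 9}

	var minTs int // zero value; a plain "x < minTs" would keep 0 for all-positive inputs
	for i, x := range xs {
		if i == 0 || x < minTs {
			minTs = x
		}
	}
	fmt.Println(minTs) // 3
}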
diff --git a/pkg/storage/bloom/v1/metrics.go b/pkg/storage/bloom/v1/metrics.go
index aa604c29f157..f5568a9d7659 100644
--- a/pkg/storage/bloom/v1/metrics.go
+++ b/pkg/storage/bloom/v1/metrics.go
@@ -10,12 +10,16 @@ type Metrics struct {
bloomSize prometheus.Histogram // size of the bloom filter in bytes
hammingWeightRatio prometheus.Histogram // ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter
estimatedCount prometheus.Histogram // estimated number of elements in the bloom filter
+ chunksIndexed *prometheus.CounterVec
}
+const chunkIndexedTypeIterated = "iterated"
+const chunkIndexedTypeCopied = "copied"
+
func NewMetrics(r prometheus.Registerer) *Metrics {
return &Metrics{
sbfCreationTime: promauto.With(r).NewCounter(prometheus.CounterOpts{
- Name: "bloom_creation_time",
+ Name: "bloom_creation_time_total",
Help: "Time spent creating scalable bloom filters",
}),
bloomSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
@@ -33,5 +37,9 @@ func NewMetrics(r prometheus.Registerer) *Metrics {
Help: "Estimated number of elements in the bloom filter",
Buckets: prometheus.ExponentialBucketsRange(1, 33554432, 10),
}),
+ chunksIndexed: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+ Name: "bloom_chunks_indexed_total",
+ Help: "Number of chunks indexed in bloom filters, partitioned by type. Type can be iterated or copied, where iterated indicates the chunk data was fetched and ngrams for its contents were generated, whereas copied indicates the chunk already existed in another source block and was copied to the new block",
+ }, []string{"type"}),
}
}
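
The new `chunksIndexed` counter is partitioned by a `type` label, with the two constants above as its only values. A minimal sketch of how such a label-partitioned counter behaves (the registry and sample values are illustrative, not Loki's wiring):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	chunksIndexed := promauto.With(prometheus.NewRegistry()).NewCounterVec(prometheus.CounterOpts{
		Name: "bloom_chunks_indexed_total",
		Help: "Number of chunks indexed in bloom filters, partitioned by type.",
	}, []string{"type"})

	// Each label value gets its own time series.
	chunksIndexed.WithLabelValues("iterated").Add(12) // chunk data fetched, ngrams generated
	chunksIndexed.WithLabelValues("copied").Add(3)    // chunk copied from an existing source block
}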
diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go
index 882b0eab41c2..240f2b516658 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client.go
@@ -88,10 +88,6 @@ type Meta struct {
// The specific TSDB files used to generate the block.
Sources []tsdb.SingleTenantTSDBIdentifier
- // TODO(owen-d): remove, unused
- // Old blocks which can be deleted in the future. These should be from previous compaction rounds.
- BlockTombstones []BlockRef
-
// A list of blocks that were generated
Blocks []BlockRef
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/client_test.go b/pkg/storage/stores/shipper/bloomshipper/client_test.go
index 897ed519946a..e5bbe3b5b1bf 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client_test.go
@@ -63,8 +63,7 @@ func putMeta(c *BloomClient, tenant string, start model.Time, minFp, maxFp model
// EndTimestamp: start.Add(12 * time.Hour),
},
},
- Blocks: []BlockRef{},
- BlockTombstones: []BlockRef{},
+ Blocks: []BlockRef{},
}
raw, _ := json.Marshal(meta)
return meta, c.client.PutObject(context.Background(), c.Meta(meta.MetaRef).Addr(), bytes.NewReader(raw))
@@ -129,8 +128,7 @@ func TestBloomClient_PutMeta(t *testing.T) {
// EndTimestamp: start.Add(12 * time.Hour),
},
},
- Blocks: []BlockRef{},
- BlockTombstones: []BlockRef{},
+ Blocks: []BlockRef{},
}
err := c.PutMeta(ctx, meta)
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
index 40a695e0b8e6..962bebb9956f 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
@@ -34,8 +34,7 @@ func makeMetas(t *testing.T, schemaCfg config.SchemaConfig, ts model.Time, keysp
EndTimestamp: ts,
},
},
- BlockTombstones: []BlockRef{},
- Blocks: []BlockRef{},
+ Blocks: []BlockRef{},
}
}
return metas
diff --git a/pkg/storage/stores/shipper/bloomshipper/resolver.go b/pkg/storage/stores/shipper/bloomshipper/resolver.go
index 40a59cee42db..7d224b9f0139 100644
--- a/pkg/storage/stores/shipper/bloomshipper/resolver.go
+++ b/pkg/storage/stores/shipper/bloomshipper/resolver.go
@@ -14,6 +14,9 @@ const (
BloomPrefix = "bloom"
MetasPrefix = "metas"
BlocksPrefix = "blocks"
+
+ extTarGz = ".tar.gz"
+ extJSON = ".json"
)
// KeyResolver is an interface for resolving keys to locations.
@@ -36,7 +39,7 @@ func (defaultKeyResolver) Meta(ref MetaRef) Location {
fmt.Sprintf("%v", ref.TableName),
ref.TenantID,
MetasPrefix,
- fmt.Sprintf("%v-%v", ref.Bounds, ref.Checksum),
+ fmt.Sprintf("%v-%x%s", ref.Bounds, ref.Checksum, extJSON),
}
}
@@ -50,7 +53,8 @@ func (defaultKeyResolver) ParseMetaKey(loc Location) (MetaRef, error) {
if err != nil {
return MetaRef{}, fmt.Errorf("failed to parse bounds of meta key %s : %w", loc, err)
}
- checksum, err := strconv.ParseUint(fnParts[2], 16, 64)
+ withoutExt := strings.TrimSuffix(fnParts[2], extJSON)
+ checksum, err := strconv.ParseUint(withoutExt, 16, 64)
if err != nil {
return MetaRef{}, fmt.Errorf("failed to parse checksum of meta key %s : %w", loc, err)
}
@@ -77,7 +81,7 @@ func (defaultKeyResolver) Block(ref BlockRef) Location {
ref.TenantID,
BlocksPrefix,
ref.Bounds.String(),
- fmt.Sprintf("%d-%d-%x", ref.StartTimestamp, ref.EndTimestamp, ref.Checksum),
+ fmt.Sprintf("%d-%d-%x%s", ref.StartTimestamp, ref.EndTimestamp, ref.Checksum, extTarGz),
}
}
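
With these changes, meta and block keys carry explicit file extensions, and the checksum is rendered as hex (`%x`) before the extension, so parsing must strip the extension before decoding. A minimal round-trip sketch of the format/parse pair (the bounds string and checksum are illustrative):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	const extJSON = ".json"
	checksum := uint32(0xdeadbeef)

	// Format: hex checksum plus extension, as in Meta().
	name := fmt.Sprintf("%v-%x%s", "0000000000000000-ffffffffffffffff", checksum, extJSON)

	// Parse: trim the extension before decoding hex, as in ParseMetaKey().
	hexPart := name[strings.LastIndex(name, "-")+1:]
	parsed, err := strconv.ParseUint(strings.TrimSuffix(hexPart, extJSON), 16, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == uint64(checksum)) // true
}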
diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper.go b/pkg/storage/stores/shipper/bloomshipper/shipper.go
index fd755b0a204a..3267886ac063 100644
--- a/pkg/storage/stores/shipper/bloomshipper/shipper.go
+++ b/pkg/storage/stores/shipper/bloomshipper/shipper.go
@@ -55,30 +55,15 @@ func (s *Shipper) Stop() {
}
// BlocksForMetas returns all the blocks from all the metas listed that are within the requested bounds
-// and not tombstoned in any of the metas
-func BlocksForMetas(metas []Meta, interval Interval, keyspaces []v1.FingerprintBounds) []BlockRef {
- blocks := make(map[BlockRef]bool) // block -> isTombstoned
-
+func BlocksForMetas(metas []Meta, interval Interval, keyspaces []v1.FingerprintBounds) (refs []BlockRef) {
for _, meta := range metas {
- for _, tombstone := range meta.BlockTombstones {
- blocks[tombstone] = true
- }
for _, block := range meta.Blocks {
- tombstoned, ok := blocks[block]
- if ok && tombstoned {
- // skip tombstoned blocks
- continue
+ if !isOutsideRange(block, interval, keyspaces) {
+ refs = append(refs, block)
}
- blocks[block] = false
}
}
- refs := make([]BlockRef, 0, len(blocks))
- for ref, tombstoned := range blocks {
- if !tombstoned && !isOutsideRange(ref, interval, keyspaces) {
- refs = append(refs, ref)
- }
- }
sort.Slice(refs, func(i, j int) bool {
return refs[i].Bounds.Less(refs[j].Bounds)
})
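
After the tombstone bookkeeping is removed, `BlocksForMetas` reduces to a plain filter-and-sort over the metas' block refs. A minimal standalone sketch of the same shape (the types and the bounds check are simplified stand-ins for Loki's):

package main

import (
	"fmt"
	"sort"
)

type blockRef struct{ min, max int }

func blocksForMetas(metas [][]blockRef, lo, hi int) (refs []blockRef) {
	for _, meta := range metas {
		for _, b := range meta {
			if b.max >= lo && b.min <= hi { // stands in for !isOutsideRange(...)
				refs = append(refs, b)
			}
		}
	}
	sort.Slice(refs, func(i, j int) bool { return refs[i].min < refs[j].min })
	return refs
}

func main() {
	metas := [][]blockRef{{{5, 9}, {30, 40}}, {{0, 4}}}
	fmt.Println(blocksForMetas(metas, 0, 10)) // [{0 4} {5 9}]
}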
diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
index c9e47f91fea2..e03d72c26ba3 100644
--- a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
@@ -14,49 +14,6 @@ import (
)
func TestBloomShipper_findBlocks(t *testing.T) {
- t.Run("expected block that are specified in tombstones to be filtered out", func(t *testing.T) {
- metas := []Meta{
- {
- Blocks: []BlockRef{
- //this blockRef is marked as deleted in the next meta
- createMatchingBlockRef(1),
- createMatchingBlockRef(2),
- },
- },
- {
- Blocks: []BlockRef{
- //this blockRef is marked as deleted in the next meta
- createMatchingBlockRef(3),
- createMatchingBlockRef(4),
- },
- },
- {
- BlockTombstones: []BlockRef{
- createMatchingBlockRef(1),
- createMatchingBlockRef(3),
- },
- Blocks: []BlockRef{
- createMatchingBlockRef(5),
- },
- },
- }
-
- ts := model.Now()
-
- interval := NewInterval(
- ts.Add(-2*time.Hour),
- ts.Add(-1*time.Hour),
- )
- blocks := BlocksForMetas(metas, interval, []v1.FingerprintBounds{{Min: 100, Max: 200}})
-
- expectedBlockRefs := []BlockRef{
- createMatchingBlockRef(2),
- createMatchingBlockRef(4),
- createMatchingBlockRef(5),
- }
- require.ElementsMatch(t, expectedBlockRefs, blocks)
- })
-
tests := map[string]struct {
minFingerprint uint64
maxFingerprint uint64
diff --git a/pkg/storage/stores/shipper/bloomshipper/store_test.go b/pkg/storage/stores/shipper/bloomshipper/store_test.go
index ca86cb94fa96..c99aa46df4bf 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store_test.go
@@ -83,8 +83,7 @@ func createMetaInStorage(store *BloomStore, tenant string, start model.Time, min
// EndTimestamp: start.Add(12 * time.Hour),
},
},
- Blocks: []BlockRef{},
- BlockTombstones: []BlockRef{},
+ Blocks: []BlockRef{},
}
err := store.storeDo(start, func(s *bloomStoreEntry) error {
raw, _ := json.Marshal(meta)
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index 9627718aa8ec..00ee2e152144 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -339,7 +339,12 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&l.BloomGatewayBlocksDownloadingParallelism, "bloom-gateway.blocks-downloading-parallelism", 50, "Maximum number of blocks will be downloaded in parallel by the Bloom Gateway.")
f.DurationVar(&l.BloomGatewayCacheKeyInterval, "bloom-gateway.cache-key-interval", 15*time.Minute, "Interval for computing the cache key in the Bloom Gateway.")
_ = l.BloomCompactorMaxBlockSize.Set(defaultBloomCompactorMaxBlockSize)
- f.Var(&l.BloomCompactorMaxBlockSize, "bloom-compactor.max-block-size", "The maximum bloom block size. A value of 0 sets an unlimited size. Default is 200MB. The actual block size might exceed this limit since blooms will be added to blocks until the block exceeds the maximum block size.")
+ f.Var(&l.BloomCompactorMaxBlockSize, "bloom-compactor.max-block-size",
+ fmt.Sprintf(
+ "The maximum bloom block size. A value of 0 sets an unlimited size. Default is %s. The actual block size might exceed this limit since blooms will be added to blocks until the block exceeds the maximum block size.",
+ defaultBloomCompactorMaxBlockSize,
+ ),
+ )
l.ShardStreams = &shardstreams.Config{}
l.ShardStreams.RegisterFlagsWithPrefix("shard-streams", f)
From 38c09a09bd8b3b4d2b97c10542cf5abff13cde87 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Mon, 19 Feb 2024 20:01:56 +0100
Subject: [PATCH 088/130] Bloom gateway: Fix metric for fetched blocks (#11988)
This PR fixes the metric for fetched blocks
Signed-off-by: Christian Haudum
---
pkg/bloomgateway/processor.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go
index 7d1d68785397..a7641bde0c96 100644
--- a/pkg/bloomgateway/processor.go
+++ b/pkg/bloomgateway/processor.go
@@ -73,7 +73,7 @@ func (p *processor) processBlocks(ctx context.Context, data []blockWithTasks) er
if err != nil {
return err
}
- p.metrics.metasFetched.WithLabelValues(p.id).Observe(float64(len(bqs)))
+ p.metrics.blocksFetched.WithLabelValues(p.id).Observe(float64(len(bqs)))
blockIter := v1.NewSliceIter(bqs)
From 0660cfc9df554d7e6aba77307738a7e893a491a0 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 19 Feb 2024 16:22:53 -0800
Subject: [PATCH 089/130] fix(deps): update github.com/axiomhq/hyperloglog
digest to 24bca3a (main) (#11756)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
go.mod | 2 +-
go.sum | 4 ++--
vendor/modules.txt | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/go.mod b/go.mod
index 6235582406d5..5b4c32ddb214 100644
--- a/go.mod
+++ b/go.mod
@@ -118,7 +118,7 @@ require (
github.com/DmitriyVTitov/size v1.5.0
github.com/IBM/go-sdk-core/v5 v5.13.1
github.com/IBM/ibm-cos-sdk-go v1.10.0
- github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc
+ github.com/axiomhq/hyperloglog v0.0.0-20240124082744-24bca3a5b39b
github.com/d4l3k/messagediff v1.2.1
github.com/efficientgo/core v1.0.0-rc.2
github.com/fsnotify/fsnotify v1.6.0
diff --git a/go.sum b/go.sum
index 8ab729e92805..744c904e823c 100644
--- a/go.sum
+++ b/go.sum
@@ -368,8 +368,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.16.1 h1:xsOtPAvHqhvQvBza5ohaUcfq1Lce
github.com/aws/aws-sdk-go-v2/service/sts v1.16.1/go.mod h1:Aq2/Qggh2oemSfyHH+EO4UBbgWG6zFCXLHYI4ILTY7w=
github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g=
github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
-github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc h1:Keo7wQ7UODUaHcEi7ltENhbAK2VgZjfat6mLy03tQzo=
-github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c=
+github.com/axiomhq/hyperloglog v0.0.0-20240124082744-24bca3a5b39b h1:F3yMzKumBUQ6Fn0sYI1YQ16vQRucpZOfBQ9HXWl5+XI=
+github.com/axiomhq/hyperloglog v0.0.0-20240124082744-24bca3a5b39b/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c=
github.com/baidubce/bce-sdk-go v0.9.141 h1:EV5BH5lfymIGPSmYDo9xYdsVlvWAW6nFeiA6t929zBE=
github.com/baidubce/bce-sdk-go v0.9.141/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e7cbf47887d6..2992d1e44075 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -400,7 +400,7 @@ github.com/aws/smithy-go/rand
github.com/aws/smithy-go/time
github.com/aws/smithy-go/transport/http
github.com/aws/smithy-go/transport/http/internal/io
-# github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc
+# github.com/axiomhq/hyperloglog v0.0.0-20240124082744-24bca3a5b39b
## explicit; go 1.12
github.com/axiomhq/hyperloglog
# github.com/baidubce/bce-sdk-go v0.9.141
From b7cb85f92b7f903a854bf0f84b9c80c2efaa4334 Mon Sep 17 00:00:00 2001
From: Salva Corts
Date: Tue, 20 Feb 2024 10:41:15 +0100
Subject: [PATCH 090/130] Tail queue (#11930)
---
pkg/ingester/stream.go | 38 +++++++++++-----------
pkg/ingester/tailer.go | 64 +++++++++++++++++++++++++++++++------
pkg/ingester/tailer_test.go | 50 +++++++++++++++++++++++++++++
3 files changed, 122 insertions(+), 30 deletions(-)
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index 4c6aa4f9a122..81ce43692925 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -288,30 +288,28 @@ func (s *stream) recordAndSendToTailers(record *wal.Record, entries []logproto.E
hasTailers := len(s.tailers) != 0
s.tailerMtx.RUnlock()
if hasTailers {
- go func() {
- stream := logproto.Stream{Labels: s.labelsString, Entries: entries}
-
- closedTailers := []uint32{}
-
- s.tailerMtx.RLock()
- for _, tailer := range s.tailers {
- if tailer.isClosed() {
- closedTailers = append(closedTailers, tailer.getID())
- continue
- }
- tailer.send(stream, s.labels)
+ stream := logproto.Stream{Labels: s.labelsString, Entries: entries}
+
+ closedTailers := []uint32{}
+
+ s.tailerMtx.RLock()
+ for _, tailer := range s.tailers {
+ if tailer.isClosed() {
+ closedTailers = append(closedTailers, tailer.getID())
+ continue
}
- s.tailerMtx.RUnlock()
+ tailer.send(stream, s.labels)
+ }
+ s.tailerMtx.RUnlock()
- if len(closedTailers) != 0 {
- s.tailerMtx.Lock()
- defer s.tailerMtx.Unlock()
+ if len(closedTailers) != 0 {
+ s.tailerMtx.Lock()
+ defer s.tailerMtx.Unlock()
- for _, closedTailerID := range closedTailers {
- delete(s.tailers, closedTailerID)
- }
+ for _, closedTailerID := range closedTailers {
+ delete(s.tailers, closedTailerID)
}
- }()
+ }
}
}
diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go
index 3e9a8a64cfd8..25fdfdb740d7 100644
--- a/pkg/ingester/tailer.go
+++ b/pkg/ingester/tailer.go
@@ -17,13 +17,21 @@ import (
util_log "github.com/grafana/loki/pkg/util/log"
)
-const bufferSizeForTailResponse = 5
+const (
+ bufferSizeForTailResponse = 5
+ bufferSizeForTailStream = 100
+)
type TailServer interface {
Send(*logproto.TailResponse) error
Context() context.Context
}
+type tailRequest struct {
+ stream logproto.Stream
+ lbs labels.Labels
+}
+
type tailer struct {
id uint32
orgID string
@@ -31,6 +39,7 @@ type tailer struct {
pipeline syntax.Pipeline
pipelineMtx sync.Mutex
+ queue chan tailRequest
sendChan chan *logproto.Stream
// Signaling channel used to notify once the tailer gets closed
@@ -59,6 +68,7 @@ func newTailer(orgID string, expr syntax.LogSelectorExpr, conn TailServer, maxDr
orgID: orgID,
matchers: matchers,
sendChan: make(chan *logproto.Stream, bufferSizeForTailResponse),
+ queue: make(chan tailRequest, bufferSizeForTailStream),
conn: conn,
droppedStreams: make([]*logproto.DroppedStream, 0, maxDroppedStreams),
maxDroppedStreams: maxDroppedStreams,
@@ -73,6 +83,9 @@ func (t *tailer) loop() {
var err error
var ok bool
+ // Launch a goroutine to receive streams sent with t.send
+ go t.receiveStreamsLoop()
+
for {
select {
case <-t.conn.Context().Done():
@@ -102,6 +115,37 @@ func (t *tailer) loop() {
}
}
+func (t *tailer) receiveStreamsLoop() {
+ defer t.close()
+ for {
+ select {
+ case <-t.conn.Context().Done():
+ return
+ case <-t.closeChan:
+ return
+ case req, ok := <-t.queue:
+ if !ok {
+ return
+ }
+
+ streams := t.processStream(req.stream, req.lbs)
+ if len(streams) == 0 {
+ continue
+ }
+
+ for _, s := range streams {
+ select {
+ case t.sendChan <- s:
+ default:
+ t.dropStream(*s)
+ }
+ }
+ }
+ }
+}
+
+// send sends a stream to the tailer for processing and sending to the client.
+// It will drop the stream if the tailer is blocked or the queue is full.
func (t *tailer) send(stream logproto.Stream, lbs labels.Labels) {
if t.isClosed() {
return
@@ -117,16 +161,16 @@ func (t *tailer) send(stream logproto.Stream, lbs labels.Labels) {
return
}
- streams := t.processStream(stream, lbs)
- if len(streams) == 0 {
- return
+ // Send stream to queue for processing asynchronously
+ // If the queue is full, drop the stream
+ req := tailRequest{
+ stream: stream,
+ lbs: lbs,
}
- for _, s := range streams {
- select {
- case t.sendChan <- s:
- default:
- t.dropStream(*s)
- }
+ select {
+ case t.queue <- req:
+ default:
+ t.dropStream(stream)
}
}
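
The tailer now decouples ingestion from stream processing: `send` does a non-blocking enqueue onto the buffered `queue` channel and drops the stream when the buffer is full, so the (now synchronous) caller in `recordAndSendToTailers` never blocks. A minimal sketch of that drop-on-full pattern (sizes and types are illustrative):

package main

import "fmt"

func main() {
	queue := make(chan int, 2) // stands in for chan tailRequest

	dropped := 0
	for i := 0; i < 5; i++ {
		select {
		case queue <- i: // enqueued for the receiving goroutine
		default: // buffer full: shed load instead of blocking the hot path
			dropped++ // the real code calls t.dropStream(stream) here
		}
	}
	fmt.Println(len(queue), dropped) // 2 3
}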
diff --git a/pkg/ingester/tailer_test.go b/pkg/ingester/tailer_test.go
index 674dde3df8af..11de0d4daf82 100644
--- a/pkg/ingester/tailer_test.go
+++ b/pkg/ingester/tailer_test.go
@@ -2,6 +2,7 @@ package ingester
import (
"context"
+ "fmt"
"math/rand"
"sync"
"testing"
@@ -15,6 +16,55 @@ import (
"github.com/grafana/loki/pkg/logql/syntax"
)
+func TestTailer_RoundTrip(t *testing.T) {
+ server := &fakeTailServer{}
+
+ lbs := makeRandomLabels()
+ expr, err := syntax.ParseLogSelector(lbs.String(), true)
+ require.NoError(t, err)
+ tail, err := newTailer("org-id", expr, server, 10)
+ require.NoError(t, err)
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ tail.loop()
+ wg.Done()
+ }()
+
+ const numStreams = 1000
+ var entries []logproto.Entry
+ for i := 0; i < numStreams; i += 3 {
+ var iterEntries []logproto.Entry
+ for j := 0; j < 3; j++ {
+ iterEntries = append(iterEntries, logproto.Entry{Timestamp: time.Unix(0, int64(i+j)), Line: fmt.Sprintf("line %d", i+j)})
+ }
+ entries = append(entries, iterEntries...)
+
+ tail.send(logproto.Stream{
+ Labels: lbs.String(),
+ Entries: iterEntries,
+ }, lbs)
+
+ // Sleep a bit to allow the tailer to process the stream without dropping it.
+ // Across all ~334 iterations this adds roughly 1.7 seconds of sleep in total.
+ time.Sleep(5 * time.Millisecond)
+ }
+
+ // Wait for the stream to be received by the server.
+ require.Eventually(t, func() bool {
+ return len(server.GetResponses()) > 0
+ }, 30*time.Second, 1*time.Second, "stream was not received")
+
+ var processedEntries []logproto.Entry
+ for _, response := range server.GetResponses() {
+ processedEntries = append(processedEntries, response.Stream.Entries...)
+ }
+ require.ElementsMatch(t, entries, processedEntries)
+
+ tail.close()
+ wg.Wait()
+}
+
func TestTailer_sendRaceConditionOnSendWhileClosing(t *testing.T) {
runs := 100
From fac5997b18e3fb07f92c20f4fa429213574e49cf Mon Sep 17 00:00:00 2001
From: Kaviraj Kanagaraj
Date: Tue, 20 Feb 2024 11:09:06 +0100
Subject: [PATCH 091/130] feat: Support split align and caching for instant
metric query results (#11814)
Signed-off-by: Kaviraj
---
.gitignore | 4 +-
CHANGELOG.md | 3 +-
cmd/loki/loki-local-with-memcached.yaml | 11 +
docs/sources/configure/_index.md | 30 ++
pkg/logql/downstream.go | 4 +
pkg/logql/metrics.go | 17 +-
pkg/logql/rangemapper.go | 85 +++++
pkg/logql/rangemapper_test.go | 84 ++++-
pkg/logqlmodel/stats/context.go | 49 ++-
pkg/logqlmodel/stats/stats.pb.go | 230 +++++++-----
pkg/logqlmodel/stats/stats.proto | 4 +
pkg/loki/config_wrapper.go | 7 +
pkg/loki/config_wrapper_test.go | 43 +++
pkg/querier/queryrange/codec_test.go | 110 +++---
pkg/querier/queryrange/downstreamer.go | 43 ++-
pkg/querier/queryrange/downstreamer_test.go | 181 +++++++---
.../queryrange/instant_metric_cache.go | 85 +++++
pkg/querier/queryrange/limits.go | 9 +
pkg/querier/queryrange/limits/definitions.go | 1 +
pkg/querier/queryrange/prometheus_test.go | 10 +
pkg/querier/queryrange/roundtrip.go | 104 +++++-
pkg/querier/queryrange/roundtrip_test.go | 8 +
pkg/querier/queryrange/split_by_range.go | 33 +-
pkg/querier/queryrange/split_by_range_test.go | 332 +++++++++++++++++-
pkg/util/marshal/legacy/marshal_test.go | 12 +-
pkg/util/marshal/marshal_test.go | 22 +-
pkg/validation/limits.go | 8 +
27 files changed, 1274 insertions(+), 255 deletions(-)
create mode 100644 pkg/querier/queryrange/instant_metric_cache.go
diff --git a/.gitignore b/.gitignore
index 66eb0a8cefeb..83ab9c808d34 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,8 +27,8 @@ cmd/querytee/querytee
dlv
rootfs/
dist
-coverage.txt
-test_results.txt
+*coverage.txt
+*test_results.txt
.DS_Store
.aws-sam
.idea
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8abd9a846458..fa8861228407 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@
##### Enhancements
+* [11814](https://github.com/grafana/loki/pull/11814) **kavirajk**: feat: Support split align and caching for instant metric query results
* [11851](https://github.com/grafana/loki/pull/11851) **elcomtik**: Helm: Allow the definition of resources for GrafanaAgent pods.
* [11819](https://github.com/grafana/loki/pull/11819) **jburnham**: Ruler: Add the ability to disable the `X-Scope-OrgId` tenant identification header in remote write requests.
* [11633](https://github.com/grafana/loki/pull/11633) **cyriltovena**: Add profiling integrations to tracing instrumentation.
@@ -70,7 +71,7 @@
* [11657](https://github.com/grafana/loki/pull/11657) **ashwanthgoli** Log results cache: compose empty response based on the request being served to avoid returning incorrect limit or direction.
* [11587](https://github.com/grafana/loki/pull/11587) **trevorwhitney** Fix semantics of label parsing logic of metrics and logs queries. Both only parse the first label if multiple extractions into the same label are requested.
* [11776](https://github.com/grafana/loki/pull/11776) **ashwanthgoli** Background Cache: Fixes a bug that is causing the background queue size to be incremented twice for each enqueued item.
-* [11921](https://github.com/grafana/loki/pull/11921) **paul1r**: Parsing: String array elements were not being parsed correctly in JSON processing
+* [11921](https://github.com/grafana/loki/pull/11921) **paul1r**: Parsing: String array elements were not being parsed correctly in JSON processing
##### Changes
diff --git a/cmd/loki/loki-local-with-memcached.yaml b/cmd/loki/loki-local-with-memcached.yaml
index d1b0ae1c2493..a2f4336cdd48 100644
--- a/cmd/loki/loki-local-with-memcached.yaml
+++ b/cmd/loki/loki-local-with-memcached.yaml
@@ -22,6 +22,17 @@ query_range:
cache_results: true
cache_volume_results: true
cache_series_results: true
+ cache_instant_metric_results: true
+ instant_metric_query_split_align: true
+ instant_metric_results_cache:
+ cache:
+ default_validity: 12h
+ memcached_client:
+ consistent_hash: true
+ addresses: "dns+localhost:11211"
+ max_idle_conns: 16
+ timeout: 500ms
+ update_interval: 1m
series_results_cache:
cache:
default_validity: 12h
diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md
index d3c5593b4da2..70891a044841 100644
--- a/docs/sources/configure/_index.md
+++ b/docs/sources/configure/_index.md
@@ -886,6 +886,28 @@ volume_results_cache:
# CLI flag: -frontend.volume-results-cache.compression
[compression: <string> | default = ""]
+# Cache instant metric query results.
+# CLI flag: -querier.cache-instant-metric-results
+[cache_instant_metric_results: <boolean> | default = false]
+
+# If a cache config is not specified and cache_instant_metric_results is true,
+# the config for the results cache is used.
+instant_metric_results_cache:
+ # The cache block configures the cache backend.
+ # The CLI flags prefix for this block configuration is:
+ # frontend.instant-metric-results-cache
+  [cache: <cache_config>]
+
+ # Use compression in cache. The default is an empty value '', which disables
+ # compression. Supported values are: 'snappy' and ''.
+ # CLI flag: -frontend.instant-metric-results-cache.compression
+  [compression: <string> | default = ""]
+
+# Whether to align the splits of an instant metric query with splitByInterval and
+# the query's execution time. Useful when cache_instant_metric_results is enabled.
+# CLI flag: -querier.instant-metric-query-split-align
+[instant_metric_query_split_align: <boolean> | default = false]
+
# Cache series query results.
# CLI flag: -querier.cache-series-results
[cache_series_results: <boolean> | default = false]
@@ -2935,6 +2957,13 @@ The `limits_config` block configures global and per-tenant limits in Loki.
# CLI flag: -experimental.querier.recent-metadata-query-window
[recent_metadata_query_window: <duration> | default = 0s]
+# Split instant metric queries by a time interval and execute in parallel. The
+# value 0 disables splitting instant metric queries by time. This also
+# determines how cache keys are chosen when instant metric query result caching
+# is enabled.
+# CLI flag: -querier.split-instant-metric-queries-by-interval
+[split_instant_metric_queries_by_interval: <duration> | default = 1h]
+
# Interval to use for time-based splitting when a request is within the
# `query_ingesters_within` window; defaults to `split-queries-by-interval` by
# setting to 0.
@@ -4403,6 +4432,7 @@ The cache block configures the cache backend. The supported CLI flags `<prefix>`
- `bloom.metas-cache`
- `frontend`
- `frontend.index-stats-results-cache`
+- `frontend.instant-metric-results-cache`
- `frontend.label-results-cache`
- `frontend.series-results-cache`
- `frontend.volume-results-cache`
diff --git a/pkg/logql/downstream.go b/pkg/logql/downstream.go
index 33d945f11b92..6946c06e54a0 100644
--- a/pkg/logql/downstream.go
+++ b/pkg/logql/downstream.go
@@ -636,6 +636,10 @@ func NewResultStepEvaluator(res logqlmodel.Result, params Params) (StepEvaluator
step = params.Step()
)
+ if res.Data == nil {
+ return nil, fmt.Errorf("data in the passed result is nil (res.Data), cannot be processed by stepevaluator")
+ }
+
switch data := res.Data.(type) {
case promql.Vector:
return NewVectorStepEvaluator(start, data), nil
diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go
index 40fbece82d87..b55e9840a475 100644
--- a/pkg/logql/metrics.go
+++ b/pkg/logql/metrics.go
@@ -94,7 +94,8 @@ func RecordRangeAndInstantQueryMetrics(
) {
var (
logger = fixLogger(ctx, log)
- rt = string(GetRangeType(p))
+ rangeType = GetRangeType(p)
+ rt = string(rangeType)
latencyType = latencyTypeFast
returnedLines = 0
)
@@ -103,6 +104,12 @@ func RecordRangeAndInstantQueryMetrics(
level.Warn(logger).Log("msg", "error parsing query type", "err", err)
}
+ resultCache := stats.Caches.Result
+
+ if queryType == QueryTypeMetric && rangeType == InstantType {
+ resultCache = stats.Caches.InstantMetricResult
+ }
+
// Tag throughput metric by latency type based on a threshold.
// Latency below the threshold is fast, above is slow.
if stats.Summary.ExecTime > slowQueryThresholdSecond {
@@ -162,10 +169,10 @@ func RecordRangeAndInstantQueryMetrics(
"cache_volume_results_req", stats.Caches.VolumeResult.EntriesRequested,
"cache_volume_results_hit", stats.Caches.VolumeResult.EntriesFound,
"cache_volume_results_download_time", stats.Caches.VolumeResult.CacheDownloadTime(),
- "cache_result_req", stats.Caches.Result.EntriesRequested,
- "cache_result_hit", stats.Caches.Result.EntriesFound,
- "cache_result_download_time", stats.Caches.Result.CacheDownloadTime(),
- "cache_result_query_length_served", stats.Caches.Result.CacheQueryLengthServed(),
+ "cache_result_req", resultCache.EntriesRequested,
+ "cache_result_hit", resultCache.EntriesFound,
+ "cache_result_download_time", resultCache.CacheDownloadTime(),
+ "cache_result_query_length_served", resultCache.CacheQueryLengthServed(),
}...)
logValues = append(logValues, tagsToKeyValues(queryTags)...)
diff --git a/pkg/logql/rangemapper.go b/pkg/logql/rangemapper.go
index 975f63f4c952..14cf76f1475a 100644
--- a/pkg/logql/rangemapper.go
+++ b/pkg/logql/rangemapper.go
@@ -57,6 +57,20 @@ type RangeMapper struct {
splitByInterval time.Duration
metrics *MapperMetrics
stats *MapperStats
+
+ splitAlignTs time.Time
+}
+
+// NewRangeMapperWithSplitAlign is similar to `NewRangeMapper` except that it accepts an additional `splitAlign` argument, which is used to
+// align the generated subqueries. See the `rangeSplitAlign` method for more information.
+func NewRangeMapperWithSplitAlign(interval time.Duration, splitAlign time.Time, metrics *MapperMetrics, stats *MapperStats) (RangeMapper, error) {
+ rm, err := NewRangeMapper(interval, metrics, stats)
+ if err != nil {
+ return RangeMapper{}, err
+ }
+ rm.splitAlignTs = splitAlign
+
+ return rm, nil
}
// NewRangeMapper creates a new RangeMapper instance with the given duration as
@@ -327,6 +341,77 @@ func (m RangeMapper) getOriginalOffset(expr syntax.SampleExpr) (offset time.Dura
// rangeInterval should be greater than m.splitByInterval, otherwise the resultant expression
// will have an unnecessary aggregation operation
func (m RangeMapper) mapConcatSampleExpr(expr syntax.SampleExpr, rangeInterval time.Duration, recorder *downstreamRecorder) syntax.SampleExpr {
+ if m.splitAlignTs.IsZero() {
+ return m.rangeSplit(expr, rangeInterval, recorder)
+ }
+ return m.rangeSplitAlign(expr, rangeInterval, recorder)
+}
+
+// rangeSplitAlign tries to split the given `rangeInterval` into units of `m.splitByInterval`, keeping as many of the units as possible aligned to `m.splitByInterval` boundaries.
+// Consider the following example of a real use case.
+// Instant Query: `sum(rate({foo="bar"}[3h]))`
+// execTs: 12:34:00
+// splitBy: 1h
+// Given the above parameters, the query will be split into the following subqueries:
+// 1. sum(rate({foo="bar"}[34m]))
+// 2. sum(rate({foo="bar"}[1h] offset 34m))
+// 3. sum(rate({foo="bar"}[1h] offset 1h34m))
+// 4. sum(rate({foo="bar"}[26m] offset 2h34m))
+func (m RangeMapper) rangeSplitAlign(
+ expr syntax.SampleExpr, rangeInterval time.Duration, recorder *downstreamRecorder,
+) syntax.SampleExpr {
+ if rangeInterval <= m.splitByInterval {
+ return expr
+ }
+
+ originalOffset, err := m.getOriginalOffset(expr)
+ if err != nil {
+ return expr
+ }
+
+ align := m.splitAlignTs.Sub(m.splitAlignTs.Truncate(m.splitByInterval)) // say, 12:34:00 - 12:00:00(truncated) = 34m
+
+ if align == 0 {
+ return m.rangeSplit(expr, rangeInterval, recorder) // Don't have to align
+ }
+
+ var (
+ newRng = align
+
+ // TODO(kavi): If the originalOffset is non-zero, there may be an edge case where the generated subqueries won't be aligned correctly. Handle this edge case in a separate PR.
+ newOffset = originalOffset
+ downstreams *ConcatSampleExpr
+ pendingRangeInterval = rangeInterval
+ splits = 0
+ )
+
+ // first subquery
+ downstreams = appendDownstream(downstreams, expr, newRng, newOffset)
+ splits++
+
+ newOffset += align // e.g: offset 34m
+ pendingRangeInterval -= newRng
+ newRng = m.splitByInterval // [1h]
+
+ // Rest of the subqueries.
+ for pendingRangeInterval > 0 {
+ if pendingRangeInterval < m.splitByInterval {
+ newRng = pendingRangeInterval // last subquery
+ }
+ downstreams = appendDownstream(downstreams, expr, newRng, newOffset)
+ newOffset += m.splitByInterval
+ pendingRangeInterval -= newRng
+ splits++
+ }
+
+ // update stats and metrics
+ m.stats.AddSplitQueries(splits)
+ recorder.Add(splits, MetricsKey)
+
+ return downstreams
+}
+
+func (m RangeMapper) rangeSplit(expr syntax.SampleExpr, rangeInterval time.Duration, recorder *downstreamRecorder) syntax.SampleExpr {
splitCount := int(math.Ceil(float64(rangeInterval) / float64(m.splitByInterval)))
if splitCount <= 1 {
return expr
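
Detached from the AST machinery, the alignment arithmetic reduces to: take the execution time's remainder modulo the split interval as the first (short) range, then emit full split-interval ranges with growing offsets until the range interval is exhausted. A minimal sketch reproducing the worked example from the comment above (execTs 12:34:00, splitBy 1h, rangeInterval 3h):

package main

import (
	"fmt"
	"time"
)

func main() {
	execTs := time.Date(1970, 1, 1, 12, 34, 0, 0, time.UTC)
	splitBy := time.Hour
	rangeInterval := 3 * time.Hour

	align := execTs.Sub(execTs.Truncate(splitBy)) // 12:34:00 - 12:00:00 = 34m
	rng, offset, pending := align, time.Duration(0), rangeInterval

	fmt.Printf("[%v]\n", rng) // first, unaligned subquery
	offset += align
	pending -= rng
	rng = splitBy
	for pending > 0 {
		if pending < splitBy {
			rng = pending // last, partial subquery
		}
		fmt.Printf("[%v] offset %v\n", rng, offset)
		offset += splitBy
		pending -= rng
	}
	// Output:
	// [34m0s]
	// [1h0m0s] offset 34m0s
	// [1h0m0s] offset 1h34m0s
	// [26m0s] offset 2h34m0s
}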
diff --git a/pkg/logql/rangemapper_test.go b/pkg/logql/rangemapper_test.go
index 562ac0cd168e..5e95486a8c8e 100644
--- a/pkg/logql/rangemapper_test.go
+++ b/pkg/logql/rangemapper_test.go
@@ -93,6 +93,84 @@ func Test_SplitRangeInterval(t *testing.T) {
}
}
+func Test_RangeMapperSplitAlign(t *testing.T) {
+ cases := []struct {
+ name string
+ expr string
+ queryTime time.Time
+ splitByInterval time.Duration
+ expected string
+ expectedSplits int
+ }{
+ {
+ name: "query_time_aligned_with_split_by",
+ expr: `bytes_over_time({app="foo"}[3m])`,
+ expected: `sum without() (
+ downstream>
+ ++ downstream>
+ ++ downstream>
+ )`,
+ queryTime: time.Unix(60, 0), // 1970 00:01:00
+ splitByInterval: 1 * time.Minute,
+ expectedSplits: 3,
+ },
+ {
+ name: "query_time_aligned_with_split_by_with_original_offset",
+ expr: `bytes_over_time({app="foo"}[3m] offset 20m10s)`, // NOTE: original query has offset, which should be considered in all the splits subquery
+ expected: `sum without() (
+ downstream>
+ ++ downstream>
+ ++ downstream>
+ )`,
+ queryTime: time.Unix(60, 0), // 1970 00:01:00
+ splitByInterval: 1 * time.Minute,
+ expectedSplits: 3,
+ },
+ {
+ name: "query_time_not_aligned_with_split_by",
+ expr: `bytes_over_time({app="foo"}[3h])`,
+ expected: `sum without() (
+ downstream>
+ ++ downstream>
+ ++ downstream>
+ ++ downstream>
+ )`,
+ queryTime: time.Date(0, 0, 0, 12, 54, 0, 0, time.UTC), // 1970 12:54:00
+ splitByInterval: 1 * time.Hour,
+ expectedSplits: 4,
+ },
+ {
+ name: "query_time_not_aligned_with_split_by_with_original_offset",
+ expr: `bytes_over_time({app="foo"}[3h] offset 1h2m20s)`, // NOTE: original query has offset, which should be considered in all the splits subquery
+ expected: `sum without() (
+ downstream>
+ ++ downstream>
+ ++ downstream>
+ ++ downstream>
+ )`,
+ queryTime: time.Date(0, 0, 0, 12, 54, 0, 0, time.UTC), // 1970 12:54:00
+ splitByInterval: 1 * time.Hour,
+ expectedSplits: 4,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ mapperStats := NewMapperStats()
+ rvm, err := NewRangeMapperWithSplitAlign(tc.splitByInterval, tc.queryTime, nilShardMetrics, mapperStats)
+ require.NoError(t, err)
+
+ noop, mappedExpr, err := rvm.Parse(syntax.MustParseExpr(tc.expr))
+ require.NoError(t, err)
+
+ require.Equal(t, removeWhiteSpace(tc.expected), removeWhiteSpace(mappedExpr.String()))
+ require.Equal(t, tc.expectedSplits, mapperStats.GetSplitQueries())
+ require.False(t, noop)
+
+ })
+ }
+}
+
func Test_SplitRangeVectorMapping(t *testing.T) {
for _, tc := range []struct {
expr string
@@ -1675,7 +1753,7 @@ func Test_SplitRangeVectorMapping(t *testing.T) {
// Non-splittable vector aggregators - should go deeper in the AST
{
`topk(2, count_over_time({app="foo"}[3m]))`,
- `topk(2,
+ `topk(2,
sum without () (
downstream>
++ downstream>
@@ -1713,7 +1791,7 @@ func Test_SplitRangeVectorMapping(t *testing.T) {
++ downstream>
++ downstream>
)
- ),
+ ),
"x", "$1", "a", "(.*)"
)`,
3,
@@ -1727,7 +1805,7 @@ func Test_SplitRangeVectorMapping(t *testing.T) {
++ downstream>
++ downstream>
)
- / 180),
+ / 180),
"foo", "$1", "service", "(.*):.*"
)`,
3,
diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go
index 4fbddc790b8b..41a96ca24c75 100644
--- a/pkg/logqlmodel/stats/context.go
+++ b/pkg/logqlmodel/stats/context.go
@@ -55,17 +55,18 @@ type Context struct {
type CacheType string
const (
- ChunkCache CacheType = "chunk" //nolint:staticcheck
- IndexCache CacheType = "index" //nolint:staticcheck
- ResultCache CacheType = "result" //nolint:staticcheck
- StatsResultCache CacheType = "stats-result" //nolint:staticcheck
- VolumeResultCache CacheType = "volume-result" //nolint:staticcheck
- WriteDedupeCache CacheType = "write-dedupe" //nolint:staticcheck
- SeriesResultCache CacheType = "series-result" //nolint:staticcheck
- LabelResultCache CacheType = "label-result" //nolint:staticcheck
- BloomFilterCache CacheType = "bloom-filter" //nolint:staticcheck
- BloomBlocksCache CacheType = "bloom-blocks" //nolint:staticcheck
- BloomMetasCache CacheType = "bloom-metas" //nolint:staticcheck
+ ChunkCache CacheType = "chunk" //nolint:staticcheck
+ IndexCache CacheType = "index" //nolint:staticcheck
+ ResultCache CacheType = "result" //nolint:staticcheck
+ StatsResultCache CacheType = "stats-result" //nolint:staticcheck
+ VolumeResultCache CacheType = "volume-result" //nolint:staticcheck
+ InstantMetricResultsCache CacheType = "instant-metric-result" // nolint:staticcheck
+ WriteDedupeCache CacheType = "write-dedupe" //nolint:staticcheck
+ SeriesResultCache CacheType = "series-result" //nolint:staticcheck
+ LabelResultCache CacheType = "label-result" //nolint:staticcheck
+ BloomFilterCache CacheType = "bloom-filter" //nolint:staticcheck
+ BloomBlocksCache CacheType = "bloom-blocks" //nolint:staticcheck
+ BloomMetasCache CacheType = "bloom-metas" //nolint:staticcheck
)
// NewContext creates a new statistics context
@@ -98,13 +99,14 @@ func (c *Context) Ingester() Ingester {
// Caches returns the cache statistics accumulated so far.
func (c *Context) Caches() Caches {
return Caches{
- Chunk: c.caches.Chunk,
- Index: c.caches.Index,
- Result: c.caches.Result,
- StatsResult: c.caches.StatsResult,
- VolumeResult: c.caches.VolumeResult,
- SeriesResult: c.caches.SeriesResult,
- LabelResult: c.caches.LabelResult,
+ Chunk: c.caches.Chunk,
+ Index: c.caches.Index,
+ Result: c.caches.Result,
+ StatsResult: c.caches.StatsResult,
+ VolumeResult: c.caches.VolumeResult,
+ SeriesResult: c.caches.SeriesResult,
+ LabelResult: c.caches.LabelResult,
+ InstantMetricResult: c.caches.InstantMetricResult,
}
}
@@ -222,6 +224,7 @@ func (c *Caches) Merge(m Caches) {
c.VolumeResult.Merge(m.VolumeResult)
c.SeriesResult.Merge(m.SeriesResult)
c.LabelResult.Merge(m.LabelResult)
+ c.InstantMetricResult.Merge(m.InstantMetricResult)
}
func (c *Cache) Merge(m Cache) {
@@ -470,6 +473,8 @@ func (c *Context) getCacheStatsByType(t CacheType) *Cache {
stats = &c.caches.SeriesResult
case LabelResultCache:
stats = &c.caches.LabelResult
+ case InstantMetricResultsCache:
+ stats = &c.caches.InstantMetricResult
default:
return nil
}
@@ -571,6 +576,12 @@ func (c Caches) Log(log log.Logger) {
"Cache.Result.EntriesStored", c.Result.EntriesStored,
"Cache.Result.BytesSent", humanize.Bytes(uint64(c.Result.BytesSent)),
"Cache.Result.BytesReceived", humanize.Bytes(uint64(c.Result.BytesReceived)),
- "Cache.Result.DownloadTime", c.Result.CacheDownloadTime(),
+ "Cache.InstantMetricResult.Requests", c.InstantMetricResult.Requests,
+ "Cache.InstantMetricResult.EntriesRequested", c.InstantMetricResult.EntriesRequested,
+ "Cache.InstantMetricResult.EntriesFound", c.InstantMetricResult.EntriesFound,
+ "Cache.InstantMetricResult.EntriesStored", c.InstantMetricResult.EntriesStored,
+ "Cache.InstantMetricResult.BytesSent", humanize.Bytes(uint64(c.InstantMetricResult.BytesSent)),
+ "Cache.InstantMetricResult.BytesReceived", humanize.Bytes(uint64(c.InstantMetricResult.BytesReceived)),
+ "Cache.InstantMetricResult.DownloadTime", c.InstantMetricResult.CacheDownloadTime(),
)
}
diff --git a/pkg/logqlmodel/stats/stats.pb.go b/pkg/logqlmodel/stats/stats.pb.go
index 75be704020c9..65f8f0f64238 100644
--- a/pkg/logqlmodel/stats/stats.pb.go
+++ b/pkg/logqlmodel/stats/stats.pb.go
@@ -95,13 +95,14 @@ func (m *Result) GetCaches() Caches {
}
type Caches struct {
- Chunk Cache `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk"`
- Index Cache `protobuf:"bytes,2,opt,name=index,proto3" json:"index"`
- Result Cache `protobuf:"bytes,3,opt,name=result,proto3" json:"result"`
- StatsResult Cache `protobuf:"bytes,4,opt,name=statsResult,proto3" json:"statsResult"`
- VolumeResult Cache `protobuf:"bytes,5,opt,name=volumeResult,proto3" json:"volumeResult"`
- SeriesResult Cache `protobuf:"bytes,6,opt,name=seriesResult,proto3" json:"seriesResult"`
- LabelResult Cache `protobuf:"bytes,7,opt,name=labelResult,proto3" json:"labelResult"`
+ Chunk Cache `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk"`
+ Index Cache `protobuf:"bytes,2,opt,name=index,proto3" json:"index"`
+ Result Cache `protobuf:"bytes,3,opt,name=result,proto3" json:"result"`
+ StatsResult Cache `protobuf:"bytes,4,opt,name=statsResult,proto3" json:"statsResult"`
+ VolumeResult Cache `protobuf:"bytes,5,opt,name=volumeResult,proto3" json:"volumeResult"`
+ SeriesResult Cache `protobuf:"bytes,6,opt,name=seriesResult,proto3" json:"seriesResult"`
+ LabelResult Cache `protobuf:"bytes,7,opt,name=labelResult,proto3" json:"labelResult"`
+ InstantMetricResult Cache `protobuf:"bytes,8,opt,name=instantMetricResult,proto3" json:"instantMetricResult"`
}
func (m *Caches) Reset() { *m = Caches{} }
@@ -185,6 +186,13 @@ func (m *Caches) GetLabelResult() Cache {
return Cache{}
}
+func (m *Caches) GetInstantMetricResult() Cache {
+ if m != nil {
+ return m.InstantMetricResult
+ }
+ return Cache{}
+}
+
// Summary is the summary of a query statistics.
type Summary struct {
// Total bytes processed per second.
@@ -773,83 +781,85 @@ func init() {
func init() { proto.RegisterFile("pkg/logqlmodel/stats/stats.proto", fileDescriptor_6cdfe5d2aea33ebb) }
var fileDescriptor_6cdfe5d2aea33ebb = []byte{
- // 1215 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x57, 0x4d, 0x6f, 0xe3, 0x54,
- 0x17, 0x8e, 0x27, 0xaf, 0x93, 0xce, 0xed, 0xe7, 0xdc, 0x76, 0xde, 0xc9, 0x80, 0x64, 0x97, 0xc0,
- 0x88, 0x22, 0x50, 0x23, 0x3e, 0x24, 0x04, 0x62, 0x24, 0xe4, 0x0e, 0x95, 0x2a, 0x75, 0x44, 0x39,
- 0x81, 0x0d, 0x3b, 0xc7, 0xbe, 0x4d, 0xa2, 0x3a, 0x76, 0x6a, 0x5f, 0x97, 0xe9, 0x0a, 0x7e, 0x02,
- 0x3f, 0x83, 0x0d, 0x2b, 0x56, 0x48, 0x88, 0x0d, 0x9b, 0x59, 0x76, 0x39, 0x2b, 0x8b, 0xa6, 0x1b,
- 0xe4, 0xd5, 0x48, 0xfc, 0x01, 0x74, 0xcf, 0xbd, 0xf1, 0x57, 0x9c, 0x99, 0x6e, 0xe2, 0x7b, 0x9e,
- 0xf3, 0x3c, 0xe7, 0x7e, 0x9e, 0x73, 0x6f, 0xc8, 0xee, 0xf4, 0x6c, 0xd8, 0xf3, 0x82, 0xe1, 0xb9,
- 0x37, 0x09, 0x5c, 0xe6, 0xf5, 0x22, 0x6e, 0xf3, 0x48, 0xfe, 0xee, 0x4f, 0xc3, 0x80, 0x07, 0x54,
- 0x47, 0xe3, 0x8d, 0x9d, 0x61, 0x30, 0x0c, 0x10, 0xe9, 0x89, 0x96, 0x74, 0x76, 0xff, 0xd5, 0x48,
- 0x0b, 0x58, 0x14, 0x7b, 0x9c, 0x7e, 0x46, 0xda, 0x51, 0x3c, 0x99, 0xd8, 0xe1, 0x65, 0x47, 0xdb,
- 0xd5, 0xf6, 0x56, 0x3f, 0xda, 0xd8, 0x97, 0x61, 0xfa, 0x12, 0xb5, 0x36, 0x9f, 0x27, 0x66, 0x23,
- 0x4d, 0xcc, 0x39, 0x0d, 0xe6, 0x0d, 0x21, 0x3d, 0x8f, 0x59, 0x38, 0x66, 0x61, 0xe7, 0x4e, 0x49,
- 0xfa, 0x8d, 0x44, 0x73, 0xa9, 0xa2, 0xc1, 0xbc, 0x41, 0x1f, 0x93, 0x95, 0xb1, 0x3f, 0x64, 0x11,
- 0x67, 0x61, 0xa7, 0x89, 0xda, 0x4d, 0xa5, 0x3d, 0x52, 0xb0, 0xb5, 0xa5, 0xc4, 0x19, 0x11, 0xb2,
- 0x16, 0xfd, 0x84, 0xb4, 0x1c, 0xdb, 0x19, 0xb1, 0xa8, 0xf3, 0x3f, 0x14, 0xaf, 0x2b, 0xf1, 0x01,
- 0x82, 0xd6, 0xba, 0x92, 0xea, 0x48, 0x02, 0xc5, 0xed, 0xfe, 0xd9, 0x24, 0x2d, 0xc9, 0xa0, 0x1f,
- 0x12, 0xdd, 0x19, 0xc5, 0xfe, 0x99, 0x9a, 0xf3, 0x5a, 0x51, 0x5f, 0x90, 0x0b, 0x0a, 0xc8, 0x8f,
- 0x90, 0x8c, 0x7d, 0x97, 0x3d, 0x53, 0x73, 0x5d, 0x22, 0x41, 0x0a, 0xc8, 0x8f, 0x18, 0x66, 0x88,
- 0xab, 0xac, 0xe6, 0x58, 0xd6, 0x6c, 0x28, 0x8d, 0xe2, 0x80, 0xfa, 0xd2, 0x03, 0xb2, 0x8a, 0x34,
- 0xb9, 0x41, 0x6a, 0x86, 0x65, 0xe9, 0xb6, 0x92, 0x16, 0x89, 0x50, 0x34, 0xe8, 0x21, 0x59, 0xbb,
- 0x08, 0xbc, 0x78, 0xc2, 0x54, 0x14, 0xbd, 0x26, 0xca, 0x8e, 0x8a, 0x52, 0x62, 0x42, 0xc9, 0x12,
- 0x71, 0x22, 0xb1, 0x65, 0xf3, 0xd1, 0xb4, 0x5e, 0x15, 0xa7, 0xc8, 0x84, 0x92, 0x25, 0x26, 0xe5,
- 0xd9, 0x03, 0xe6, 0xa9, 0x30, 0xed, 0x57, 0x4d, 0xaa, 0x40, 0x84, 0xa2, 0xd1, 0xfd, 0xbd, 0x45,
- 0xda, 0xea, 0x58, 0xd2, 0xef, 0xc8, 0x83, 0xc1, 0x25, 0x67, 0xd1, 0x49, 0x18, 0x38, 0x2c, 0x8a,
- 0x98, 0x7b, 0xc2, 0xc2, 0x3e, 0x73, 0x02, 0xdf, 0xc5, 0x3d, 0x6d, 0x5a, 0x6f, 0xa6, 0x89, 0xb9,
- 0x8c, 0x02, 0xcb, 0x1c, 0x22, 0xac, 0x37, 0xf6, 0x6b, 0xc3, 0xde, 0xc9, 0xc3, 0x2e, 0xa1, 0xc0,
- 0x32, 0x07, 0x3d, 0x22, 0xdb, 0x3c, 0xe0, 0xb6, 0x67, 0x95, 0xba, 0xc5, 0x63, 0xd1, 0xb4, 0x1e,
- 0xa4, 0x89, 0x59, 0xe7, 0x86, 0x3a, 0x30, 0x0b, 0x75, 0x5c, 0xea, 0x0a, 0x8f, 0x49, 0x31, 0x54,
- 0xd9, 0x0d, 0x75, 0x20, 0xdd, 0x23, 0x2b, 0xec, 0x19, 0x73, 0xbe, 0x1d, 0x4f, 0x18, 0x1e, 0x10,
- 0xcd, 0x5a, 0x13, 0x09, 0x37, 0xc7, 0x20, 0x6b, 0xd1, 0xf7, 0xc9, 0xdd, 0xf3, 0x98, 0xc5, 0x0c,
- 0xa9, 0x2d, 0xa4, 0xae, 0xa7, 0x89, 0x99, 0x83, 0x90, 0x37, 0xe9, 0x3e, 0x21, 0x51, 0x3c, 0x90,
- 0xa9, 0x1e, 0xe1, 0x56, 0x37, 0xad, 0x8d, 0x34, 0x31, 0x0b, 0x28, 0x14, 0xda, 0xf4, 0x98, 0xec,
- 0xe0, 0xe8, 0xbe, 0xf2, 0xb9, 0x3c, 0x31, 0x3c, 0x0e, 0x7d, 0xe6, 0x76, 0x56, 0x50, 0xd9, 0x49,
- 0x13, 0xb3, 0xd6, 0x0f, 0xb5, 0x28, 0xed, 0x92, 0x56, 0x34, 0xf5, 0xc6, 0x3c, 0xea, 0xdc, 0x45,
- 0x3d, 0x11, 0x29, 0x26, 0x11, 0x50, 0x5f, 0xe4, 0x8c, 0xec, 0xd0, 0x8d, 0x3a, 0xa4, 0xc0, 0x41,
- 0x04, 0xd4, 0x37, 0x1b, 0xd5, 0x49, 0x10, 0xf1, 0xc3, 0xb1, 0xc7, 0x59, 0x88, 0xab, 0xd7, 0x59,
- 0xad, 0x8c, 0xaa, 0xe2, 0x87, 0x5a, 0x94, 0xfe, 0x48, 0x1e, 0x21, 0xde, 0xe7, 0x61, 0xec, 0xf0,
- 0x38, 0x64, 0xee, 0x53, 0xc6, 0x6d, 0xd7, 0xe6, 0x76, 0xe5, 0x48, 0xac, 0x61, 0xf8, 0xf7, 0xd2,
- 0xc4, 0xbc, 0x9d, 0x00, 0x6e, 0x47, 0xeb, 0x7e, 0x41, 0xda, 0xaa, 0x2c, 0x8b, 0x4a, 0x16, 0xf1,
- 0x20, 0x64, 0x95, 0xe2, 0xd7, 0x17, 0x58, 0x5e, 0xc9, 0x90, 0x02, 0xf2, 0xd3, 0xfd, 0xf5, 0x0e,
- 0x59, 0x39, 0xca, 0xab, 0xef, 0x1a, 0xf6, 0x09, 0x4c, 0xe4, 0xad, 0xcc, 0x37, 0xdd, 0xda, 0x12,
- 0x15, 0xa0, 0x88, 0x43, 0xc9, 0xa2, 0x87, 0x84, 0xa2, 0x7d, 0x20, 0xaa, 0x69, 0xf4, 0xd4, 0xe6,
- 0xa8, 0x95, 0x49, 0xf5, 0xff, 0x34, 0x31, 0x6b, 0xbc, 0x50, 0x83, 0x65, 0xbd, 0x5b, 0x68, 0x47,
- 0x2a, 0x87, 0xf2, 0xde, 0x15, 0x0e, 0x25, 0x8b, 0x7e, 0x4e, 0x36, 0xf2, 0x0c, 0xe8, 0x33, 0x9f,
- 0xab, 0x84, 0xa1, 0x69, 0x62, 0x56, 0x3c, 0x50, 0xb1, 0xf3, 0xf5, 0xd2, 0x6f, 0xbd, 0x5e, 0x7f,
- 0x34, 0x89, 0x8e, 0xfe, 0xac, 0x63, 0x39, 0x09, 0x60, 0xa7, 0xaa, 0x3c, 0xe5, 0x1d, 0x67, 0x1e,
- 0xa8, 0xd8, 0xf4, 0x6b, 0x72, 0xbf, 0x80, 0x3c, 0x09, 0x7e, 0xf0, 0xbd, 0xc0, 0x76, 0xb3, 0x55,
- 0x7b, 0x98, 0x26, 0x66, 0x3d, 0x01, 0xea, 0x61, 0xb1, 0x07, 0x4e, 0x09, 0xc3, 0x7c, 0x6e, 0xe6,
- 0x7b, 0xb0, 0xe8, 0x85, 0x1a, 0x8c, 0x3a, 0xe4, 0xa1, 0x48, 0xde, 0x4b, 0x60, 0xa7, 0x2c, 0x64,
- 0xbe, 0xc3, 0xdc, 0xfc, 0xfc, 0x75, 0xd6, 0x77, 0xb5, 0xbd, 0x15, 0xeb, 0x51, 0x9a, 0x98, 0x6f,
- 0x2d, 0x25, 0xcd, 0x0f, 0x29, 0x2c, 0x8f, 0x93, 0xdf, 0xd1, 0x95, 0x1b, 0x50, 0x60, 0x4b, 0xee,
- 0xe8, 0xf9, 0xfc, 0x80, 0x9d, 0x46, 0x87, 0x8c, 0x3b, 0xa3, 0xac, 0xb4, 0x15, 0xe7, 0x57, 0xf2,
- 0x42, 0x0d, 0xd6, 0xfd, 0x4d, 0x27, 0x3a, 0xf6, 0x23, 0xb6, 0x6f, 0xc4, 0x6c, 0x57, 0x76, 0x2a,
- 0x32, 0xaa, 0x78, 0x6e, 0xca, 0x1e, 0xa8, 0xd8, 0x25, 0xad, 0xac, 0x1d, 0x7a, 0x8d, 0x56, 0x56,
- 0x8d, 0x8a, 0x4d, 0x0f, 0xc8, 0x3d, 0x97, 0x39, 0xc1, 0x64, 0x1a, 0x62, 0xfa, 0xca, 0xae, 0x5b,
- 0x28, 0xbf, 0x9f, 0x26, 0xe6, 0xa2, 0x13, 0x16, 0xa1, 0x6a, 0x10, 0x39, 0x86, 0x76, 0x7d, 0x10,
- 0x39, 0x8c, 0x45, 0x88, 0x3e, 0x26, 0x9b, 0xd5, 0x71, 0xc8, 0xc2, 0xbc, 0x9d, 0x26, 0x66, 0xd5,
- 0x05, 0x55, 0x40, 0xc8, 0xf1, 0x2c, 0x3e, 0x89, 0xa7, 0xde, 0xd8, 0xb1, 0x85, 0xfc, 0x6e, 0x2e,
- 0xaf, 0xb8, 0xa0, 0x0a, 0x08, 0xf9, 0xb4, 0x52, 0x80, 0x49, 0x2e, 0xaf, 0xb8, 0xa0, 0x0a, 0xd0,
- 0x29, 0xd9, 0xcd, 0x16, 0x76, 0x49, 0x89, 0x54, 0x05, 0xfd, 0x9d, 0x34, 0x31, 0x5f, 0xcb, 0x85,
- 0xd7, 0x32, 0xe8, 0x25, 0x79, 0xbb, 0xb8, 0x86, 0xcb, 0x3a, 0x95, 0x65, 0xfe, 0xdd, 0x34, 0x31,
- 0x6f, 0x43, 0x87, 0xdb, 0x90, 0xba, 0x7f, 0x35, 0x89, 0x8e, 0x4f, 0x29, 0x51, 0x23, 0x99, 0xbc,
- 0x16, 0x0f, 0x83, 0xd8, 0x2f, 0x55, 0xe8, 0x22, 0x0e, 0x25, 0x8b, 0x7e, 0x49, 0xb6, 0xd8, 0xfc,
- 0x32, 0x3d, 0x8f, 0x45, 0xad, 0x97, 0x95, 0x46, 0xb7, 0x76, 0xd2, 0xc4, 0x5c, 0xf0, 0xc1, 0x02,
- 0x42, 0x3f, 0x25, 0xeb, 0x0a, 0xc3, 0xe2, 0x27, 0x1f, 0x38, 0xba, 0x75, 0x2f, 0x4d, 0xcc, 0xb2,
- 0x03, 0xca, 0xa6, 0x10, 0xe2, 0x8b, 0x0c, 0x98, 0xc3, 0xc6, 0x17, 0xd9, 0x73, 0x06, 0x85, 0x25,
- 0x07, 0x94, 0x4d, 0xf1, 0x30, 0x41, 0x00, 0x4b, 0xba, 0x4c, 0x2f, 0x7c, 0x98, 0x64, 0x20, 0xe4,
- 0x4d, 0xf1, 0xde, 0x09, 0xe5, 0x58, 0x65, 0x2e, 0xe9, 0xf2, 0xbd, 0x33, 0xc7, 0x20, 0x6b, 0x89,
- 0x05, 0x74, 0x8b, 0x25, 0xb2, 0x9d, 0x5f, 0x32, 0x45, 0x1c, 0x4a, 0x96, 0xc8, 0x37, 0x2c, 0x67,
- 0xc7, 0xcc, 0x1f, 0xf2, 0x51, 0x9f, 0x85, 0x17, 0xd9, 0x2b, 0x06, 0xf3, 0x6d, 0xc1, 0x09, 0x8b,
- 0x90, 0x35, 0xb8, 0xba, 0x36, 0x1a, 0x2f, 0xae, 0x8d, 0xc6, 0xcb, 0x6b, 0x43, 0xfb, 0x69, 0x66,
- 0x68, 0xbf, 0xcc, 0x0c, 0xed, 0xf9, 0xcc, 0xd0, 0xae, 0x66, 0x86, 0xf6, 0xf7, 0xcc, 0xd0, 0xfe,
- 0x99, 0x19, 0x8d, 0x97, 0x33, 0x43, 0xfb, 0xf9, 0xc6, 0x68, 0x5c, 0xdd, 0x18, 0x8d, 0x17, 0x37,
- 0x46, 0xe3, 0xfb, 0x0f, 0x86, 0x63, 0x3e, 0x8a, 0x07, 0xfb, 0x4e, 0x30, 0xe9, 0x0d, 0x43, 0xfb,
- 0xd4, 0xf6, 0xed, 0x9e, 0x17, 0x9c, 0x8d, 0x7b, 0x75, 0x7f, 0x14, 0x07, 0x2d, 0xfc, 0x1b, 0xf8,
- 0xf1, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xe8, 0xef, 0xe7, 0x47, 0x0e, 0x00, 0x00,
+ // 1241 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x57, 0x4b, 0x6f, 0xe3, 0x54,
+ 0x14, 0x8e, 0x27, 0xe3, 0xa4, 0xbd, 0x7d, 0xce, 0x6d, 0x87, 0xc9, 0x30, 0x92, 0x5d, 0x02, 0x23,
+ 0x8a, 0x40, 0x8d, 0x78, 0x48, 0x08, 0xc4, 0x48, 0xc8, 0x1d, 0x2a, 0x55, 0x6a, 0x45, 0x39, 0x81,
+ 0x0d, 0xac, 0x1c, 0xfb, 0x36, 0xb1, 0xea, 0xd8, 0xa9, 0x7d, 0x5d, 0xa6, 0x2b, 0xf8, 0x09, 0xec,
+ 0xf9, 0x03, 0x6c, 0x58, 0xb1, 0x42, 0x62, 0xc7, 0x66, 0x96, 0x5d, 0xce, 0xca, 0xa2, 0xe9, 0x06,
+ 0x79, 0x35, 0x12, 0x7f, 0x00, 0xdd, 0x47, 0x6c, 0x5f, 0xc7, 0x99, 0xe9, 0x26, 0xbe, 0xe7, 0x3b,
+ 0xdf, 0x77, 0xee, 0xc3, 0xe7, 0x1c, 0xdf, 0xa0, 0x9d, 0xc9, 0xd9, 0xb0, 0xe7, 0x87, 0xc3, 0x73,
+ 0x7f, 0x1c, 0xba, 0xc4, 0xef, 0xc5, 0xd4, 0xa6, 0xb1, 0xf8, 0xdd, 0x9b, 0x44, 0x21, 0x0d, 0xb1,
+ 0xce, 0x8d, 0x37, 0xb7, 0x87, 0xe1, 0x30, 0xe4, 0x48, 0x8f, 0x8d, 0x84, 0xb3, 0xfb, 0x9f, 0x86,
+ 0x5a, 0x40, 0xe2, 0xc4, 0xa7, 0xf8, 0x33, 0xd4, 0x8e, 0x93, 0xf1, 0xd8, 0x8e, 0x2e, 0x3b, 0xda,
+ 0x8e, 0xb6, 0xbb, 0xf2, 0xd1, 0xfa, 0x9e, 0x08, 0xd3, 0x17, 0xa8, 0xb5, 0xf1, 0x3c, 0x35, 0x1b,
+ 0x59, 0x6a, 0xce, 0x68, 0x30, 0x1b, 0x30, 0xe9, 0x79, 0x42, 0x22, 0x8f, 0x44, 0x9d, 0x3b, 0x8a,
+ 0xf4, 0x1b, 0x81, 0x16, 0x52, 0x49, 0x83, 0xd9, 0x00, 0x3f, 0x41, 0x4b, 0x5e, 0x30, 0x24, 0x31,
+ 0x25, 0x51, 0xa7, 0xc9, 0xb5, 0x1b, 0x52, 0x7b, 0x28, 0x61, 0x6b, 0x53, 0x8a, 0x73, 0x22, 0xe4,
+ 0x23, 0xfc, 0x09, 0x6a, 0x39, 0xb6, 0x33, 0x22, 0x71, 0xe7, 0x2e, 0x17, 0xaf, 0x49, 0xf1, 0x3e,
+ 0x07, 0xad, 0x35, 0x29, 0xd5, 0x39, 0x09, 0x24, 0xb7, 0xfb, 0xeb, 0x5d, 0xd4, 0x12, 0x0c, 0xfc,
+ 0x21, 0xd2, 0x9d, 0x51, 0x12, 0x9c, 0xc9, 0x3d, 0xaf, 0x96, 0xf5, 0x25, 0x39, 0xa3, 0x80, 0x78,
+ 0x30, 0x89, 0x17, 0xb8, 0xe4, 0x99, 0xdc, 0xeb, 0x02, 0x09, 0xa7, 0x80, 0x78, 0xb0, 0x65, 0x46,
+ 0xfc, 0x94, 0xe5, 0x1e, 0x55, 0xcd, 0xba, 0xd4, 0x48, 0x0e, 0xc8, 0x27, 0xde, 0x47, 0x2b, 0x9c,
+ 0x26, 0x5e, 0x90, 0xdc, 0xa1, 0x2a, 0xdd, 0x92, 0xd2, 0x32, 0x11, 0xca, 0x06, 0x3e, 0x40, 0xab,
+ 0x17, 0xa1, 0x9f, 0x8c, 0x89, 0x8c, 0xa2, 0xd7, 0x44, 0xd9, 0x96, 0x51, 0x14, 0x26, 0x28, 0x16,
+ 0x8b, 0x13, 0xb3, 0x57, 0x36, 0x5b, 0x4d, 0xeb, 0x55, 0x71, 0xca, 0x4c, 0x50, 0x2c, 0xb6, 0x29,
+ 0xdf, 0x1e, 0x10, 0x5f, 0x86, 0x69, 0xbf, 0x6a, 0x53, 0x25, 0x22, 0x94, 0x0d, 0xfc, 0x03, 0xda,
+ 0xf2, 0x82, 0x98, 0xda, 0x01, 0x3d, 0x26, 0x34, 0xf2, 0x1c, 0x19, 0x6c, 0xa9, 0x26, 0xd8, 0x23,
+ 0x19, 0xac, 0x4e, 0x00, 0x75, 0x60, 0xf7, 0xcf, 0x16, 0x6a, 0xcb, 0x9c, 0xc7, 0xdf, 0xa1, 0x07,
+ 0x83, 0x4b, 0x4a, 0xe2, 0x93, 0x28, 0x74, 0x48, 0x1c, 0x13, 0xf7, 0x84, 0x44, 0x7d, 0xe2, 0x84,
+ 0x81, 0xcb, 0x13, 0xa6, 0x69, 0x3d, 0xca, 0x52, 0x73, 0x11, 0x05, 0x16, 0x39, 0x58, 0x58, 0xdf,
+ 0x0b, 0x6a, 0xc3, 0xde, 0x29, 0xc2, 0x2e, 0xa0, 0xc0, 0x22, 0x07, 0x3e, 0x44, 0x5b, 0x34, 0xa4,
+ 0xb6, 0x6f, 0x29, 0xd3, 0xf2, 0x9c, 0x6b, 0x5a, 0x0f, 0xd8, 0x21, 0xd4, 0xb8, 0xa1, 0x0e, 0xcc,
+ 0x43, 0x1d, 0x29, 0x53, 0xf1, 0x1c, 0x2c, 0x87, 0x52, 0xdd, 0x50, 0x07, 0xe2, 0x5d, 0xb4, 0x44,
+ 0x9e, 0x11, 0xe7, 0x5b, 0x6f, 0x4c, 0x78, 0xf6, 0x69, 0xd6, 0x2a, 0xab, 0xe6, 0x19, 0x06, 0xf9,
+ 0x08, 0xbf, 0x8f, 0x96, 0xcf, 0x13, 0x92, 0x10, 0x4e, 0x6d, 0x71, 0xea, 0x5a, 0x96, 0x9a, 0x05,
+ 0x08, 0xc5, 0x10, 0xef, 0x21, 0x14, 0x27, 0x03, 0xd1, 0x47, 0x62, 0x9e, 0x47, 0x4d, 0x6b, 0x3d,
+ 0x4b, 0xcd, 0x12, 0x0a, 0xa5, 0x31, 0x3e, 0x42, 0xdb, 0x7c, 0x75, 0x5f, 0x05, 0x54, 0xa4, 0x23,
+ 0x4d, 0xa2, 0x80, 0xb8, 0x3c, 0x69, 0x9a, 0x56, 0x27, 0x4b, 0xcd, 0x5a, 0x3f, 0xd4, 0xa2, 0xb8,
+ 0x8b, 0x5a, 0xf1, 0xc4, 0xf7, 0x68, 0xdc, 0x59, 0xe6, 0x7a, 0xc4, 0xea, 0x57, 0x20, 0x20, 0x9f,
+ 0x9c, 0x33, 0xb2, 0x23, 0x37, 0xee, 0xa0, 0x12, 0x87, 0x23, 0x20, 0x9f, 0xf9, 0xaa, 0x4e, 0xc2,
+ 0x98, 0x1e, 0x78, 0x3e, 0x25, 0x11, 0x3f, 0xbd, 0xce, 0x4a, 0x65, 0x55, 0x15, 0x3f, 0xd4, 0xa2,
+ 0xf8, 0x27, 0xf4, 0x98, 0xe3, 0x7d, 0x1a, 0x25, 0x0e, 0x4d, 0x22, 0xe2, 0x1e, 0x13, 0x6a, 0xbb,
+ 0x36, 0xb5, 0x2b, 0x29, 0xb1, 0xca, 0xc3, 0xbf, 0x97, 0xa5, 0xe6, 0xed, 0x04, 0x70, 0x3b, 0x5a,
+ 0xf7, 0x0b, 0xd4, 0x96, 0x3d, 0x9f, 0xb5, 0xc9, 0x98, 0x86, 0x11, 0xa9, 0x74, 0xd6, 0x3e, 0xc3,
+ 0x8a, 0x36, 0xc9, 0x29, 0x20, 0x1e, 0xdd, 0xdf, 0xef, 0xa0, 0xa5, 0xc3, 0xa2, 0xb5, 0xaf, 0xf2,
+ 0x39, 0x81, 0xb0, 0x3a, 0x16, 0xf5, 0xa6, 0x5b, 0x9b, 0xac, 0xbd, 0x94, 0x71, 0x50, 0x2c, 0x7c,
+ 0x80, 0x30, 0xb7, 0xf7, 0x59, 0xab, 0x8e, 0x8f, 0x6d, 0xca, 0xb5, 0xa2, 0xa8, 0xde, 0xc8, 0x52,
+ 0xb3, 0xc6, 0x0b, 0x35, 0x58, 0x3e, 0xbb, 0xc5, 0xed, 0x58, 0xd6, 0x50, 0x31, 0xbb, 0xc4, 0x41,
+ 0xb1, 0xf0, 0xe7, 0x68, 0xbd, 0xa8, 0x80, 0x3e, 0x09, 0xa8, 0x2c, 0x18, 0x9c, 0xa5, 0x66, 0xc5,
+ 0x03, 0x15, 0xbb, 0x38, 0x2f, 0xfd, 0xd6, 0xe7, 0xf5, 0x57, 0x13, 0xe9, 0xdc, 0x9f, 0x4f, 0x2c,
+ 0x36, 0x01, 0xe4, 0x54, 0xb6, 0xa7, 0x62, 0xe2, 0xdc, 0x03, 0x15, 0x1b, 0x7f, 0x8d, 0xee, 0x97,
+ 0x90, 0xa7, 0xe1, 0x8f, 0x81, 0x1f, 0xda, 0x6e, 0x7e, 0x6a, 0x0f, 0xb3, 0xd4, 0xac, 0x27, 0x40,
+ 0x3d, 0xcc, 0xde, 0x81, 0xa3, 0x60, 0xbc, 0x9e, 0x9b, 0xc5, 0x3b, 0x98, 0xf7, 0x42, 0x0d, 0x86,
+ 0x1d, 0xf4, 0x90, 0x15, 0xef, 0x25, 0x90, 0x53, 0x12, 0x91, 0xc0, 0x21, 0x6e, 0x91, 0x7f, 0x9d,
+ 0xb5, 0x1d, 0x6d, 0x77, 0xc9, 0x7a, 0x9c, 0xa5, 0xe6, 0x5b, 0x0b, 0x49, 0xb3, 0x24, 0x85, 0xc5,
+ 0x71, 0x8a, 0x0b, 0x40, 0xe5, 0xf3, 0xca, 0xb0, 0x05, 0x17, 0x80, 0xd9, 0xfe, 0x80, 0x9c, 0xc6,
+ 0x07, 0x84, 0x3a, 0xa3, 0xbc, 0xb5, 0x95, 0xf7, 0xa7, 0x78, 0xa1, 0x06, 0xeb, 0xfe, 0xa1, 0x23,
+ 0x9d, 0xcf, 0xc3, 0x5e, 0xdf, 0x88, 0xd8, 0xae, 0x98, 0x94, 0x55, 0x54, 0x39, 0x6f, 0x54, 0x0f,
+ 0x54, 0x6c, 0x45, 0x2b, 0x7a, 0x87, 0x5e, 0xa3, 0x15, 0x5d, 0xa3, 0x62, 0xe3, 0x7d, 0x74, 0xcf,
+ 0x25, 0x4e, 0x38, 0x9e, 0x44, 0xbc, 0x7c, 0xc5, 0xd4, 0x2d, 0x2e, 0xbf, 0x9f, 0xa5, 0xe6, 0xbc,
+ 0x13, 0xe6, 0xa1, 0x6a, 0x10, 0xb1, 0x86, 0x76, 0x7d, 0x10, 0xb1, 0x8c, 0x79, 0x08, 0x3f, 0x41,
+ 0x1b, 0xd5, 0x75, 0x88, 0xc6, 0xbc, 0x95, 0xa5, 0x66, 0xd5, 0x05, 0x55, 0x80, 0xc9, 0x79, 0x2e,
+ 0x3e, 0x4d, 0x26, 0xbe, 0xe7, 0xd8, 0x4c, 0xbe, 0x5c, 0xc8, 0x2b, 0x2e, 0xa8, 0x02, 0x4c, 0x3e,
+ 0xa9, 0x34, 0x60, 0x54, 0xc8, 0x2b, 0x2e, 0xa8, 0x02, 0x78, 0x82, 0x76, 0xf2, 0x83, 0x5d, 0xd0,
+ 0x22, 0x65, 0x43, 0x7f, 0x27, 0x4b, 0xcd, 0xd7, 0x72, 0xe1, 0xb5, 0x0c, 0x7c, 0x89, 0xde, 0x2e,
+ 0x9f, 0xe1, 0xa2, 0x49, 0x45, 0x9b, 0x7f, 0x37, 0x4b, 0xcd, 0xdb, 0xd0, 0xe1, 0x36, 0xa4, 0xee,
+ 0xdf, 0x4d, 0xa4, 0xf3, 0xab, 0x15, 0xeb, 0x91, 0x44, 0x7c, 0x16, 0x0f, 0xc2, 0x24, 0x50, 0x3a,
+ 0x74, 0x19, 0x07, 0xc5, 0xc2, 0x5f, 0xa2, 0x4d, 0x32, 0xfb, 0x98, 0x9e, 0x27, 0xac, 0xd7, 0x8b,
+ 0x4e, 0xa3, 0x5b, 0xdb, 0x59, 0x6a, 0xce, 0xf9, 0x60, 0x0e, 0xc1, 0x9f, 0xa2, 0x35, 0x89, 0xf1,
+ 0xe6, 0x27, 0x2e, 0x38, 0xba, 0x75, 0x2f, 0x4b, 0x4d, 0xd5, 0x01, 0xaa, 0xc9, 0x84, 0xfc, 0x46,
+ 0x06, 0xc4, 0x21, 0xde, 0x45, 0x7e, 0x9d, 0xe1, 0x42, 0xc5, 0x01, 0xaa, 0xc9, 0x2e, 0x26, 0x1c,
+ 0xe0, 0x2d, 0x5d, 0x94, 0x17, 0xbf, 0x98, 0xe4, 0x20, 0x14, 0x43, 0x76, 0xdf, 0x89, 0xc4, 0x5a,
+ 0x45, 0x2d, 0xe9, 0xe2, 0xbe, 0x33, 0xc3, 0x20, 0x1f, 0xb1, 0x03, 0x74, 0xcb, 0x2d, 0xb2, 0x5d,
+ 0x7c, 0x64, 0xca, 0x38, 0x28, 0x16, 0xab, 0x37, 0xde, 0xce, 0x8e, 0x48, 0x30, 0xa4, 0xa3, 0x3e,
+ 0x89, 0x2e, 0xf2, 0x5b, 0x0c, 0xaf, 0xb7, 0x39, 0x27, 0xcc, 0x43, 0xd6, 0xe0, 0xea, 0xda, 0x68,
+ 0xbc, 0xb8, 0x36, 0x1a, 0x2f, 0xaf, 0x0d, 0xed, 0xe7, 0xa9, 0xa1, 0xfd, 0x36, 0x35, 0xb4, 0xe7,
+ 0x53, 0x43, 0xbb, 0x9a, 0x1a, 0xda, 0x3f, 0x53, 0x43, 0xfb, 0x77, 0x6a, 0x34, 0x5e, 0x4e, 0x0d,
+ 0xed, 0x97, 0x1b, 0xa3, 0x71, 0x75, 0x63, 0x34, 0x5e, 0xdc, 0x18, 0x8d, 0xef, 0x3f, 0x18, 0x7a,
+ 0x74, 0x94, 0x0c, 0xf6, 0x9c, 0x70, 0xdc, 0x1b, 0x46, 0xf6, 0xa9, 0x1d, 0xd8, 0x3d, 0x3f, 0x3c,
+ 0xf3, 0x7a, 0x75, 0xff, 0x42, 0x07, 0x2d, 0xfe, 0x1f, 0xf3, 0xe3, 0xff, 0x03, 0x00, 0x00, 0xff,
+ 0xff, 0x38, 0x60, 0xd8, 0x7d, 0xa4, 0x0e, 0x00, 0x00,
}
func (this *Result) Equal(that interface{}) bool {
@@ -925,6 +935,9 @@ func (this *Caches) Equal(that interface{}) bool {
if !this.LabelResult.Equal(&that1.LabelResult) {
return false
}
+ if !this.InstantMetricResult.Equal(&that1.InstantMetricResult) {
+ return false
+ }
return true
}
func (this *Summary) Equal(that interface{}) bool {
@@ -1193,7 +1206,7 @@ func (this *Caches) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 11)
+ s := make([]string, 0, 12)
s = append(s, "&stats.Caches{")
s = append(s, "Chunk: "+strings.Replace(this.Chunk.GoString(), `&`, ``, 1)+",\n")
s = append(s, "Index: "+strings.Replace(this.Index.GoString(), `&`, ``, 1)+",\n")
@@ -1202,6 +1215,7 @@ func (this *Caches) GoString() string {
s = append(s, "VolumeResult: "+strings.Replace(this.VolumeResult.GoString(), `&`, ``, 1)+",\n")
s = append(s, "SeriesResult: "+strings.Replace(this.SeriesResult.GoString(), `&`, ``, 1)+",\n")
s = append(s, "LabelResult: "+strings.Replace(this.LabelResult.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "InstantMetricResult: "+strings.Replace(this.InstantMetricResult.GoString(), `&`, ``, 1)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -1391,6 +1405,16 @@ func (m *Caches) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ {
+ size, err := m.InstantMetricResult.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintStats(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
{
size, err := m.LabelResult.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
@@ -1877,6 +1901,8 @@ func (m *Caches) Size() (n int) {
n += 1 + l + sovStats(uint64(l))
l = m.LabelResult.Size()
n += 1 + l + sovStats(uint64(l))
+ l = m.InstantMetricResult.Size()
+ n += 1 + l + sovStats(uint64(l))
return n
}
@@ -2085,6 +2111,7 @@ func (this *Caches) String() string {
`VolumeResult:` + strings.Replace(strings.Replace(this.VolumeResult.String(), "Cache", "Cache", 1), `&`, ``, 1) + `,`,
`SeriesResult:` + strings.Replace(strings.Replace(this.SeriesResult.String(), "Cache", "Cache", 1), `&`, ``, 1) + `,`,
`LabelResult:` + strings.Replace(strings.Replace(this.LabelResult.String(), "Cache", "Cache", 1), `&`, ``, 1) + `,`,
+ `InstantMetricResult:` + strings.Replace(strings.Replace(this.InstantMetricResult.String(), "Cache", "Cache", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
@@ -2637,6 +2664,39 @@ func (m *Caches) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InstantMetricResult", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthStats
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthStats
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.InstantMetricResult.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipStats(dAtA[iNdEx:])
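
The regenerated marshalling above is mechanical, but the new constants are easy to check by hand: protobuf encodes each field key as (field_number << 3) | wire_type, so field 8 (instantMetricResult) with the length-delimited wire type 2 yields exactly the 0x42 tag byte written in MarshalToSizedBuffer and the `case 8` branch matched in Unmarshal. A minimal sanity check in Go:

package main

import "fmt"

func main() {
	const fieldNumber = 8 // instantMetricResult
	const wireType = 2    // length-delimited (embedded message)
	// protobuf tag byte: (field_number << 3) | wire_type
	fmt.Printf("0x%02x\n", fieldNumber<<3|wireType) // prints 0x42
}
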
diff --git a/pkg/logqlmodel/stats/stats.proto b/pkg/logqlmodel/stats/stats.proto
index 8db5b474a790..d36b8e557d98 100644
--- a/pkg/logqlmodel/stats/stats.proto
+++ b/pkg/logqlmodel/stats/stats.proto
@@ -57,6 +57,10 @@ message Caches {
(gogoproto.nullable) = false,
(gogoproto.jsontag) = "labelResult"
];
+ Cache instantMetricResult = 8 [
+ (gogoproto.nullable) = false,
+ (gogoproto.jsontag) = "instantMetricResult"
+ ];
}
// Summary is the summary of a query statistics.
diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go
index 9817c04afdc5..1914c8ab3edf 100644
--- a/pkg/loki/config_wrapper.go
+++ b/pkg/loki/config_wrapper.go
@@ -646,6 +646,13 @@ func applyEmbeddedCacheConfig(r *ConfigWrapper) {
r.QueryRange.LabelsCacheConfig.CacheConfig = r.QueryRange.ResultsCacheConfig.CacheConfig
r.QueryRange.LabelsCacheConfig.CacheConfig.Prefix = prefix
}
+
+ instantMetricCacheConfig := r.QueryRange.InstantMetricCacheConfig.CacheConfig
+ if !cache.IsCacheConfigured(instantMetricCacheConfig) {
+ prefix := instantMetricCacheConfig.Prefix
+ r.QueryRange.InstantMetricCacheConfig.CacheConfig = r.QueryRange.ResultsCacheConfig.CacheConfig
+ r.QueryRange.InstantMetricCacheConfig.CacheConfig.Prefix = prefix
+ }
}
func applyIngesterFinalSleep(cfg *ConfigWrapper) {
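
The hunk above gives the instant-metric cache the same fallback behavior as the other result caches: if no backend is configured for it, it inherits the results cache backend while keeping its own key prefix. A self-contained sketch of that rule, using a hypothetical stand-in for the real cache config type:

package main

import "fmt"

// cacheConfig is a hypothetical stand-in for the real cache.Config; only a
// backend name and a key prefix matter for this illustration.
type cacheConfig struct {
	Backend string
	Prefix  string
}

// isConfigured stands in for cache.IsCacheConfigured.
func isConfigured(c cacheConfig) bool { return c.Backend != "" }

func main() {
	results := cacheConfig{Backend: "memcached", Prefix: "frontend."}
	instant := cacheConfig{Prefix: "frontend.instant-metric-results-cache."}

	if !isConfigured(instant) {
		prefix := instant.Prefix // keep the cache-specific prefix
		instant = results        // inherit the results cache backend
		instant.Prefix = prefix
	}
	fmt.Printf("%+v\n", instant) // {Backend:memcached Prefix:frontend.instant-metric-results-cache.}
}
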
diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go
index 866079b71f60..3b1237dad4d1 100644
--- a/pkg/loki/config_wrapper_test.go
+++ b/pkg/loki/config_wrapper_test.go
@@ -1055,6 +1055,49 @@ query_range:
})
})
+ t.Run("for the instant-metric results cache config", func(t *testing.T) {
+ t.Run("no embedded cache enabled by default if Redis is set", func(t *testing.T) {
+ configFileString := `---
+query_range:
+ instant_metric_results_cache:
+ cache:
+ redis:
+ endpoint: endpoint.redis.org`
+
+ config, _, _ := configWrapperFromYAML(t, configFileString, nil)
+ assert.EqualValues(t, "endpoint.redis.org", config.QueryRange.InstantMetricCacheConfig.CacheConfig.Redis.Endpoint)
+ assert.EqualValues(t, "frontend.instant-metric-results-cache.", config.QueryRange.InstantMetricCacheConfig.CacheConfig.Prefix)
+ assert.False(t, config.QueryRange.InstantMetricCacheConfig.CacheConfig.EmbeddedCache.Enabled)
+ })
+
+ t.Run("no embedded cache enabled by default if Memcache is set", func(t *testing.T) {
+ configFileString := `---
+query_range:
+ instant_metric_results_cache:
+ cache:
+ memcached_client:
+ host: memcached.host.org`
+
+ config, _, _ := configWrapperFromYAML(t, configFileString, nil)
+ assert.EqualValues(t, "memcached.host.org", config.QueryRange.InstantMetricCacheConfig.CacheConfig.MemcacheClient.Host)
+ assert.EqualValues(t, "frontend.instant-metric-results-cache.", config.QueryRange.InstantMetricCacheConfig.CacheConfig.Prefix)
+ assert.False(t, config.QueryRange.InstantMetricCacheConfig.CacheConfig.EmbeddedCache.Enabled)
+ })
+
+ t.Run("embedded cache is enabled by default if no other cache is set", func(t *testing.T) {
+ config, _, _ := configWrapperFromYAML(t, minimalConfig, nil)
+ assert.True(t, config.QueryRange.InstantMetricCacheConfig.CacheConfig.EmbeddedCache.Enabled)
+ assert.EqualValues(t, "frontend.instant-metric-results-cache.", config.QueryRange.InstantMetricCacheConfig.CacheConfig.Prefix)
+ })
+
+ t.Run("gets results cache config if not configured directly", func(t *testing.T) {
+ config, _, _ := configWrapperFromYAML(t, defaultResulsCacheString, nil)
+ assert.EqualValues(t, "memcached.host.org", config.QueryRange.InstantMetricCacheConfig.CacheConfig.MemcacheClient.Host)
+ assert.EqualValues(t, "frontend.instant-metric-results-cache.", config.QueryRange.InstantMetricCacheConfig.CacheConfig.Prefix)
+ assert.False(t, config.QueryRange.InstantMetricCacheConfig.CacheConfig.EmbeddedCache.Enabled)
+ })
+ })
+
t.Run("for the labels results cache config", func(t *testing.T) {
t.Run("no embedded cache enabled by default if Redis is set", func(t *testing.T) {
configFileString := `---
diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go
index 976665df95b9..52e3cc8551b7 100644
--- a/pkg/querier/queryrange/codec_test.go
+++ b/pkg/querier/queryrange/codec_test.go
@@ -427,10 +427,12 @@ func Test_codec_DecodeResponse(t *testing.T) {
func Test_codec_DecodeProtobufResponseParity(t *testing.T) {
// test fixtures from pkg/util/marshal_test
var queryTests = []struct {
+ name string
actual parser.Value
expected string
}{
{
+ "basic",
logqlmodel.Streams{
logproto.Stream{
Entries: []logproto.Entry{
@@ -462,6 +464,7 @@ func Test_codec_DecodeProtobufResponseParity(t *testing.T) {
},
// vector test
{
+ "vector",
promql.Vector{
{
T: 1568404331324,
@@ -524,6 +527,7 @@ func Test_codec_DecodeProtobufResponseParity(t *testing.T) {
},
// matrix test
{
+ "matrix",
promql.Matrix{
{
Floats: []promql.FPoint{
@@ -607,50 +611,53 @@ func Test_codec_DecodeProtobufResponseParity(t *testing.T) {
}
codec := RequestProtobufCodec{}
for i, queryTest := range queryTests {
- params := url.Values{
- "query": []string{`{app="foo"}`},
- }
- u := &url.URL{
- Path: "/loki/api/v1/query_range",
- RawQuery: params.Encode(),
- }
- httpReq := &http.Request{
- Method: "GET",
- RequestURI: u.String(),
- URL: u,
- }
- req, err := codec.DecodeRequest(context.TODO(), httpReq, nil)
- require.NoError(t, err)
+ i := i
+ t.Run(queryTest.name, func(t *testing.T) {
+ params := url.Values{
+ "query": []string{`{app="foo"}`},
+ }
+ u := &url.URL{
+ Path: "/loki/api/v1/query_range",
+ RawQuery: params.Encode(),
+ }
+ httpReq := &http.Request{
+ Method: "GET",
+ RequestURI: u.String(),
+ URL: u,
+ }
+ req, err := codec.DecodeRequest(context.TODO(), httpReq, nil)
+ require.NoError(t, err)
- // parser.Value -> queryrange.QueryResponse
- var b bytes.Buffer
- result := logqlmodel.Result{
- Data: queryTest.actual,
- Statistics: statsResult,
- }
- err = WriteQueryResponseProtobuf(&logql.LiteralParams{}, result, &b)
- require.NoError(t, err)
+ // parser.Value -> queryrange.QueryResponse
+ var b bytes.Buffer
+ result := logqlmodel.Result{
+ Data: queryTest.actual,
+ Statistics: statsResult,
+ }
+ err = WriteQueryResponseProtobuf(&logql.LiteralParams{}, result, &b)
+ require.NoError(t, err)
- // queryrange.QueryResponse -> queryrangebase.Response
- querierResp := &http.Response{
- StatusCode: 200,
- Body: io.NopCloser(&b),
- Header: http.Header{
- "Content-Type": []string{ProtobufType},
- },
- }
- resp, err := codec.DecodeResponse(context.TODO(), querierResp, req)
- require.NoError(t, err)
+ // queryrange.QueryResponse -> queryrangebase.Response
+ querierResp := &http.Response{
+ StatusCode: 200,
+ Body: io.NopCloser(&b),
+ Header: http.Header{
+ "Content-Type": []string{ProtobufType},
+ },
+ }
+ resp, err := codec.DecodeResponse(context.TODO(), querierResp, req)
+ require.NoError(t, err)
- // queryrange.Response -> JSON
- ctx := user.InjectOrgID(context.Background(), "1")
- httpResp, err := codec.EncodeResponse(ctx, httpReq, resp)
- require.NoError(t, err)
+ // queryrange.Response -> JSON
+ ctx := user.InjectOrgID(context.Background(), "1")
+ httpResp, err := codec.EncodeResponse(ctx, httpReq, resp)
+ require.NoError(t, err)
- body, _ := io.ReadAll(httpResp.Body)
- require.JSONEqf(t, queryTest.expected, string(body), "Protobuf Decode Query Test %d failed", i)
+ body, err := io.ReadAll(httpResp.Body)
+ require.NoError(t, err)
+ require.JSONEqf(t, queryTest.expected, string(body), "Protobuf Decode Query Test %d failed", i)
+ })
}
-
}
func Test_codec_EncodeRequest(t *testing.T) {
@@ -1645,6 +1652,16 @@ var (
"downloadTime": 0,
"queryLengthServed": 0
},
+ "instantMetricResult": {
+ "entriesFound": 0,
+ "entriesRequested": 0,
+ "entriesStored": 0,
+ "bytesReceived": 0,
+ "bytesSent": 0,
+ "requests": 0,
+ "downloadTime": 0,
+ "queryLengthServed": 0
+ },
"result": {
"entriesFound": 0,
"entriesRequested": 0,
@@ -2027,13 +2044,14 @@ var (
},
Caches: stats.Caches{
- Chunk: stats.Cache{},
- Index: stats.Cache{},
- StatsResult: stats.Cache{},
- VolumeResult: stats.Cache{},
- SeriesResult: stats.Cache{},
- LabelResult: stats.Cache{},
- Result: stats.Cache{},
+ Chunk: stats.Cache{},
+ Index: stats.Cache{},
+ StatsResult: stats.Cache{},
+ VolumeResult: stats.Cache{},
+ SeriesResult: stats.Cache{},
+ LabelResult: stats.Cache{},
+ Result: stats.Cache{},
+ InstantMetricResult: stats.Cache{},
},
}
)
diff --git a/pkg/querier/queryrange/downstreamer.go b/pkg/querier/queryrange/downstreamer.go
index 31f8997ed767..4db8034291f6 100644
--- a/pkg/querier/queryrange/downstreamer.go
+++ b/pkg/querier/queryrange/downstreamer.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"reflect"
+ "time"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/concurrency"
@@ -14,6 +15,7 @@ import (
"github.com/prometheus/prometheus/promql/parser"
"github.com/grafana/loki/pkg/logql"
+ "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel"
"github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
@@ -27,6 +29,8 @@ const (
type DownstreamHandler struct {
limits Limits
next queryrangebase.Handler
+
+ splitAlign bool
}
func ParamsToLokiRequest(params logql.Params) queryrangebase.Request {
@@ -86,6 +90,7 @@ func (h DownstreamHandler) Downstreamer(ctx context.Context) logql.Downstreamer
parallelism: p,
locks: locks,
handler: h.next,
+ splitAlign: h.splitAlign,
}
}
@@ -94,16 +99,50 @@ type instance struct {
parallelism int
locks chan struct{}
handler queryrangebase.Handler
+
+ splitAlign bool
+}
+
+// withoutOffset returns the given query string with offsets removed and the start/end timestamps adjusted accordingly. If no offset is present in the original query, it is returned as is.
+func withoutOffset(query logql.DownstreamQuery) (string, time.Time, time.Time) {
+ expr := query.Params.GetExpression()
+
+ var (
+ newStart = query.Params.Start()
+ newEnd = query.Params.End()
+ )
+ expr.Walk(func(e syntax.Expr) {
+ switch rng := e.(type) {
+ case *syntax.RangeAggregationExpr:
+ off := rng.Left.Offset
+
+ if off != 0 {
+ rng.Left.Offset = 0 // remove offset
+
+ // adjust start and end time
+ newEnd = newEnd.Add(-off)
+ newStart = newStart.Add(-off)
+
+ }
+ }
+ })
+ return expr.String(), newStart, newEnd
}
func (in instance) Downstream(ctx context.Context, queries []logql.DownstreamQuery, acc logql.Accumulator) ([]logqlmodel.Result, error) {
return in.For(ctx, queries, acc, func(qry logql.DownstreamQuery) (logqlmodel.Result, error) {
- req := ParamsToLokiRequest(qry.Params).WithQuery(qry.Params.GetExpression().String())
+ var req queryrangebase.Request
+ if in.splitAlign {
+ qs, newStart, newEnd := withoutOffset(qry)
+ req = ParamsToLokiRequest(qry.Params).WithQuery(qs).WithStartEnd(newStart, newEnd)
+ } else {
+ req = ParamsToLokiRequest(qry.Params).WithQuery(qry.Params.GetExpression().String())
+ }
sp, ctx := opentracing.StartSpanFromContext(ctx, "DownstreamHandler.instance")
defer sp.Finish()
logger := spanlogger.FromContext(ctx)
defer logger.Finish()
- level.Debug(logger).Log("shards", fmt.Sprintf("%+v", qry.Params.Shards()), "query", req.GetQuery(), "step", req.GetStep(), "handler", reflect.TypeOf(in.handler))
+ level.Debug(logger).Log("shards", fmt.Sprintf("%+v", qry.Params.Shards()), "query", req.GetQuery(), "step", req.GetStep(), "handler", reflect.TypeOf(in.handler), "engine", "downstream")
res, err := in.handler.Do(ctx, req)
if err != nil {
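
As a worked example of the time arithmetic in withoutOffset above (this sketch covers only the timestamp shift, not the AST rewrite): evaluating sum(rate({foo="bar"}[2h] offset 1h)) at time T is equivalent to evaluating sum(rate({foo="bar"}[2h])) at T minus 1h, which is what the downstreamer test below asserts.

package main

import (
	"fmt"
	"time"
)

func main() {
	exec := time.Unix(1, 0) // instant query: start == end == exec time
	offset := 1 * time.Hour // the `offset 1h` stripped from the range expression

	// shift both bounds back by the removed offset
	newStart := exec.Add(-offset)
	newEnd := exec.Add(-offset)
	fmt.Println(newStart.Equal(newEnd), newEnd.UTC()) // true, exec time minus 1h
}
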
diff --git a/pkg/querier/queryrange/downstreamer_test.go b/pkg/querier/queryrange/downstreamer_test.go
index a23f2a381b00..cadfceeee20e 100644
--- a/pkg/querier/queryrange/downstreamer_test.go
+++ b/pkg/querier/queryrange/downstreamer_test.go
@@ -3,6 +3,7 @@ package queryrange
import (
"context"
"errors"
"strconv"
"strings"
"sync"
@@ -12,6 +13,7 @@ import (
"github.com/grafana/dskit/user"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
@@ -325,71 +327,142 @@ func TestInstanceFor(t *testing.T) {
}
func TestInstanceDownstream(t *testing.T) {
- params, err := logql.NewLiteralParams(
- `{foo="bar"}`,
- time.Now(),
- time.Now(),
- 0,
- 0,
- logproto.BACKWARD,
- 1000,
- nil,
- )
- require.NoError(t, err)
- expr, err := syntax.ParseExpr(`{foo="bar"}`)
- require.NoError(t, err)
-
- expectedResp := func() *LokiResponse {
- return &LokiResponse{
- Data: LokiData{
- Result: []logproto.Stream{{
- Labels: `{foo="bar"}`,
- Entries: []logproto.Entry{
- {Timestamp: time.Unix(0, 0), Line: "foo"},
- },
- }},
+ t.Run("Downstream simple query", func(t *testing.T) {
+ ts := time.Unix(1, 0)
+
+ params, err := logql.NewLiteralParams(
+ `{foo="bar"}`,
+ ts,
+ ts,
+ 0,
+ 0,
+ logproto.BACKWARD,
+ 1000,
+ nil,
+ )
+ require.NoError(t, err)
+ expr, err := syntax.ParseExpr(`{foo="bar"}`)
+ require.NoError(t, err)
+
+ expectedResp := func() *LokiResponse {
+ return &LokiResponse{
+ Data: LokiData{
+ Result: []logproto.Stream{{
+ Labels: `{foo="bar"}`,
+ Entries: []logproto.Entry{
+ {Timestamp: time.Unix(0, 0), Line: "foo"},
+ },
+ }},
+ },
+ Statistics: stats.Result{
+ Summary: stats.Summary{QueueTime: 1, ExecTime: 2},
+ },
+ }
+ }
+
+ queries := []logql.DownstreamQuery{
+ {
+ Params: logql.ParamsWithShardsOverride{
+ Params: logql.ParamsWithExpressionOverride{Params: params, ExpressionOverride: expr},
+ ShardsOverride: logql.Shards{{Shard: 0, Of: 2}}.Encode(),
+ },
},
- Statistics: stats.Result{
- Summary: stats.Summary{QueueTime: 1, ExecTime: 2},
+ }
+
+ var got queryrangebase.Request
+ var want queryrangebase.Request
+ handler := queryrangebase.HandlerFunc(
+ func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
+ // for some reason these seemingly can't be checked in their own goroutines,
+ // so we assign them to scoped variables for later comparison.
+ got = req
+ want = ParamsToLokiRequest(queries[0].Params).WithQuery(expr.String())
+
+ return expectedResp(), nil
},
+ )
+
+ expected, err := ResponseToResult(expectedResp())
+ require.Nil(t, err)
+
+ results, err := DownstreamHandler{
+ limits: fakeLimits{},
+ next: handler,
+ }.Downstreamer(context.Background()).Downstream(context.Background(), queries, logql.NewBufferedAccumulator(len(queries)))
+
+ require.Equal(t, want, got)
+ require.Nil(t, err)
+ require.Equal(t, 1, len(results))
+ require.Equal(t, expected.Data, results[0].Data)
+ })
+
+ t.Run("Downstream with offset removed", func(t *testing.T) {
+ ts := time.Unix(1, 0)
+
+ params, err := logql.NewLiteralParams(
+ `sum(rate({foo="bar"}[2h] offset 1h))`,
+ ts,
+ ts,
+ 0,
+ 0,
+ logproto.BACKWARD,
+ 1000,
+ nil,
+ )
+ require.NoError(t, err)
+
+ expectedResp := func() *LokiResponse {
+ return &LokiResponse{
+ Data: LokiData{
+ Result: []logproto.Stream{{
+ Labels: `{foo="bar"}`,
+ Entries: []logproto.Entry{
+ {Timestamp: time.Unix(0, 0), Line: "foo"},
+ },
+ }},
+ },
+ Statistics: stats.Result{
+ Summary: stats.Summary{QueueTime: 1, ExecTime: 2},
+ },
+ }
}
- }
- queries := []logql.DownstreamQuery{
- {
- Params: logql.ParamsWithShardsOverride{
- Params: logql.ParamsWithExpressionOverride{Params: params, ExpressionOverride: expr},
- ShardsOverride: logql.Shards{{Shard: 0, Of: 2}}.Encode(),
+ queries := []logql.DownstreamQuery{
+ {
+ Params: params,
},
- },
- }
+ }
- var got queryrangebase.Request
- var want queryrangebase.Request
- handler := queryrangebase.HandlerFunc(
- func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
- // for some reason these seemingly can't be checked in their own goroutines,
- // so we assign them to scoped variables for later comparison.
- got = req
- want = ParamsToLokiRequest(queries[0].Params).WithQuery(expr.String())
+ var got queryrangebase.Request
+ var want queryrangebase.Request
+ handler := queryrangebase.HandlerFunc(
+ func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
+ // for some reason these seemingly can't be checked in their own goroutines,
+ // so we assign them to scoped variables for later comparison.
+ got = req
+ want = ParamsToLokiRequest(params).WithQuery(`sum(rate({foo="bar"}[2h]))`).WithStartEnd(ts.Add(-1*time.Hour), ts.Add(-1*time.Hour)) // without offset and start, end adjusted for instant query
- return expectedResp(), nil
- },
- )
+ return expectedResp(), nil
+ },
+ )
- expected, err := ResponseToResult(expectedResp())
- require.Nil(t, err)
+ expected, err := ResponseToResult(expectedResp())
+ require.NoError(t, err)
- results, err := DownstreamHandler{
- limits: fakeLimits{},
- next: handler,
- }.Downstreamer(context.Background()).Downstream(context.Background(), queries, logql.NewBufferedAccumulator(len(queries)))
+ results, err := DownstreamHandler{
+ limits: fakeLimits{},
+ next: handler,
+ splitAlign: true,
+ }.Downstreamer(context.Background()).Downstream(context.Background(), queries, logql.NewBufferedAccumulator(len(queries)))
- require.Equal(t, want, got)
+ assert.Equal(t, want, got)
- require.Nil(t, err)
- require.Equal(t, 1, len(results))
- require.Equal(t, expected.Data, results[0].Data)
+ require.Nil(t, err)
+ require.Equal(t, 1, len(results))
+ require.Equal(t, expected.Data, results[0].Data)
+
+ })
}
func TestCancelWhileWaitingResponse(t *testing.T) {
diff --git a/pkg/querier/queryrange/instant_metric_cache.go b/pkg/querier/queryrange/instant_metric_cache.go
new file mode 100644
index 000000000000..ef1083e6cd22
--- /dev/null
+++ b/pkg/querier/queryrange/instant_metric_cache.go
@@ -0,0 +1,85 @@
+package queryrange
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "time"
+
+ "github.com/go-kit/log"
+
+ "github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
+ "github.com/grafana/loki/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
+)
+
+type InstantMetricSplitter struct {
+ Limits
+ transformer UserIDTransformer
+}
+
+// GenerateCacheKey generates a cache key based on the userID, the Request, and the split interval.
+func (i InstantMetricSplitter) GenerateCacheKey(ctx context.Context, userID string, r resultscache.Request) string {
+ split := i.InstantMetricQuerySplitDuration(userID)
+
+ var currentInterval int64
+ if denominator := int64(split / time.Millisecond); denominator > 0 {
+ currentInterval = r.GetStart().UnixMilli() / denominator
+ }
+
+ if i.transformer != nil {
+ userID = i.transformer(ctx, userID)
+ }
+
+	// include both the currentInterval and the split duration in the key to ensure
+	// a cache key can't be reused when the interval changes
+ return fmt.Sprintf("instant-metric:%s:%s:%d:%d", userID, r.GetQuery(), currentInterval, split)
+}
+
+type InstantMetricCacheConfig struct {
+ queryrangebase.ResultsCacheConfig `yaml:",inline"`
+}
+
+// RegisterFlags registers flags.
+func (cfg *InstantMetricCacheConfig) RegisterFlags(f *flag.FlagSet) {
+ cfg.RegisterFlagsWithPrefix(f, "frontend.instant-metric-results-cache.")
+}
+
+func (cfg *InstantMetricCacheConfig) Validate() error {
+ return cfg.ResultsCacheConfig.Validate()
+}
+
+type instantMetricExtractor struct{}
+
+func NewInstantMetricCacheMiddleware(
+ log log.Logger,
+ limits Limits,
+ merger queryrangebase.Merger,
+ c cache.Cache,
+ cacheGenNumberLoader queryrangebase.CacheGenNumberLoader,
+ shouldCache queryrangebase.ShouldCacheFn,
+ parallelismForReq queryrangebase.ParallelismForReqFn,
+ retentionEnabled bool,
+ transformer UserIDTransformer,
+ metrics *queryrangebase.ResultsCacheMetrics,
+) (queryrangebase.Middleware, error) {
+ return queryrangebase.NewResultsCacheMiddleware(
+ log,
+ c,
+ InstantMetricSplitter{limits, transformer},
+ limits,
+ merger,
+ PrometheusExtractor{},
+ cacheGenNumberLoader,
+ func(ctx context.Context, r queryrangebase.Request) bool {
+ if shouldCache != nil && !shouldCache(ctx, r) {
+ return false
+ }
+ return true
+ },
+ parallelismForReq,
+ retentionEnabled,
+ false,
+ metrics,
+ )
+}
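
A minimal sketch of the key layout produced by GenerateCacheKey above, assuming a hypothetical tenant, a 1h split duration, and an illustrative query; the interval index is the start time in milliseconds divided by the split in milliseconds, and the duration prints as its nanosecond count under %d:

package main

import (
	"fmt"
	"time"
)

func main() {
	userID := "fake" // hypothetical tenant ID
	query := `sum(rate({app="foo"}[5m]))`
	split := time.Hour
	start := time.Date(2024, 1, 30, 12, 34, 0, 0, time.UTC)

	// interval index: start time in ms divided by the split duration in ms
	currentInterval := start.UnixMilli() / int64(split/time.Millisecond)
	// %d on a time.Duration prints its nanosecond count, as in the real key
	key := fmt.Sprintf("instant-metric:%s:%s:%d:%d", userID, query, currentInterval, split)
	fmt.Println(key)
}
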
diff --git a/pkg/querier/queryrange/limits.go b/pkg/querier/queryrange/limits.go
index 2d1453190969..ab7818460738 100644
--- a/pkg/querier/queryrange/limits.go
+++ b/pkg/querier/queryrange/limits.go
@@ -68,6 +68,15 @@ func (l limits) QuerySplitDuration(user string) time.Duration {
return *l.splitDuration
}
+func (l limits) InstantMetricQuerySplitDuration(user string) time.Duration {
+	// NOTE: it returns the same `splitDuration` for both instant and range queries;
+	// there is no need for separate limits for now.
+ if l.splitDuration == nil {
+ return l.Limits.QuerySplitDuration(user)
+ }
+ return *l.splitDuration
+}
+
func (l limits) TSDBMaxQueryParallelism(ctx context.Context, user string) int {
if l.maxQueryParallelism == nil {
return l.Limits.TSDBMaxQueryParallelism(ctx, user)
diff --git a/pkg/querier/queryrange/limits/definitions.go b/pkg/querier/queryrange/limits/definitions.go
index 3e78b3442076..9e1232b75079 100644
--- a/pkg/querier/queryrange/limits/definitions.go
+++ b/pkg/querier/queryrange/limits/definitions.go
@@ -14,6 +14,7 @@ type Limits interface {
queryrangebase.Limits
logql.Limits
QuerySplitDuration(string) time.Duration
+ InstantMetricQuerySplitDuration(string) time.Duration
MetadataQuerySplitDuration(string) time.Duration
RecentMetadataQuerySplitDuration(string) time.Duration
RecentMetadataQueryWindow(string) time.Duration
diff --git a/pkg/querier/queryrange/prometheus_test.go b/pkg/querier/queryrange/prometheus_test.go
index a8e09b378bb2..4ec798b534a7 100644
--- a/pkg/querier/queryrange/prometheus_test.go
+++ b/pkg/querier/queryrange/prometheus_test.go
@@ -118,6 +118,16 @@ var emptyStats = `"stats": {
"downloadTime": 0,
"queryLengthServed": 0
},
+ "instantMetricResult": {
+ "entriesFound": 0,
+ "entriesRequested": 0,
+ "entriesStored": 0,
+ "bytesReceived": 0,
+ "bytesSent": 0,
+ "requests": 0,
+ "downloadTime": 0,
+ "queryLengthServed": 0
+ },
"result": {
"entriesFound": 0,
"entriesRequested": 0,
diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go
index 10246f4d8277..5532eab989c1 100644
--- a/pkg/querier/queryrange/roundtrip.go
+++ b/pkg/querier/queryrange/roundtrip.go
@@ -44,16 +44,19 @@ const (
// Config is the configuration for the queryrange tripperware
type Config struct {
- base.Config `yaml:",inline"`
- Transformer UserIDTransformer `yaml:"-"`
- CacheIndexStatsResults bool `yaml:"cache_index_stats_results"`
- StatsCacheConfig IndexStatsCacheConfig `yaml:"index_stats_results_cache" doc:"description=If a cache config is not specified and cache_index_stats_results is true, the config for the results cache is used."`
- CacheVolumeResults bool `yaml:"cache_volume_results"`
- VolumeCacheConfig VolumeCacheConfig `yaml:"volume_results_cache" doc:"description=If a cache config is not specified and cache_volume_results is true, the config for the results cache is used."`
- CacheSeriesResults bool `yaml:"cache_series_results"`
- SeriesCacheConfig SeriesCacheConfig `yaml:"series_results_cache" doc:"description=If series_results_cache is not configured and cache_series_results is true, the config for the results cache is used."`
- CacheLabelResults bool `yaml:"cache_label_results"`
- LabelsCacheConfig LabelsCacheConfig `yaml:"label_results_cache" doc:"description=If label_results_cache is not configured and cache_label_results is true, the config for the results cache is used."`
+ base.Config `yaml:",inline"`
+ Transformer UserIDTransformer `yaml:"-"`
+ CacheIndexStatsResults bool `yaml:"cache_index_stats_results"`
+ StatsCacheConfig IndexStatsCacheConfig `yaml:"index_stats_results_cache" doc:"description=If a cache config is not specified and cache_index_stats_results is true, the config for the results cache is used."`
+ CacheVolumeResults bool `yaml:"cache_volume_results"`
+ VolumeCacheConfig VolumeCacheConfig `yaml:"volume_results_cache" doc:"description=If a cache config is not specified and cache_volume_results is true, the config for the results cache is used."`
+ CacheInstantMetricResults bool `yaml:"cache_instant_metric_results"`
+ InstantMetricCacheConfig InstantMetricCacheConfig `yaml:"instant_metric_results_cache" doc:"description=If a cache config is not specified and cache_instant_metric_results is true, the config for the results cache is used."`
+	InstantMetricQuerySplitAlign bool `yaml:"instant_metric_query_split_align" doc:"description=Whether to align the splits of instant metric queries with splitByInterval and the query's exec time. Useful when caching of instant metric results is enabled."`
+ CacheSeriesResults bool `yaml:"cache_series_results"`
+ SeriesCacheConfig SeriesCacheConfig `yaml:"series_results_cache" doc:"description=If series_results_cache is not configured and cache_series_results is true, the config for the results cache is used."`
+ CacheLabelResults bool `yaml:"cache_label_results"`
+ LabelsCacheConfig LabelsCacheConfig `yaml:"label_results_cache" doc:"description=If label_results_cache is not configured and cache_label_results is true, the config for the results cache is used."`
}
// RegisterFlags adds the flags required to configure this flag set.
@@ -63,6 +66,9 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.StatsCacheConfig.RegisterFlags(f)
f.BoolVar(&cfg.CacheVolumeResults, "querier.cache-volume-results", false, "Cache volume query results.")
cfg.VolumeCacheConfig.RegisterFlags(f)
+ f.BoolVar(&cfg.CacheInstantMetricResults, "querier.cache-instant-metric-results", false, "Cache instant metric query results.")
+ cfg.InstantMetricCacheConfig.RegisterFlags(f)
+	f.BoolVar(&cfg.InstantMetricQuerySplitAlign, "querier.instant-metric-query-split-align", false, "Align the instant metric splits with splitByInterval and the query's exec time.")
f.BoolVar(&cfg.CacheSeriesResults, "querier.cache-series-results", false, "Cache series query results.")
cfg.SeriesCacheConfig.RegisterFlags(f)
f.BoolVar(&cfg.CacheLabelResults, "querier.cache-label-results", false, "Cache label query results.")
@@ -132,12 +138,13 @@ func NewMiddleware(
metrics := NewMetrics(registerer, metricsNamespace)
var (
- resultsCache cache.Cache
- statsCache cache.Cache
- volumeCache cache.Cache
- seriesCache cache.Cache
- labelsCache cache.Cache
- err error
+ resultsCache cache.Cache
+ statsCache cache.Cache
+ volumeCache cache.Cache
+ instantMetricCache cache.Cache
+ seriesCache cache.Cache
+ labelsCache cache.Cache
+ err error
)
if cfg.CacheResults {
@@ -161,6 +168,13 @@ func NewMiddleware(
}
}
+ if cfg.CacheInstantMetricResults {
+ instantMetricCache, err = newResultsCacheFromConfig(cfg.InstantMetricCacheConfig.ResultsCacheConfig, registerer, log, stats.InstantMetricResultsCache)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
if cfg.CacheSeriesResults {
seriesCache, err = newResultsCacheFromConfig(cfg.SeriesCacheConfig.ResultsCacheConfig, registerer, log, stats.SeriesResultCache)
if err != nil {
@@ -211,7 +225,7 @@ func NewMiddleware(
return nil, nil, err
}
- instantMetricTripperware, err := NewInstantMetricTripperware(cfg, engineOpts, log, limits, schema, metrics, indexStatsTripperware, metricsNamespace)
+ instantMetricTripperware, err := NewInstantMetricTripperware(cfg, engineOpts, log, limits, schema, metrics, codec, instantMetricCache, cacheGenNumLoader, retentionEnabled, indexStatsTripperware, metricsNamespace)
if err != nil {
return nil, nil, err
}
@@ -761,7 +775,51 @@ func NewMetricTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Logge
}
// NewInstantMetricTripperware creates a new frontend tripperware responsible for handling metric queries
-func NewInstantMetricTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Logger, limits Limits, schema config.SchemaConfig, metrics *Metrics, indexStatsTripperware base.Middleware, metricsNamespace string) (base.Middleware, error) {
+func NewInstantMetricTripperware(
+ cfg Config,
+ engineOpts logql.EngineOpts,
+ log log.Logger,
+ limits Limits,
+ schema config.SchemaConfig,
+ metrics *Metrics,
+ merger base.Merger,
+ c cache.Cache,
+ cacheGenNumLoader base.CacheGenNumberLoader,
+ retentionEnabled bool,
+ indexStatsTripperware base.Middleware,
+ metricsNamespace string,
+) (base.Middleware, error) {
+ var cacheMiddleware base.Middleware
+ if cfg.CacheInstantMetricResults {
+ var err error
+ cacheMiddleware, err = NewInstantMetricCacheMiddleware(
+ log,
+ limits,
+ merger,
+ c,
+ cacheGenNumLoader,
+ func(_ context.Context, r base.Request) bool {
+ return !r.GetCachingOptions().Disabled
+ },
+ func(ctx context.Context, tenantIDs []string, r base.Request) int {
+ return MinWeightedParallelism(
+ ctx,
+ tenantIDs,
+ schema.Configs,
+ limits,
+ model.Time(r.GetStart().UnixMilli()),
+ model.Time(r.GetEnd().UnixMilli()),
+ )
+ },
+ retentionEnabled,
+ cfg.Transformer,
+ metrics.ResultsCacheMetrics,
+ )
+ if err != nil {
+ return nil, err
+ }
+ }
+
return base.MiddlewareFunc(func(next base.Handler) base.Handler {
statsHandler := indexStatsTripperware.Wrap(next)
@@ -769,11 +827,19 @@ func NewInstantMetricTripperware(cfg Config, engineOpts logql.EngineOpts, log lo
StatsCollectorMiddleware(),
NewLimitsMiddleware(limits),
NewQuerySizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler),
+ NewSplitByRangeMiddleware(log, engineOpts, limits, cfg.InstantMetricQuerySplitAlign, metrics.MiddlewareMapperMetrics.rangeMapper),
+ }
+
+ if cfg.CacheInstantMetricResults {
+ queryRangeMiddleware = append(
+ queryRangeMiddleware,
+ base.InstrumentMiddleware("instant_metric_results_cache", metrics.InstrumentMiddlewareMetrics),
+ cacheMiddleware,
+ )
}
if cfg.ShardedQueries {
queryRangeMiddleware = append(queryRangeMiddleware,
- NewSplitByRangeMiddleware(log, engineOpts, limits, metrics.MiddlewareMapperMetrics.rangeMapper),
NewQueryShardMiddleware(
log,
schema.Configs,
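
For reference, a sketch of switching the two new options on from the command line, using local stand-in flags with the names registered above (the real flags are wired through Config.RegisterFlags):

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Local stand-ins for cfg.CacheInstantMetricResults and
	// cfg.InstantMetricQuerySplitAlign; flag names match those registered above.
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	cacheInstant := fs.Bool("querier.cache-instant-metric-results", false, "Cache instant metric query results.")
	splitAlign := fs.Bool("querier.instant-metric-query-split-align", false, "Align the instant metric splits with splitByInterval and the query's exec time.")

	_ = fs.Parse([]string{
		"-querier.cache-instant-metric-results=true",
		"-querier.instant-metric-query-split-align=true",
	})
	fmt.Println(*cacheInstant, *splitAlign) // true true
}
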
diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go
index 7d74b0dd615c..206822a50f6e 100644
--- a/pkg/querier/queryrange/roundtrip_test.go
+++ b/pkg/querier/queryrange/roundtrip_test.go
@@ -1247,6 +1247,7 @@ type fakeLimits struct {
metadataSplitDuration map[string]time.Duration
recentMetadataSplitDuration map[string]time.Duration
recentMetadataQueryWindow map[string]time.Duration
+ instantMetricSplitDuration map[string]time.Duration
ingesterSplitDuration map[string]time.Duration
minShardingLookback time.Duration
queryTimeout time.Duration
@@ -1266,6 +1267,13 @@ func (f fakeLimits) QuerySplitDuration(key string) time.Duration {
return f.splitDuration[key]
}
+func (f fakeLimits) InstantMetricQuerySplitDuration(key string) time.Duration {
+ if f.instantMetricSplitDuration == nil {
+ return 0
+ }
+ return f.instantMetricSplitDuration[key]
+}
+
func (f fakeLimits) MetadataQuerySplitDuration(key string) time.Duration {
if f.metadataSplitDuration == nil {
return 0
diff --git a/pkg/querier/queryrange/split_by_range.go b/pkg/querier/queryrange/split_by_range.go
index 6845846d4dea..16076cd94859 100644
--- a/pkg/querier/queryrange/split_by_range.go
+++ b/pkg/querier/queryrange/split_by_range.go
@@ -26,20 +26,25 @@ type splitByRange struct {
limits Limits
ng *logql.DownstreamEngine
metrics *logql.MapperMetrics
+
+	// Whether to align the rangeInterval to the splitByInterval in the subqueries.
+ splitAlign bool
}
// NewSplitByRangeMiddleware creates a new Middleware that splits log requests by the range interval.
-func NewSplitByRangeMiddleware(logger log.Logger, engineOpts logql.EngineOpts, limits Limits, metrics *logql.MapperMetrics) queryrangebase.Middleware {
+func NewSplitByRangeMiddleware(logger log.Logger, engineOpts logql.EngineOpts, limits Limits, splitAlign bool, metrics *logql.MapperMetrics) queryrangebase.Middleware {
return queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler {
return &splitByRange{
logger: log.With(logger, "middleware", "InstantQuery.splitByRangeVector"),
next: next,
limits: limits,
ng: logql.NewDownstreamEngine(engineOpts, DownstreamHandler{
- limits: limits,
- next: next,
+ limits: limits,
+ next: next,
+ splitAlign: splitAlign,
}, limits, logger),
- metrics: metrics,
+ metrics: metrics,
+ splitAlign: splitAlign,
}
})
}
@@ -57,14 +62,26 @@ func (s *splitByRange) Do(ctx context.Context, request queryrangebase.Request) (
return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
}
- interval := validation.SmallestPositiveNonZeroDurationPerTenant(tenants, s.limits.QuerySplitDuration)
+ interval := validation.SmallestPositiveNonZeroDurationPerTenant(tenants, s.limits.InstantMetricQuerySplitDuration)
// if no interval configured, continue to the next middleware
if interval == 0 {
return s.next.Do(ctx, request)
}
mapperStats := logql.NewMapperStats()
- mapper, err := logql.NewRangeMapper(interval, s.metrics, mapperStats)
+
+ ir, ok := request.(*LokiInstantRequest)
+ if !ok {
+ return nil, fmt.Errorf("expected *LokiInstantRequest, got %T", request)
+ }
+
+ var mapper logql.RangeMapper
+
+ if s.splitAlign {
+ mapper, err = logql.NewRangeMapperWithSplitAlign(interval, ir.TimeTs, s.metrics, mapperStats)
+ } else {
+ mapper, err = logql.NewRangeMapper(interval, s.metrics, mapperStats)
+ }
if err != nil {
return nil, err
}
@@ -85,10 +102,6 @@ func (s *splitByRange) Do(ctx context.Context, request queryrangebase.Request) (
queryStatsCtx := stats.FromContext(ctx)
queryStatsCtx.AddSplitQueries(int64(mapperStats.GetSplitQueries()))
- if _, ok := request.(*LokiInstantRequest); !ok {
- return nil, fmt.Errorf("expected *LokiInstantRequest, got %T", request)
- }
-
query := s.ng.Query(ctx, logql.ParamsWithExpressionOverride{Params: params, ExpressionOverride: parsed})
res, err := query.Exec(ctx)
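
The mapper selection above is what produces the aligned subqueries exercised by Test_RangeVectorSplitAlign below. A self-contained sketch of the boundary arithmetic, assuming splits snap to wall-clock multiples of the interval as the test fixtures show: a [3h] instant query at 12:34 with a 1h split yields [34m]@12:34, [1h]@12:00, [1h]@11:00, and [26m]@10:00.

package main

import (
	"fmt"
	"time"
)

// alignedSplits prints the subquery ranges for an instant query executed at
// exec and covering rangeDur, split on wall-clock multiples of split.
func alignedSplits(exec time.Time, rangeDur, split time.Duration) {
	end := exec
	remaining := rangeDur
	for remaining > 0 {
		// distance from end back to the previous split boundary
		head := end.Sub(end.Truncate(split))
		if head == 0 {
			head = split // end is already on a boundary: take a full split
		}
		if head > remaining {
			head = remaining
		}
		fmt.Printf("[%v] @ %s\n", head, end.Format("15:04"))
		end = end.Add(-head)
		remaining -= head
	}
}

func main() {
	exec := time.Date(1970, 1, 1, 12, 34, 0, 0, time.UTC)
	alignedSplits(exec, 3*time.Hour, time.Hour)
	// [34m0s] @ 12:34
	// [1h0m0s] @ 12:00
	// [1h0m0s] @ 11:00
	// [26m0s] @ 10:00
}
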
diff --git a/pkg/querier/queryrange/split_by_range_test.go b/pkg/querier/queryrange/split_by_range_test.go
index b1687611abc1..af66c10a2f08 100644
--- a/pkg/querier/queryrange/split_by_range_test.go
+++ b/pkg/querier/queryrange/split_by_range_test.go
@@ -8,6 +8,7 @@ import (
"github.com/go-kit/log"
"github.com/grafana/dskit/user"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/loghttp"
@@ -17,14 +18,291 @@ import (
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
)
+func Test_RangeVectorSplitAlign(t *testing.T) {
+ var (
+ twelve34 = time.Date(1970, 1, 1, 12, 34, 0, 0, time.UTC) // 1970 12:34:00 UTC
+ twelve = time.Date(1970, 1, 1, 12, 00, 0, 0, time.UTC) // 1970 12:00:00 UTC
+ eleven = twelve.Add(-1 * time.Hour) // 1970 11:00:00 UTC
+ ten = eleven.Add(-1 * time.Hour) // 1970 10:00:00 UTC
+ )
+
+ for _, tc := range []struct {
+ name string
+ in queryrangebase.Request
+ subQueries []queryrangebase.RequestResponse
+ expected queryrangebase.Response
+ splitByInterval time.Duration
+ }{
+ {
+ name: "sum_splitBy_aligned_with_query_time",
+ splitByInterval: 1 * time.Minute,
+ in: &LokiInstantRequest{
+ Query: `sum(bytes_over_time({app="foo"}[3m]))`,
+ TimeTs: time.Unix(180, 0),
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum(bytes_over_time({app="foo"}[3m]))`),
+ },
+ },
+ subQueries: []queryrangebase.RequestResponse{
+ subQueryRequestResponseWithQueryTime(`sum(bytes_over_time({app="foo"}[1m]))`, 1, time.Unix(60, 0)),
+ subQueryRequestResponseWithQueryTime(`sum(bytes_over_time({app="foo"}[1m]))`, 2, time.Unix(120, 0)),
+ subQueryRequestResponseWithQueryTime(`sum(bytes_over_time({app="foo"}[1m]))`, 3, time.Unix(180, 0)),
+ },
+ expected: expectedMergedResponseWithTime(1+2+3, time.Unix(180, 0)), // original `TimeTs` of the query.
+ },
+ {
+ name: "sum_splitBy_not_aligned_query_time",
+ splitByInterval: 1 * time.Hour,
+ in: &LokiInstantRequest{
+ Query: `sum(bytes_over_time({app="foo"}[3h]))`,
+ TimeTs: twelve34,
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum(bytes_over_time({app="foo"}[3h]))`),
+ },
+ },
+ subQueries: []queryrangebase.RequestResponse{
+ subQueryRequestResponseWithQueryTime(`sum(bytes_over_time({app="foo"}[34m]))`, 1, twelve34),
+ subQueryRequestResponseWithQueryTime(`sum(bytes_over_time({app="foo"}[1h]))`, 2, twelve),
+ subQueryRequestResponseWithQueryTime(`sum(bytes_over_time({app="foo"}[1h]))`, 3, eleven),
+ subQueryRequestResponseWithQueryTime(`sum(bytes_over_time({app="foo"}[26m]))`, 4, ten),
+ },
+ expected: expectedMergedResponseWithTime(1+2+3+4, twelve34), // original `TimeTs` of the query.
+ },
+ {
+ name: "sum_aggregation_splitBy_aligned_with_query_time",
+ splitByInterval: 1 * time.Minute,
+ in: &LokiInstantRequest{
+ Query: `sum by (bar) (bytes_over_time({app="foo"}[3m]))`,
+ TimeTs: time.Unix(180, 0),
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (bar) (bytes_over_time({app="foo"}[3m]))`),
+ },
+ },
+ subQueries: []queryrangebase.RequestResponse{
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(bytes_over_time({app="foo"}[1m]))`, 10, time.Unix(60, 0)),
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(bytes_over_time({app="foo"}[1m]))`, 20, time.Unix(120, 0)),
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(bytes_over_time({app="foo"}[1m]))`, 30, time.Unix(180, 0)),
+ },
+ expected: expectedMergedResponseWithTime(10+20+30, time.Unix(180, 0)),
+ },
+ {
+ name: "sum_aggregation_splitBy_not_aligned_with_query_time",
+ splitByInterval: 1 * time.Hour,
+ in: &LokiInstantRequest{
+ Query: `sum by (bar) (bytes_over_time({app="foo"}[3h]))`,
+ TimeTs: twelve34,
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (bar) (bytes_over_time({app="foo"}[3h]))`),
+ },
+ },
+ subQueries: []queryrangebase.RequestResponse{
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(bytes_over_time({app="foo"}[34m]))`, 10, twelve34), // 12:34:00
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(bytes_over_time({app="foo"}[1h]))`, 20, twelve), // 12:00:00 aligned
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(bytes_over_time({app="foo"}[1h]))`, 30, eleven), // 11:00:00 aligned
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(bytes_over_time({app="foo"}[26m]))`, 40, ten), // 10:00:00
+ },
+ expected: expectedMergedResponseWithTime(10+20+30+40, twelve34),
+ },
+ {
+ name: "count_over_time_aligned_with_query_time",
+ splitByInterval: 1 * time.Minute,
+ in: &LokiInstantRequest{
+ Query: `sum(count_over_time({app="foo"}[3m]))`,
+ TimeTs: time.Unix(180, 0),
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum(count_over_time({app="foo"}[3m]))`),
+ },
+ },
+ subQueries: []queryrangebase.RequestResponse{
+ subQueryRequestResponseWithQueryTime(`sum(count_over_time({app="foo"}[1m]))`, 1, time.Unix(60, 0)),
+ subQueryRequestResponseWithQueryTime(`sum(count_over_time({app="foo"}[1m]))`, 1, time.Unix(120, 0)),
+ subQueryRequestResponseWithQueryTime(`sum(count_over_time({app="foo"}[1m]))`, 1, time.Unix(180, 0)),
+ },
+ expected: expectedMergedResponseWithTime(1+1+1, time.Unix(180, 0)),
+ },
+ {
+ name: "count_over_time_not_aligned_with_query_time",
+ splitByInterval: 1 * time.Hour,
+ in: &LokiInstantRequest{
+ Query: `sum(count_over_time({app="foo"}[3h]))`,
+ TimeTs: twelve34,
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum(count_over_time({app="foo"}[3h]))`),
+ },
+ },
+ subQueries: []queryrangebase.RequestResponse{
+ subQueryRequestResponseWithQueryTime(`sum(count_over_time({app="foo"}[34m]))`, 1, twelve34),
+ subQueryRequestResponseWithQueryTime(`sum(count_over_time({app="foo"}[1h]))`, 1, twelve),
+ subQueryRequestResponseWithQueryTime(`sum(count_over_time({app="foo"}[1h]))`, 1, eleven),
+ subQueryRequestResponseWithQueryTime(`sum(count_over_time({app="foo"}[26m]))`, 1, ten),
+ },
+ expected: expectedMergedResponseWithTime(1+1+1+1, twelve34),
+ },
+ {
+ name: "sum_agg_count_over_time_align_with_query_time",
+ splitByInterval: 1 * time.Minute,
+ in: &LokiInstantRequest{
+ Query: `sum by (bar) (count_over_time({app="foo"}[3m]))`,
+ TimeTs: time.Unix(180, 0),
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (bar) (count_over_time({app="foo"}[3m]))`),
+ },
+ },
+ subQueries: []queryrangebase.RequestResponse{
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(count_over_time({app="foo"}[1m]))`, 0, time.Unix(60, 0)),
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(count_over_time({app="foo"}[1m]))`, 0, time.Unix(120, 0)),
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(count_over_time({app="foo"}[1m]))`, 0, time.Unix(180, 0)),
+ },
+ expected: expectedMergedResponseWithTime(0+0+0, time.Unix(180, 0)),
+ },
+ {
+ name: "sum_agg_count_over_time_not_align_with_query_time",
+ splitByInterval: 1 * time.Hour,
+ in: &LokiInstantRequest{
+ Query: `sum by (bar) (count_over_time({app="foo"}[3h]))`,
+ TimeTs: twelve34,
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (bar) (count_over_time({app="foo"}[3h]))`),
+ },
+ },
+ subQueries: []queryrangebase.RequestResponse{
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(count_over_time({app="foo"}[34m]))`, 0, twelve34),
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(count_over_time({app="foo"}[1h]))`, 0, twelve),
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(count_over_time({app="foo"}[1h]))`, 0, eleven),
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(count_over_time({app="foo"}[26m]))`, 0, ten),
+ },
+ expected: expectedMergedResponseWithTime(0+0+0+0, twelve34),
+ },
+ {
+ name: "sum_over_time_aligned_with_query_time",
+ splitByInterval: 1 * time.Minute,
+ in: &LokiInstantRequest{
+ Query: `sum(sum_over_time({app="foo"} | unwrap bar [3m]))`,
+ TimeTs: time.Unix(180, 0),
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum(sum_over_time({app="foo"} | unwrap bar [3m]))`),
+ },
+ },
+ subQueries: []queryrangebase.RequestResponse{
+ subQueryRequestResponseWithQueryTime(`sum(sum_over_time({app="foo"} | unwrap bar[1m]))`, 1, time.Unix(60, 0)),
+ subQueryRequestResponseWithQueryTime(`sum(sum_over_time({app="foo"} | unwrap bar[1m]))`, 2, time.Unix(120, 0)),
+ subQueryRequestResponseWithQueryTime(`sum(sum_over_time({app="foo"} | unwrap bar[1m]))`, 3, time.Unix(180, 0)),
+ },
+ expected: expectedMergedResponseWithTime(1+2+3, time.Unix(180, 0)),
+ },
+ {
+ name: "sum_over_time_not_aligned_with_query_time",
+ splitByInterval: 1 * time.Hour,
+ in: &LokiInstantRequest{
+ Query: `sum(sum_over_time({app="foo"} | unwrap bar [3h]))`,
+ TimeTs: twelve34,
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum(sum_over_time({app="foo"} | unwrap bar [3h]))`),
+ },
+ },
+ subQueries: []queryrangebase.RequestResponse{
+ subQueryRequestResponseWithQueryTime(`sum(sum_over_time({app="foo"} | unwrap bar[34m]))`, 1, twelve34),
+ subQueryRequestResponseWithQueryTime(`sum(sum_over_time({app="foo"} | unwrap bar[1h]))`, 2, twelve),
+ subQueryRequestResponseWithQueryTime(`sum(sum_over_time({app="foo"} | unwrap bar[1h]))`, 3, eleven),
+ subQueryRequestResponseWithQueryTime(`sum(sum_over_time({app="foo"} | unwrap bar[26m]))`, 4, ten),
+ },
+ expected: expectedMergedResponseWithTime(1+2+3+4, twelve34),
+ },
+ {
+ name: "sum_agg_sum_over_time_aligned_with_query_time",
+ splitByInterval: 1 * time.Minute,
+ in: &LokiInstantRequest{
+ Query: `sum by (bar) (sum_over_time({app="foo"} | unwrap bar [3m]))`,
+ TimeTs: time.Unix(180, 0),
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (bar) (sum_over_time({app="foo"} | unwrap bar [3m]))`),
+ },
+ },
+ subQueries: []queryrangebase.RequestResponse{
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(sum_over_time({app="foo"} | unwrap bar[1m]))`, 1, time.Unix(60, 0)),
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(sum_over_time({app="foo"} | unwrap bar[1m]))`, 2, time.Unix(120, 0)),
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(sum_over_time({app="foo"} | unwrap bar[1m]))`, 3, time.Unix(180, 0)),
+ },
+ expected: expectedMergedResponseWithTime(1+2+3, time.Unix(180, 0)),
+ },
+ {
+ name: "sum_agg_sum_over_time_not_aligned_with_query_time",
+ splitByInterval: 1 * time.Hour,
+ in: &LokiInstantRequest{
+ Query: `sum by (bar) (sum_over_time({app="foo"} | unwrap bar [3h]))`,
+ TimeTs: twelve34,
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(`sum by (bar) (sum_over_time({app="foo"} | unwrap bar [3h]))`),
+ },
+ },
+ subQueries: []queryrangebase.RequestResponse{
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(sum_over_time({app="foo"} | unwrap bar[34m]))`, 1, twelve34),
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(sum_over_time({app="foo"} | unwrap bar[1h]))`, 2, twelve),
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(sum_over_time({app="foo"} | unwrap bar[1h]))`, 3, eleven),
+ subQueryRequestResponseWithQueryTime(`sum by (bar)(sum_over_time({app="foo"} | unwrap bar[26m]))`, 4, ten),
+ },
+ expected: expectedMergedResponseWithTime(1+2+3+4, twelve34),
+ },
+ } {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ srm := NewSplitByRangeMiddleware(log.NewNopLogger(), testEngineOpts, fakeLimits{
+ maxSeries: 10000,
+ queryTimeout: time.Second,
+ instantMetricSplitDuration: map[string]time.Duration{
+ "tenant": tc.splitByInterval,
+ },
+ }, true, nilShardingMetrics) // enable splitAlign
+
+ ctx := user.InjectOrgID(context.TODO(), "tenant")
+
+ byTimeTs := make(map[int64]queryrangebase.RequestResponse)
+ for _, v := range tc.subQueries {
+ key := v.Request.(*LokiInstantRequest).TimeTs.UnixNano()
+ byTimeTs[key] = v
+ }
+
+ resp, err := srm.Wrap(queryrangebase.HandlerFunc(
+ func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
+				// req should match one of the subqueries.
+ ts := req.(*LokiInstantRequest).TimeTs
+ subq, ok := byTimeTs[ts.UnixNano()]
+				if !ok { // every req **should** match one of the subqueries
+ return nil, fmt.Errorf("subquery request '%s-%s' not found", req.GetQuery(), ts)
+ }
+
+ // Assert subquery request
+ assert.Equal(t, subq.Request.GetQuery(), req.GetQuery())
+ assert.Equal(t, subq.Request, req)
+ return subq.Response, nil
+
+ })).Do(ctx, tc.in)
+ require.NoError(t, err)
+ assert.Equal(t, tc.expected, resp.(*LokiPromResponse).Response)
+ })
+ }
+}
+
func Test_RangeVectorSplit(t *testing.T) {
srm := NewSplitByRangeMiddleware(log.NewNopLogger(), testEngineOpts, fakeLimits{
maxSeries: 10000,
queryTimeout: time.Second,
- splitDuration: map[string]time.Duration{
+ instantMetricSplitDuration: map[string]time.Duration{
"tenant": time.Minute,
},
- }, nilShardingMetrics)
+ }, false, nilShardingMetrics)
ctx := user.InjectOrgID(context.TODO(), "tenant")
@@ -151,6 +429,39 @@ func Test_RangeVectorSplit(t *testing.T) {
}
}
+// subQueryRequestResponseWithQueryTime returns a RequestResponse containing the expected subQuery instant request,
+// executed at the given time, and a response containing a sample value returned from the wrapped handler.
+func subQueryRequestResponseWithQueryTime(expectedSubQuery string, sampleValue float64, exec time.Time) queryrangebase.RequestResponse {
+ return queryrangebase.RequestResponse{
+ Request: &LokiInstantRequest{
+ Query: expectedSubQuery,
+ TimeTs: exec,
+ Path: "/loki/api/v1/query",
+ Plan: &plan.QueryPlan{
+ AST: syntax.MustParseExpr(expectedSubQuery),
+ },
+ },
+ Response: &LokiPromResponse{
+ Response: &queryrangebase.PrometheusResponse{
+ Status: loghttp.QueryStatusSuccess,
+ Data: queryrangebase.PrometheusData{
+ ResultType: loghttp.ResultTypeVector,
+ Result: []queryrangebase.SampleStream{
+ {
+ Labels: []logproto.LabelAdapter{
+ {Name: "app", Value: "foo"},
+ },
+ Samples: []logproto.LegacySample{
+ {TimestampMs: 1000, Value: sampleValue},
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
// subQueryRequestResponse returns a RequestResponse containing the expected subQuery instant request
// and a response containing a sample value returned from the following wrapper
func subQueryRequestResponse(expectedSubQuery string, sampleValue float64) queryrangebase.RequestResponse {
@@ -202,3 +513,20 @@ func expectedMergedResponse(expectedSampleValue float64) *queryrangebase.Prometh
},
}
}
+
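+// expectedMergedResponseWithTime returns the expected merged PrometheusResponse, with the merged sample timestamped at the given execution time.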
+func expectedMergedResponseWithTime(expectedSampleValue float64, exec time.Time) *queryrangebase.PrometheusResponse {
+ return &queryrangebase.PrometheusResponse{
+ Status: loghttp.QueryStatusSuccess,
+ Data: queryrangebase.PrometheusData{
+ ResultType: loghttp.ResultTypeVector,
+ Result: []queryrangebase.SampleStream{
+ {
+ Labels: []logproto.LabelAdapter{},
+ Samples: []logproto.LegacySample{
+ {TimestampMs: exec.UnixMilli(), Value: expectedSampleValue},
+ },
+ },
+ },
+ },
+ }
+}
diff --git a/pkg/util/marshal/legacy/marshal_test.go b/pkg/util/marshal/legacy/marshal_test.go
index 6e07d8461592..a3dca73ac299 100644
--- a/pkg/util/marshal/legacy/marshal_test.go
+++ b/pkg/util/marshal/legacy/marshal_test.go
@@ -161,6 +161,16 @@ var queryTests = []struct {
"downloadTime": 0,
"queryLengthServed": 0
},
+ "instantMetricResult": {
+ "entriesFound": 0,
+ "entriesRequested": 0,
+ "entriesStored": 0,
+ "bytesReceived": 0,
+ "bytesSent": 0,
+ "requests": 0,
+ "downloadTime": 0,
+ "queryLengthServed": 0
+ },
"result": {
"entriesFound": 0,
"entriesRequested": 0,
@@ -180,7 +190,7 @@ var queryTests = []struct {
"shards": 0,
"splits": 0,
"subqueries": 0,
- "totalBytesProcessed": 0,
+ "totalBytesProcessed": 0,
"totalEntriesReturned": 0,
"totalLinesProcessed": 0,
"totalStructuredMetadataBytesProcessed": 0,
diff --git a/pkg/util/marshal/marshal_test.go b/pkg/util/marshal/marshal_test.go
index d5336298c37c..ce7a49f97e76 100644
--- a/pkg/util/marshal/marshal_test.go
+++ b/pkg/util/marshal/marshal_test.go
@@ -129,6 +129,16 @@ const emptyStats = `{
"downloadTime": 0,
"queryLengthServed": 0
},
+ "instantMetricResult": {
+ "entriesFound": 0,
+ "entriesRequested": 0,
+ "entriesStored": 0,
+ "bytesReceived": 0,
+ "bytesSent": 0,
+ "requests": 0,
+ "downloadTime": 0,
+ "queryLengthServed": 0
+ },
"result": {
"entriesFound": 0,
"entriesRequested": 0,
@@ -208,13 +218,13 @@ var queryTestWithEncodingFlags = []struct {
[ "123456789012346", "super line with labels", {
"structuredMetadata": {
"foo": "a",
- "bar": "b"
- }
+ "bar": "b"
+ }
}],
[ "123456789012347", "super line with labels msg=text", {
"structuredMetadata": {
"foo": "a",
- "bar": "b"
+ "bar": "b"
},
"parsed": {
"msg": "text"
@@ -549,13 +559,13 @@ var tailTestWithEncodingFlags = []struct {
[ "123456789012346", "super line with labels", {
"structuredMetadata": {
"foo": "a",
- "bar": "b"
- }
+ "bar": "b"
+ }
}],
[ "123456789012347", "super line with labels msg=text", {
"structuredMetadata": {
"foo": "a",
- "bar": "b"
+ "bar": "b"
},
"parsed": {
"msg": "text"
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index 00ee2e152144..ab845380f968 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -111,6 +111,7 @@ type Limits struct {
MetadataQuerySplitDuration model.Duration `yaml:"split_metadata_queries_by_interval" json:"split_metadata_queries_by_interval"`
RecentMetadataQuerySplitDuration model.Duration `yaml:"split_recent_metadata_queries_by_interval" json:"split_recent_metadata_queries_by_interval"`
RecentMetadataQueryWindow model.Duration `yaml:"recent_metadata_query_window" json:"recent_metadata_query_window"`
+ InstantMetricQuerySplitDuration model.Duration `yaml:"split_instant_metric_queries_by_interval" json:"split_instant_metric_queries_by_interval"`
IngesterQuerySplitDuration model.Duration `yaml:"split_ingester_queries_by_interval" json:"split_ingester_queries_by_interval"`
MinShardingLookback model.Duration `yaml:"min_sharding_lookback" json:"min_sharding_lookback"`
MaxQueryBytesRead flagext.ByteSize `yaml:"max_query_bytes_read" json:"max_query_bytes_read"`
@@ -307,6 +308,8 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
_ = l.QuerySplitDuration.Set("1h")
f.Var(&l.QuerySplitDuration, "querier.split-queries-by-interval", "Split queries by a time interval and execute in parallel. The value 0 disables splitting by time. This also determines how cache keys are chosen when result caching is enabled.")
+ _ = l.InstantMetricQuerySplitDuration.Set("1h")
+ f.Var(&l.InstantMetricQuerySplitDuration, "querier.split-instant-metric-queries-by-interval", "Split instant metric queries by a time interval and execute in parallel. The value 0 disables splitting instant metric queries by time. This also determines how cache keys are chosen when instant metric query result caching is enabled.")
_ = l.MetadataQuerySplitDuration.Set("24h")
f.Var(&l.MetadataQuerySplitDuration, "querier.split-metadata-queries-by-interval", "Split metadata queries by a time interval and execute in parallel. The value 0 disables splitting metadata queries by time. This also determines how cache keys are chosen when label/series result caching is enabled.")
@@ -601,6 +604,11 @@ func (o *Overrides) QuerySplitDuration(userID string) time.Duration {
return time.Duration(o.getOverridesForUser(userID).QuerySplitDuration)
}
+// InstantMetricQuerySplitDuration returns the tenant-specific split-by interval for instant metric queries, applied in the query frontend.
+func (o *Overrides) InstantMetricQuerySplitDuration(userID string) time.Duration {
+ return time.Duration(o.getOverridesForUser(userID).InstantMetricQuerySplitDuration)
+}
+
// MetadataQuerySplitDuration returns the tenant-specific split-by interval for metadata queries, applied in the query frontend.
func (o *Overrides) MetadataQuerySplitDuration(userID string) time.Duration {
return time.Duration(o.getOverridesForUser(userID).MetadataQuerySplitDuration)
From bdea0b6df1e549f95ef457093184fd615db3c726 Mon Sep 17 00:00:00 2001
From: Danny Kopping
Date: Tue, 20 Feb 2024 12:32:47 +0200
Subject: [PATCH 092/130] Add o11y for chunk data retrieved from ingesters
(#12003)
Signed-off-by: Danny Kopping
---
pkg/logql/metrics.go | 30 +++++++++++++++++++++++++++---
pkg/logqlmodel/stats/context.go | 4 ++++
2 files changed, 31 insertions(+), 3 deletions(-)
diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go
index b55e9840a475..694acefbc574 100644
--- a/pkg/logql/metrics.go
+++ b/pkg/logql/metrics.go
@@ -142,9 +142,9 @@ func RecordRangeAndInstantQueryMetrics(
"status", status,
"limit", p.Limit(),
"returned_lines", returnedLines,
- "throughput", strings.Replace(humanize.Bytes(uint64(stats.Summary.BytesProcessedPerSecond)), " ", "", 1),
- "total_bytes", strings.Replace(humanize.Bytes(uint64(stats.Summary.TotalBytesProcessed)), " ", "", 1),
- "total_bytes_structured_metadata", strings.Replace(humanize.Bytes(uint64(stats.Summary.TotalStructuredMetadataBytesProcessed)), " ", "", 1),
+ "throughput", humanizeBytes(uint64(stats.Summary.BytesProcessedPerSecond)),
+ "total_bytes", humanizeBytes(uint64(stats.Summary.TotalBytesProcessed)),
+ "total_bytes_structured_metadata", humanizeBytes(uint64(stats.Summary.TotalStructuredMetadataBytesProcessed)),
"lines_per_second", stats.Summary.LinesProcessedPerSecond,
"total_lines", stats.Summary.TotalLinesProcessed,
"post_filter_lines", stats.Summary.TotalPostFilterLines,
@@ -173,6 +173,26 @@ func RecordRangeAndInstantQueryMetrics(
"cache_result_hit", resultCache.EntriesFound,
"cache_result_download_time", resultCache.CacheDownloadTime(),
"cache_result_query_length_served", resultCache.CacheQueryLengthServed(),
+ // Total number of chunk references fetched from the index.
+ "ingester_chunk_refs", stats.Ingester.Store.GetTotalChunksRef(),
+ // Total number of chunks fetched.
+ "ingester_chunk_downloaded", stats.Ingester.Store.GetTotalChunksDownloaded(),
+ // Time spent fetching chunks in nanoseconds.
+ "ingester_chunk_fetch_time", stats.Ingester.Store.ChunksDownloadDuration(),
+ // Total number of chunks matched by the query from ingesters.
+ "ingester_chunk_matches", stats.Ingester.GetTotalChunksMatched(),
+ // Total number of ingesters reached for this query.
+ "ingester_requests", stats.Ingester.GetTotalReached(),
+ // Total bytes processed that were already in memory (found in the head chunk). Includes structured metadata bytes.
+ "ingester_chunk_head_bytes", humanizeBytes(uint64(stats.Ingester.Store.Chunk.GetHeadChunkBytes())),
+ // Total bytes of compressed chunks (blocks) processed.
+ "ingester_chunk_compressed_bytes", humanizeBytes(uint64(stats.Ingester.Store.Chunk.GetCompressedBytes())),
+ // Total bytes decompressed and processed from chunks. Includes structured metadata bytes.
+ "ingester_chunk_decompressed_bytes", humanizeBytes(uint64(stats.Ingester.Store.Chunk.GetDecompressedBytes())),
+ // Total duplicates found while processing.
+ "ingester_chunk_duplicates", stats.Ingester.Store.Chunk.GetTotalDuplicates(),
+ // Total lines post filtering.
+ "ingester_post_filter_lines", stats.Ingester.Store.Chunk.GetPostFilterLines(),
}...)
logValues = append(logValues, tagsToKeyValues(queryTags)...)
@@ -200,6 +220,10 @@ func RecordRangeAndInstantQueryMetrics(
recordUsageStats(queryType, stats)
}
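+// humanizeBytes renders a byte count in compact human-readable form (for example "1.5MB"), stripping the space inserted by humanize.Bytes.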
+func humanizeBytes(val uint64) string {
+ return strings.Replace(humanize.Bytes(val), " ", "", 1)
+}
+
func RecordLabelQueryMetrics(
ctx context.Context,
log log.Logger,
diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go
index 41a96ca24c75..1271fa6d9c21 100644
--- a/pkg/logqlmodel/stats/context.go
+++ b/pkg/logqlmodel/stats/context.go
@@ -199,6 +199,10 @@ func (s *Store) Merge(m Store) {
}
}
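+// ChunksDownloadDuration returns the time spent downloading chunks as a time.Duration; the underlying stat is recorded in nanoseconds.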
+func (s *Store) ChunksDownloadDuration() time.Duration {
+ return time.Duration(s.GetChunksDownloadTime())
+}
+
func (s *Summary) Merge(m Summary) {
s.Splits += m.Splits
s.Shards += m.Shards
From 620488656f79c3944c7dfa92165ac55d2408d966 Mon Sep 17 00:00:00 2001
From: Salva Corts
Date: Tue, 20 Feb 2024 15:54:49 +0100
Subject: [PATCH 093/130] Set tracing.profiling-enabled to true by default
(#12004)
---
pkg/tracing/config.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/pkg/tracing/config.go b/pkg/tracing/config.go
index f9faefa6a730..a0877a9ce836 100644
--- a/pkg/tracing/config.go
+++ b/pkg/tracing/config.go
@@ -11,6 +11,7 @@ type Config struct {
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.Enabled, "tracing.enabled", true, "Set to false to disable tracing.")
+ f.BoolVar(&cfg.ProfilingEnabled, "tracing.profiling-enabled", true, "Set to true to enable profiling integration.")
}
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
From 71fa802c86a39701f2cc171caa1392fcb9309eba Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Tue, 20 Feb 2024 18:06:29 +0100
Subject: [PATCH 094/130] Map ring token keyspace (uint32) into fingerprint
keyspace (uint64) (#11975)
In order to compare the key ranges from the bloom gateway's ring with series fingerprints, both need to use the same keyspace.
The ring, however, uses tokens within a `uint32` keyspace, whereas the fingerprints use a `uint64` keyspace.
Therefore the ranges derived from the ring tokens need to be mapped into the `uint64` keyspace. This is done by bit-shifting the Min value 32 bits to the left, and bit-shifting the Max value 32 bits to the left and adding `(1<<32)-1` to "fill" the remaining values up to the next Min value.
The structs used to combine ring instance information and token/fingerprint ranges have yet to be simplified. However, that is not the goal of this PR.
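As a minimal standalone illustration of this mapping (a sketch only; the helper
name and example values are illustrative, mirroring the
mapTokenRangeToFingerprintRange function added in this patch):

    package main

    import (
        "fmt"
        "math"
    )

    // tokenRangeToFingerprintRange maps an inclusive uint32 token range into
    // the uint64 fingerprint keyspace: Min is shifted 32 bits to the left,
    // and Max is shifted 32 bits to the left and padded with (1<<32)-1 so
    // the range stays contiguous up to the next instance's Min.
    func tokenRangeToFingerprintRange(min, max uint32) (uint64, uint64) {
        lo := uint64(min) << 32
        hi := uint64(max)<<32 | math.MaxUint32
        return lo, hi
    }

    func main() {
        // The single token 0 expands to the first 2^32 fingerprints.
        lo, hi := tokenRangeToFingerprintRange(0, 0)
        fmt.Printf("%016x-%016x\n", lo, hi) // 0000000000000000-00000000ffffffff
    }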
Signed-off-by: Christian Haudum
Co-authored-by: Owen Diehl
---
pkg/bloomgateway/client.go | 72 ++++----
pkg/bloomgateway/client_test.go | 302 ++++++++++++++++++--------------
pkg/bloomutils/ring.go | 6 +-
3 files changed, 220 insertions(+), 160 deletions(-)
diff --git a/pkg/bloomgateway/client.go b/pkg/bloomgateway/client.go
index e5fd35d884fb..721e0c35ca50 100644
--- a/pkg/bloomgateway/client.go
+++ b/pkg/bloomgateway/client.go
@@ -291,16 +291,26 @@ func (c *GatewayClient) doForAddrs(addrs []string, fn func(logproto.BloomGateway
return err
}
-func groupFingerprintsByServer(groups []*logproto.GroupedChunkRefs, servers []addrsWithTokenRange) []instanceWithFingerprints {
+func groupFingerprintsByServer(groups []*logproto.GroupedChunkRefs, servers []addrsWithBounds) []instanceWithFingerprints {
boundedFingerprints := partitionFingerprintsByAddresses(groups, servers)
return groupByInstance(boundedFingerprints)
}
-func serverAddressesWithTokenRanges(subRing ring.ReadRing, instances []ring.InstanceDesc) ([]addrsWithTokenRange, error) {
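+// mapTokenRangeToFingerprintRange maps an inclusive uint32 token range into the
+// uint64 fingerprint keyspace by left-shifting both bounds 32 bits and padding
+// the upper bound with MaxUint32.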
+func mapTokenRangeToFingerprintRange(r bloomutils.Range[uint32]) v1.FingerprintBounds {
+ minFp := uint64(r.Min) << 32
+ maxFp := uint64(r.Max) << 32
+ return v1.NewBounds(
+ model.Fingerprint(minFp),
+ model.Fingerprint(maxFp|math.MaxUint32),
+ )
+}
+
+func serverAddressesWithTokenRanges(subRing ring.ReadRing, instances []ring.InstanceDesc) ([]addrsWithBounds, error) {
bufDescs, bufHosts, bufZones := ring.MakeBuffersForGet()
- servers := make([]addrsWithTokenRange, 0, len(instances))
+ servers := make([]addrsWithBounds, 0, len(instances))
it := bloomutils.NewInstanceSortMergeIterator(instances)
+
for it.Next() {
// We can use one of the tokens from the token range
// to obtain all addresses for that token.
@@ -308,54 +318,56 @@ func serverAddressesWithTokenRanges(subRing ring.ReadRing, instances []ring.Inst
if err != nil {
return nil, errors.Wrap(err, "bloom gateway get ring")
}
- servers = append(servers, addrsWithTokenRange{
- id: it.At().Instance.Id,
- addrs: rs.GetAddresses(),
- tokenRange: it.At().TokenRange,
+
+ bounds := mapTokenRangeToFingerprintRange(it.At().TokenRange)
+ servers = append(servers, addrsWithBounds{
+ id: it.At().Instance.Id,
+ addrs: rs.GetAddresses(),
+ FingerprintBounds: bounds,
})
}
- if len(servers) > 0 && servers[len(servers)-1].tokenRange.Max < math.MaxUint32 {
- // append the instance for the token range between the greates token and MaxUint32
- servers = append(servers, addrsWithTokenRange{
- id: servers[0].id,
- addrs: servers[0].addrs,
- tokenRange: bloomutils.NewTokenRange(servers[len(servers)-1].tokenRange.Max+1, math.MaxUint32),
+ if len(servers) > 0 && servers[len(servers)-1].Max < math.MaxUint64 {
+ // append the first instance again to cover the range between the largest fingerprint and MaxUint64
+ // TODO(owen-d): support wrapping around keyspace for token ranges
+ servers = append(servers, addrsWithBounds{
+ id: servers[0].id,
+ addrs: servers[0].addrs,
+ FingerprintBounds: v1.NewBounds(
+ servers[len(servers)-1].Max+1,
+ model.Fingerprint(math.MaxUint64),
+ ),
})
}
return servers, nil
}
-type addrsWithTokenRange struct {
- id string
- addrs []string
- tokenRange bloomutils.Range[uint32]
-}
-
-func (s addrsWithTokenRange) cmp(token uint32) v1.BoundsCheck {
- return s.tokenRange.Cmp(token)
+type addrsWithBounds struct {
+ v1.FingerprintBounds
+ id string
+ addrs []string
}
type instanceWithFingerprints struct {
- instance addrsWithTokenRange
+ instance addrsWithBounds
fingerprints []*logproto.GroupedChunkRefs
}
-func partitionFingerprintsByAddresses(fingerprints []*logproto.GroupedChunkRefs, addresses []addrsWithTokenRange) (result []instanceWithFingerprints) {
+func partitionFingerprintsByAddresses(fingerprints []*logproto.GroupedChunkRefs, addresses []addrsWithBounds) (result []instanceWithFingerprints) {
for _, instance := range addresses {
- min, _ := slices.BinarySearchFunc(fingerprints, instance.tokenRange, func(g *logproto.GroupedChunkRefs, r bloomutils.Range[uint32]) int {
- if uint32(g.Fingerprint) < r.Min {
+ min, _ := slices.BinarySearchFunc(fingerprints, instance.FingerprintBounds, func(g *logproto.GroupedChunkRefs, b v1.FingerprintBounds) int {
+ if g.Fingerprint < uint64(b.Min) {
return -1
- } else if uint32(g.Fingerprint) > r.Min {
+ } else if g.Fingerprint > uint64(b.Min) {
return 1
}
return 0
})
- max, _ := slices.BinarySearchFunc(fingerprints, instance.tokenRange, func(g *logproto.GroupedChunkRefs, r bloomutils.Range[uint32]) int {
- if uint32(g.Fingerprint) <= r.Max {
+ max, _ := slices.BinarySearchFunc(fingerprints, instance.FingerprintBounds, func(g *logproto.GroupedChunkRefs, b v1.FingerprintBounds) int {
+ if g.Fingerprint <= uint64(b.Max) {
return -1
- } else if uint32(g.Fingerprint) > r.Max {
+ } else if g.Fingerprint > uint64(b.Max) {
return 1
}
return 0
@@ -398,7 +410,7 @@ func groupByInstance(boundedFingerprints []instanceWithFingerprints) []instanceW
pos[cur.instance.id] = len(result)
result = append(result, instanceWithFingerprints{
- instance: addrsWithTokenRange{
+ instance: addrsWithBounds{
id: cur.instance.id,
addrs: cur.instance.addrs,
},
diff --git a/pkg/bloomgateway/client_test.go b/pkg/bloomgateway/client_test.go
index 8a9a3d35646c..71ac0ec0639a 100644
--- a/pkg/bloomgateway/client_test.go
+++ b/pkg/bloomgateway/client_test.go
@@ -17,12 +17,10 @@ import (
"github.com/grafana/loki/pkg/bloomutils"
"github.com/grafana/loki/pkg/logproto"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/validation"
)
-// short constructor
-var newTr = bloomutils.NewTokenRange
-
func TestBloomGatewayClient(t *testing.T) {
logger := log.NewNopLogger()
reg := prometheus.NewRegistry()
@@ -43,24 +41,20 @@ func TestBloomGatewayClient(t *testing.T) {
}
func TestBloomGatewayClient_PartitionFingerprintsByAddresses(t *testing.T) {
+ // Create 10 fingerprints [0, 2, 4, ... 18]
+ groups := make([]*logproto.GroupedChunkRefs, 0, 10)
+ for i := 0; i < 20; i += 2 {
+ groups = append(groups, &logproto.GroupedChunkRefs{Fingerprint: uint64(i)})
+ }
+
// instance token ranges do not overlap
t.Run("non-overlapping", func(t *testing.T) {
- groups := []*logproto.GroupedChunkRefs{
- {Fingerprint: 0},
- {Fingerprint: 100},
- {Fingerprint: 101},
- {Fingerprint: 200},
- {Fingerprint: 201},
- {Fingerprint: 300},
- {Fingerprint: 301},
- {Fingerprint: 400},
- {Fingerprint: 401}, // out of bounds, will be dismissed
- }
- servers := []addrsWithTokenRange{
- {id: "instance-1", addrs: []string{"10.0.0.1"}, tokenRange: newTr(0, 100)},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, tokenRange: newTr(101, 200)},
- {id: "instance-3", addrs: []string{"10.0.0.3"}, tokenRange: newTr(201, 300)},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, tokenRange: newTr(301, 400)},
+
+ servers := []addrsWithBounds{
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, FingerprintBounds: v1.NewBounds(0, 4)},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, FingerprintBounds: v1.NewBounds(5, 9)},
+ {id: "instance-3", addrs: []string{"10.0.0.3"}, FingerprintBounds: v1.NewBounds(10, 14)},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, FingerprintBounds: v1.NewBounds(15, 19)},
}
// partition fingerprints
@@ -70,28 +64,30 @@ func TestBloomGatewayClient_PartitionFingerprintsByAddresses(t *testing.T) {
instance: servers[0],
fingerprints: []*logproto.GroupedChunkRefs{
{Fingerprint: 0},
- {Fingerprint: 100},
+ {Fingerprint: 2},
+ {Fingerprint: 4},
},
},
{
instance: servers[1],
fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 101},
- {Fingerprint: 200},
+ {Fingerprint: 6},
+ {Fingerprint: 8},
},
},
{
instance: servers[2],
fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 201},
- {Fingerprint: 300},
+ {Fingerprint: 10},
+ {Fingerprint: 12},
+ {Fingerprint: 14},
},
},
{
instance: servers[3],
fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 301},
- {Fingerprint: 400},
+ {Fingerprint: 16},
+ {Fingerprint: 18},
},
},
}
@@ -103,26 +99,28 @@ func TestBloomGatewayClient_PartitionFingerprintsByAddresses(t *testing.T) {
expected = []instanceWithFingerprints{
{
- instance: addrsWithTokenRange{id: "instance-1", addrs: []string{"10.0.0.1"}},
+ instance: addrsWithBounds{id: "instance-1", addrs: []string{"10.0.0.1"}},
fingerprints: []*logproto.GroupedChunkRefs{
{Fingerprint: 0},
- {Fingerprint: 100},
+ {Fingerprint: 2},
+ {Fingerprint: 4},
},
},
{
- instance: addrsWithTokenRange{id: "instance-2", addrs: []string{"10.0.0.2"}},
+ instance: addrsWithBounds{id: "instance-2", addrs: []string{"10.0.0.2"}},
fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 101},
- {Fingerprint: 200},
- {Fingerprint: 301},
- {Fingerprint: 400},
+ {Fingerprint: 6},
+ {Fingerprint: 8},
+ {Fingerprint: 16},
+ {Fingerprint: 18},
},
},
{
- instance: addrsWithTokenRange{id: "instance-3", addrs: []string{"10.0.0.3"}},
+ instance: addrsWithBounds{id: "instance-3", addrs: []string{"10.0.0.3"}},
fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 201},
- {Fingerprint: 300},
+ {Fingerprint: 10},
+ {Fingerprint: 12},
+ {Fingerprint: 14},
},
},
}
@@ -132,33 +130,45 @@ func TestBloomGatewayClient_PartitionFingerprintsByAddresses(t *testing.T) {
// instance token ranges overlap
t.Run("overlapping", func(t *testing.T) {
- groups := []*logproto.GroupedChunkRefs{
- {Fingerprint: 50},
- {Fingerprint: 150},
- {Fingerprint: 250},
- {Fingerprint: 350},
- }
- servers := []addrsWithTokenRange{
- {id: "instance-1", addrs: []string{"10.0.0.1"}, tokenRange: newTr(0, 200)},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, tokenRange: newTr(100, 300)},
- {id: "instance-3", addrs: []string{"10.0.0.3"}, tokenRange: newTr(200, 400)},
+ servers := []addrsWithBounds{
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, FingerprintBounds: v1.NewBounds(0, 9)},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, FingerprintBounds: v1.NewBounds(5, 14)},
+ {id: "instance-3", addrs: []string{"10.0.0.3"}, FingerprintBounds: v1.NewBounds(10, 19)},
}
// partition fingerprints
expected := []instanceWithFingerprints{
- {instance: servers[0], fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 50},
- {Fingerprint: 150},
- }},
- {instance: servers[1], fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 150},
- {Fingerprint: 250},
- }},
- {instance: servers[2], fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 250},
- {Fingerprint: 350},
- }},
+ {
+ instance: servers[0],
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 0},
+ {Fingerprint: 2},
+ {Fingerprint: 4},
+ {Fingerprint: 6},
+ {Fingerprint: 8},
+ },
+ },
+ {
+ instance: servers[1],
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 6},
+ {Fingerprint: 8},
+ {Fingerprint: 10},
+ {Fingerprint: 12},
+ {Fingerprint: 14},
+ },
+ },
+ {
+ instance: servers[2],
+ fingerprints: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 10},
+ {Fingerprint: 12},
+ {Fingerprint: 14},
+ {Fingerprint: 16},
+ {Fingerprint: 18},
+ },
+ },
}
bounded := partitionFingerprintsByAddresses(groups, servers)
@@ -177,12 +187,15 @@ func BenchmarkPartitionFingerprintsByAddresses(b *testing.B) {
numServers := 100
tokenStep := math.MaxUint32 / uint32(numServers)
- servers := make([]addrsWithTokenRange, 0, numServers)
+ servers := make([]addrsWithBounds, 0, numServers)
for i := uint32(0); i < math.MaxUint32-tokenStep; i += tokenStep {
- servers = append(servers, addrsWithTokenRange{
- id: fmt.Sprintf("instance-%x", i),
- addrs: []string{fmt.Sprintf("%d", i)},
- tokenRange: newTr(i, i+tokenStep),
+ servers = append(servers, addrsWithBounds{
+ id: fmt.Sprintf("instance-%x", i),
+ addrs: []string{fmt.Sprintf("%d", i)},
+ FingerprintBounds: v1.NewBounds(
+ model.Fingerprint(i)<<32,
+ model.Fingerprint(i+tokenStep)<<32,
+ ),
})
}
@@ -193,34 +206,55 @@ func BenchmarkPartitionFingerprintsByAddresses(b *testing.B) {
}
}
+func TestBloomGatewayClient_MapTokenRangeToFingerprintRange(t *testing.T) {
+ testCases := map[string]struct {
+ lshift int
+ inp bloomutils.Range[uint32]
+ exp v1.FingerprintBounds
+ }{
+ "single token expands to multiple fingerprints": {
+ inp: bloomutils.NewTokenRange(0, 0),
+ exp: v1.NewBounds(0, 0xffffffff),
+ },
+ "max value expands to max value of new range": {
+ inp: bloomutils.NewTokenRange((1 << 31), math.MaxUint32),
+ exp: v1.NewBounds((1 << 63), 0xffffffffffffffff),
+ },
+ }
+ for desc, tc := range testCases {
+ t.Run(desc, func(t *testing.T) {
+ actual := mapTokenRangeToFingerprintRange(tc.inp)
+ require.Equal(t, tc.exp, actual)
+ })
+ }
+}
+
func TestBloomGatewayClient_ServerAddressesWithTokenRanges(t *testing.T) {
testCases := map[string]struct {
instances []ring.InstanceDesc
- expected []addrsWithTokenRange
+ expected []addrsWithBounds
}{
- "one token per instance": {
+ "one token per instance, no gaps between fingerprint ranges": {
instances: []ring.InstanceDesc{
- {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{math.MaxUint32 / 6 * 1}},
- {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{math.MaxUint32 / 6 * 3}},
- {Id: "instance-3", Addr: "10.0.0.3", Tokens: []uint32{math.MaxUint32 / 6 * 5}},
+ {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{(1 << 30) * 1}}, // 0x40000000
+ {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{(1 << 30) * 2}}, // 0x80000000
+ {Id: "instance-3", Addr: "10.0.0.3", Tokens: []uint32{(1 << 30) * 3}}, // 0xc0000000
},
- expected: []addrsWithTokenRange{
- {id: "instance-1", addrs: []string{"10.0.0.1"}, tokenRange: newTr(0, math.MaxUint32/6*1)},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, tokenRange: newTr(math.MaxUint32/6*1+1, math.MaxUint32/6*3)},
- {id: "instance-3", addrs: []string{"10.0.0.3"}, tokenRange: newTr(math.MaxUint32/6*3+1, math.MaxUint32/6*5)},
- {id: "instance-1", addrs: []string{"10.0.0.1"}, tokenRange: newTr(math.MaxUint32/6*5+1, math.MaxUint32)},
+ expected: []addrsWithBounds{
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, FingerprintBounds: v1.NewBounds(0, 4611686022722355199)},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, FingerprintBounds: v1.NewBounds(4611686022722355200, 9223372041149743103)},
+ {id: "instance-3", addrs: []string{"10.0.0.3"}, FingerprintBounds: v1.NewBounds(9223372041149743104, 13835058059577131007)},
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, FingerprintBounds: v1.NewBounds(13835058059577131008, 18446744073709551615)},
},
},
- "MinUint32 and MaxUint32 are tokens in the ring": {
+ "MinUint32 and MaxUint32 are actual tokens in the ring": {
instances: []ring.InstanceDesc{
- {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{0, math.MaxUint32 / 3 * 2}},
- {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{math.MaxUint32 / 3 * 1, math.MaxUint32}},
+ {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{0}},
+ {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{math.MaxUint32}},
},
- expected: []addrsWithTokenRange{
- {id: "instance-1", addrs: []string{"10.0.0.1"}, tokenRange: newTr(0, 0)},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, tokenRange: newTr(1, math.MaxUint32/3)},
- {id: "instance-1", addrs: []string{"10.0.0.1"}, tokenRange: newTr(math.MaxUint32/3*1+1, math.MaxUint32/3*2)},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, tokenRange: newTr(math.MaxUint32/3*2+1, math.MaxUint32)},
+ expected: []addrsWithBounds{
+ {id: "instance-1", addrs: []string{"10.0.0.1"}, FingerprintBounds: v1.NewBounds(0, (1<<32)-1)},
+ {id: "instance-2", addrs: []string{"10.0.0.2"}, FingerprintBounds: v1.NewBounds((1 << 32), math.MaxUint64)},
},
},
}
@@ -239,15 +273,27 @@ func TestBloomGatewayClient_ServerAddressesWithTokenRanges(t *testing.T) {
func TestBloomGatewayClient_GroupFingerprintsByServer(t *testing.T) {
instances := []ring.InstanceDesc{
- {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{2146405214, 1029997044, 678878693}},
- {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{296463531, 1697323986, 800258284}},
- {Id: "instance-3", Addr: "10.0.0.3", Tokens: []uint32{2014002871, 315617625, 1036168527}},
+ {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{0x1fffffff, 0x7fffffff}},
+ {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{0x3fffffff, 0x9fffffff}},
+ {Id: "instance-3", Addr: "10.0.0.3", Tokens: []uint32{0x5fffffff, 0xbfffffff}},
}
- it := bloomutils.NewInstanceSortMergeIterator(instances)
- for it.Next() {
- t.Log(it.At().TokenRange.Max, it.At().Instance.Addr)
- }
+ subRing := newMockRing(instances)
+ servers, err := serverAddressesWithTokenRanges(subRing, instances)
+ require.NoError(t, err)
+
+ // for _, s := range servers {
+ // t.Log(s, v1.NewBounds(model.Fingerprint(s.fpRange.Min), model.Fingerprint(s.fpRange.Max)))
+ // }
+ /**
+ {instance-1 [10.0.0.1] { 0 536870911} { 0 2305843004918726656}} 0000000000000000-1fffffff00000000
+ {instance-2 [10.0.0.2] { 536870912 1073741823} { 2305843009213693952 4611686014132420608}} 2000000000000000-3fffffff00000000
+ {instance-3 [10.0.0.3] {1073741824 1610612735} { 4611686018427387904 6917529023346114560}} 4000000000000000-5fffffff00000000
+ {instance-1 [10.0.0.1] {1610612736 2147483647} { 6917529027641081856 9223372032559808512}} 6000000000000000-7fffffff00000000
+ {instance-2 [10.0.0.2] {2147483648 2684354559} { 9223372036854775808 11529215041773502464}} 8000000000000000-9fffffff00000000
+ {instance-3 [10.0.0.3] {2684354560 3221225471} {11529215046068469760 13835058050987196416}} a000000000000000-bfffffff00000000
+ {instance-1 [10.0.0.1] {3221225472 4294967295} {13835058055282163712 18446744073709551615}} c000000000000000-ffffffffffffffff
+ **/
testCases := []struct {
name string
@@ -262,18 +308,20 @@ func TestBloomGatewayClient_GroupFingerprintsByServer(t *testing.T) {
{
name: "fingerprints within a single token range are grouped",
chunks: []*logproto.GroupedChunkRefs{
- {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}},
- {Fingerprint: 1000000001, Refs: []*logproto.ShortRef{{Checksum: 2}}},
+ {Fingerprint: 0x5000000000000001},
+ {Fingerprint: 0x5000000000000010},
+ {Fingerprint: 0x5000000000000100},
},
expected: []instanceWithFingerprints{
{
- instance: addrsWithTokenRange{
- id: "instance-1",
- addrs: []string{"10.0.0.1"},
+ instance: addrsWithBounds{
+ id: "instance-3",
+ addrs: []string{"10.0.0.3"},
},
fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}},
- {Fingerprint: 1000000001, Refs: []*logproto.ShortRef{{Checksum: 2}}},
+ {Fingerprint: 0x5000000000000001},
+ {Fingerprint: 0x5000000000000010},
+ {Fingerprint: 0x5000000000000100},
},
},
},
@@ -281,18 +329,20 @@ func TestBloomGatewayClient_GroupFingerprintsByServer(t *testing.T) {
{
name: "fingerprints within multiple token ranges of a single instance are grouped",
chunks: []*logproto.GroupedChunkRefs{
- {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}},
- {Fingerprint: 2100000000, Refs: []*logproto.ShortRef{{Checksum: 2}}},
+ {Fingerprint: 0x1000000000000000},
+ {Fingerprint: 0x7000000000000000},
+ {Fingerprint: 0xd000000000000000},
},
expected: []instanceWithFingerprints{
{
- instance: addrsWithTokenRange{
+ instance: addrsWithBounds{
id: "instance-1",
addrs: []string{"10.0.0.1"},
},
fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}},
- {Fingerprint: 2100000000, Refs: []*logproto.ShortRef{{Checksum: 2}}},
+ {Fingerprint: 0x1000000000000000},
+ {Fingerprint: 0x7000000000000000},
+ {Fingerprint: 0xd000000000000000},
},
},
},
@@ -300,55 +350,52 @@ func TestBloomGatewayClient_GroupFingerprintsByServer(t *testing.T) {
{
name: "fingerprints with token ranges of multiple instances are grouped",
chunks: []*logproto.GroupedChunkRefs{
- // instance 1
- {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}},
- // instance 1
- {Fingerprint: 2100000000, Refs: []*logproto.ShortRef{{Checksum: 2}}},
- // instance 2
- {Fingerprint: 290000000, Refs: []*logproto.ShortRef{{Checksum: 3}}},
- // instance 2 (fingerprint equals instance token)
- {Fingerprint: 800258284, Refs: []*logproto.ShortRef{{Checksum: 4}}},
- // instance 2 (fingerprint greater than greatest token)
- {Fingerprint: 2147483648, Refs: []*logproto.ShortRef{{Checksum: 5}}},
- // instance 3
- {Fingerprint: 1029997045, Refs: []*logproto.ShortRef{{Checksum: 6}}},
+ {Fingerprint: 0x1000000000000000},
+ {Fingerprint: 0x3000000000000000},
+ {Fingerprint: 0x5000000000000000},
+ {Fingerprint: 0x7000000000000000},
+ {Fingerprint: 0x9000000000000000},
+ {Fingerprint: 0xb000000000000000},
+ {Fingerprint: 0xd000000000000000},
+ {Fingerprint: 0xf000000000000000},
},
expected: []instanceWithFingerprints{
{
- instance: addrsWithTokenRange{
- id: "instance-2",
- addrs: []string{"10.0.0.2"},
+ instance: addrsWithBounds{
+ id: "instance-1",
+ addrs: []string{"10.0.0.1"},
},
fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 290000000, Refs: []*logproto.ShortRef{{Checksum: 3}}},
- {Fingerprint: 800258284, Refs: []*logproto.ShortRef{{Checksum: 4}}},
- {Fingerprint: 2147483648, Refs: []*logproto.ShortRef{{Checksum: 5}}},
+ {Fingerprint: 0x1000000000000000},
+ {Fingerprint: 0x7000000000000000},
+ {Fingerprint: 0xd000000000000000},
+ {Fingerprint: 0xf000000000000000},
},
},
{
- instance: addrsWithTokenRange{
- id: "instance-1",
- addrs: []string{"10.0.0.1"},
+ instance: addrsWithBounds{
+ id: "instance-2",
+ addrs: []string{"10.0.0.2"},
},
fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 1000000000, Refs: []*logproto.ShortRef{{Checksum: 1}}},
- {Fingerprint: 2100000000, Refs: []*logproto.ShortRef{{Checksum: 2}}},
+ {Fingerprint: 0x3000000000000000},
+ {Fingerprint: 0x9000000000000000},
},
},
{
- instance: addrsWithTokenRange{
+ instance: addrsWithBounds{
id: "instance-3",
addrs: []string{"10.0.0.3"},
},
fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 1029997045, Refs: []*logproto.ShortRef{{Checksum: 6}}},
+ {Fingerprint: 0x5000000000000000},
+ {Fingerprint: 0xb000000000000000},
},
},
},
},
}
- subRing := newMockRing(instances)
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
@@ -356,9 +403,6 @@ func TestBloomGatewayClient_GroupFingerprintsByServer(t *testing.T) {
sort.Slice(tc.chunks, func(i, j int) bool {
return tc.chunks[i].Fingerprint < tc.chunks[j].Fingerprint
})
-
- servers, err := serverAddressesWithTokenRanges(subRing, instances)
- require.NoError(t, err)
res := groupFingerprintsByServer(tc.chunks, servers)
require.Equal(t, tc.expected, res)
})
diff --git a/pkg/bloomutils/ring.go b/pkg/bloomutils/ring.go
index d2aebe5b88a3..b3246fd5876a 100644
--- a/pkg/bloomutils/ring.go
+++ b/pkg/bloomutils/ring.go
@@ -44,8 +44,12 @@ func (r Range[T]) Cmp(t T) v1.BoundsCheck {
return v1.Overlap
}
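+// NewRange constructs an inclusive Range over any unsigned integer type.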
+func NewRange[T constraints.Unsigned](min, max T) Range[T] {
+ return Range[T]{Min: min, Max: max}
+}
+
func NewTokenRange(min, max uint32) Range[uint32] {
- return Range[uint32]{min, max}
+ return Range[uint32]{Min: min, Max: max}
}
type InstanceWithTokenRange struct {
From eb1379afb9518e7c3edbe85dbe409edf2b6c5b87 Mon Sep 17 00:00:00 2001
From: Danny Kopping
Date: Tue, 20 Feb 2024 19:54:02 +0200
Subject: [PATCH 095/130] Remove unnecessary ingester fields from metrics.go
which only work in single binary (#12005)
Signed-off-by: Danny Kopping
---
pkg/logql/metrics.go | 4 ----
1 file changed, 4 deletions(-)
diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go
index 694acefbc574..f9b9d1a8f952 100644
--- a/pkg/logql/metrics.go
+++ b/pkg/logql/metrics.go
@@ -177,8 +177,6 @@ func RecordRangeAndInstantQueryMetrics(
"ingester_chunk_refs", stats.Ingester.Store.GetTotalChunksRef(),
// Total number of chunks fetched.
"ingester_chunk_downloaded", stats.Ingester.Store.GetTotalChunksDownloaded(),
- // Time spent fetching chunks in nanoseconds.
- "ingester_chunk_fetch_time", stats.Ingester.Store.ChunksDownloadDuration(),
// Total number of chunks matched by the query from ingesters.
"ingester_chunk_matches", stats.Ingester.GetTotalChunksMatched(),
// Total number of ingesters reached for this query.
@@ -189,8 +187,6 @@ func RecordRangeAndInstantQueryMetrics(
"ingester_chunk_compressed_bytes", humanizeBytes(uint64(stats.Ingester.Store.Chunk.GetCompressedBytes())),
// Total bytes decompressed and processed from chunks. Includes structured metadata bytes.
"ingester_chunk_decompressed_bytes", humanizeBytes(uint64(stats.Ingester.Store.Chunk.GetDecompressedBytes())),
- // Total duplicates found while processing.
- "ingester_chunk_duplicates", stats.Ingester.Store.Chunk.GetTotalDuplicates(),
// Total lines post filtering.
"ingester_post_filter_lines", stats.Ingester.Store.Chunk.GetPostFilterLines(),
}...)
From 166229818d48c5a595a03df5417b989e23416100 Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Tue, 20 Feb 2024 10:40:08 -0800
Subject: [PATCH 096/130] [Blooms] Consistent hashing via tokens for
bloomcompactor (#12002)
---
pkg/bloomcompactor/batch.go | 2 +-
pkg/bloomcompactor/bloomcompactor.go | 138 +++++++++++++++++++---
pkg/bloomcompactor/bloomcompactor_test.go | 81 +++++++++++--
pkg/bloomutils/ring.go | 49 ++------
pkg/bloomutils/ring_test.go | 56 ++++-----
pkg/loki/modules.go | 4 +-
6 files changed, 233 insertions(+), 97 deletions(-)
diff --git a/pkg/bloomcompactor/batch.go b/pkg/bloomcompactor/batch.go
index 920bff1decc8..e9fae9f9df0f 100644
--- a/pkg/bloomcompactor/batch.go
+++ b/pkg/bloomcompactor/batch.go
@@ -132,7 +132,7 @@ func newBatchedChunkLoader(
time.Unix(0, 0),
time.Unix(0, math.MaxInt64),
logproto.FORWARD,
- logql_log.NewNoopPipeline().ForStream(c.Metric),
+ logql_log.NewNoopPipeline().ForStream(nil),
)
if err != nil {
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index cc96cc7219e8..dd5a9c96ca81 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -2,6 +2,10 @@ package bloomcompactor
import (
"context"
+ "fmt"
+ "math"
+ "slices"
+ "sort"
"sync"
"time"
@@ -193,23 +197,120 @@ func (c *Compactor) tenants(ctx context.Context, table config.DayTable) (v1.Iter
}
// ownsTenant returns the ownership range for the tenant, if the compactor owns the tenant, and an error.
-func (c *Compactor) ownsTenant(tenant string) (v1.FingerprintBounds, bool, error) {
+func (c *Compactor) ownsTenant(tenant string) ([]v1.FingerprintBounds, bool, error) {
tenantRing, owned := c.sharding.OwnsTenant(tenant)
if !owned {
- return v1.FingerprintBounds{}, false, nil
+ return nil, false, nil
}
+ // TODO(owen-d): use .GetTokenRangesForInstance()
+ // when it's supported for non zone-aware rings
+ // instead of doing all this manually
+
rs, err := tenantRing.GetAllHealthy(RingOp)
if err != nil {
- return v1.FingerprintBounds{}, false, errors.Wrap(err, "getting ring healthy instances")
-
+ return nil, false, errors.Wrap(err, "getting ring healthy instances")
}
- keyRange, err := bloomutils.KeyRangeForInstance(c.cfg.Ring.InstanceID, rs.Instances, bloomutils.Uint64Range)
+ ranges, err := tokenRangesForInstance(c.cfg.Ring.InstanceID, rs.Instances)
if err != nil {
- return v1.FingerprintBounds{}, false, errors.Wrap(err, "getting instance token range")
+ return nil, false, errors.Wrap(err, "getting token ranges for instance")
}
- return v1.NewBounds(model.Fingerprint(keyRange.Min), model.Fingerprint(keyRange.Max)), true, nil
+
+ keyspaces := bloomutils.KeyspacesFromTokenRanges(ranges)
+ return keyspaces, true, nil
+}
+
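+// tokenRangesForInstance computes the inclusive token ranges owned by the given
+// instance by walking the merged ring tokens backwards, handling the wrap-around
+// range when the instance owns the first token.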
+func tokenRangesForInstance(id string, instances []ring.InstanceDesc) (ranges ring.TokenRanges, err error) {
+ var ownedTokens map[uint32]struct{}
+
+ // lifted from grafana/dskit/ring/model.go <*Desc>.GetTokens()
+ toks := make([][]uint32, 0, len(instances))
+ for _, instance := range instances {
+ if instance.Id == id {
+ ranges = make(ring.TokenRanges, 0, 2*(len(instance.Tokens)+1))
+ ownedTokens = make(map[uint32]struct{}, len(instance.Tokens))
+ for _, tok := range instance.Tokens {
+ ownedTokens[tok] = struct{}{}
+ }
+ }
+
+ // Tokens may not be sorted for older ring versions, so we enforce sorting here.
+ tokens := instance.Tokens
+ if !sort.IsSorted(ring.Tokens(tokens)) {
+ sort.Sort(ring.Tokens(tokens))
+ }
+
+ toks = append(toks, tokens)
+ }
+
+ if cap(ranges) == 0 {
+ return nil, fmt.Errorf("instance %s not found", id)
+ }
+
+ allTokens := ring.MergeTokens(toks)
+ if len(allTokens) == 0 {
+ return nil, errors.New("no tokens in the ring")
+ }
+
+ // mostly lifted from grafana/dskit/ring/token_range.go <*Ring>.GetTokenRangesForInstance()
+
+ // non-zero value means we're looking for the start of the range. Zero value means we're looking for the next end of range (i.e. a token owned by this instance).
+ rangeEnd := uint32(0)
+
+ // if this instance claimed the first token, it owns the wrap-around range, which we'll break into two separate ranges
+ firstToken := allTokens[0]
+ _, ownsFirstToken := ownedTokens[firstToken]
+
+ if ownsFirstToken {
+ // we'll start by looking for the beginning of the range that ends with math.MaxUint32
+ rangeEnd = math.MaxUint32
+ }
+
+ // walk the ring backwards, alternating looking for ends and starts of ranges
+ for i := len(allTokens) - 1; i > 0; i-- {
+ token := allTokens[i]
+ _, owned := ownedTokens[token]
+
+ if rangeEnd == 0 {
+ // we're looking for the end of the next range
+ if owned {
+ rangeEnd = token - 1
+ }
+ } else {
+ // we have a range end, and are looking for the start of the range
+ if !owned {
+ ranges = append(ranges, rangeEnd, token)
+ rangeEnd = 0
+ }
+ }
+ }
+
+ // finally look at the first token again
+ // - if we have a range end, check if we claimed token 0
+ // - if we don't, we have our start
+ // - if we do, the start is 0
+ // - if we don't have a range end, check if we claimed token 0
+ // - if we don't, do nothing
+ // - if we do, add the range of [0, token-1]
+ // - BUT, if the token itself is 0, do nothing, because we don't own the tokens themselves (we should be covered by the already added range that ends with MaxUint32)
+
+ if rangeEnd == 0 {
+ if ownsFirstToken && firstToken != 0 {
+ ranges = append(ranges, firstToken-1, 0)
+ }
+ } else {
+ if ownsFirstToken {
+ ranges = append(ranges, rangeEnd, 0)
+ } else {
+ ranges = append(ranges, rangeEnd, firstToken)
+ }
+ }
+
+ // Ensure returned ranges are sorted.
+ slices.Sort(ranges)
+
+ return ranges, nil
}
// runs a single round of compaction for all relevant tenants and tables
@@ -266,25 +367,28 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error {
for tenants.Next() && tenants.Err() == nil && ctx.Err() == nil {
c.metrics.tenantsDiscovered.Inc()
tenant := tenants.At()
- ownershipRange, owns, err := c.ownsTenant(tenant)
+ ownershipRanges, owns, err := c.ownsTenant(tenant)
if err != nil {
return errors.Wrap(err, "checking tenant ownership")
}
- level.Debug(c.logger).Log("msg", "enqueueing work for tenant", "tenant", tenant, "table", table, "ownership", ownershipRange.String(), "owns", owns)
+ level.Debug(c.logger).Log("msg", "enqueueing work for tenant", "tenant", tenant, "table", table, "ranges", len(ownershipRanges), "owns", owns)
if !owns {
c.metrics.tenantsSkipped.Inc()
continue
}
c.metrics.tenantsOwned.Inc()
- select {
- case ch <- tenantTable{
- tenant: tenant,
- table: table,
- ownershipRange: ownershipRange,
- }:
- case <-ctx.Done():
- return ctx.Err()
+ for _, ownershipRange := range ownershipRanges {
+
+ select {
+ case ch <- tenantTable{
+ tenant: tenant,
+ table: table,
+ ownershipRange: ownershipRange,
+ }:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
}
}
diff --git a/pkg/bloomcompactor/bloomcompactor_test.go b/pkg/bloomcompactor/bloomcompactor_test.go
index 475ba8ec0585..097e04d2a39a 100644
--- a/pkg/bloomcompactor/bloomcompactor_test.go
+++ b/pkg/bloomcompactor/bloomcompactor_test.go
@@ -8,6 +8,7 @@ import (
"testing"
"time"
+ "github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
@@ -113,7 +114,7 @@ func TestCompactor_ownsTenant(t *testing.T) {
require.NoError(t, err)
if ownsTenant {
compactorOwnsTenant++
- compactorOwnershipRange = append(compactorOwnershipRange, ownershipRange)
+ compactorOwnershipRange = append(compactorOwnershipRange, ownershipRange...)
}
}
require.Equal(t, tc.expectedCompactorsOwningTenant, compactorOwnsTenant)
@@ -135,12 +136,6 @@ func TestCompactor_ownsTenant(t *testing.T) {
coveredKeySpace.Max = boundsA.Max
}
- // Assert that the fingerprint key-space is evenly distributed across the compactors
- // We do some adjustments if the key-space is not evenly distributable, so we use a delta of 10
- // to account for that and check that the key-space is reasonably evenly distributed.
- fpPerTenant := math.MaxUint64 / uint64(tc.expectedCompactorsOwningTenant)
- boundsLen := uint64(boundsA.Max - boundsA.Min)
- require.InDelta(t, fpPerTenant, boundsLen, 10)
}
// Assert that the fingerprint key-space is complete
require.True(t, coveredKeySpace.Equal(v1.NewBounds(0, math.MaxUint64)))
@@ -195,3 +190,75 @@ func (m mockLimits) BloomFalsePositiveRate(_ string) float64 {
func (m mockLimits) BloomCompactorMaxBlockSize(_ string) int {
panic("implement me")
}
+
+func TestTokenRangesForInstance(t *testing.T) {
+ desc := func(id int, tokens ...uint32) ring.InstanceDesc {
+ return ring.InstanceDesc{Id: fmt.Sprintf("%d", id), Tokens: tokens}
+ }
+
+ tests := map[string]struct {
+ input []ring.InstanceDesc
+ exp map[string]ring.TokenRanges
+ err bool
+ }{
+ "no nodes": {
+ input: []ring.InstanceDesc{},
+ exp: map[string]ring.TokenRanges{
+ "0": {0, math.MaxUint32}, // have to put one in here to trigger test
+ },
+ err: true,
+ },
+ "one node": {
+ input: []ring.InstanceDesc{
+ desc(0, 0, 100),
+ },
+ exp: map[string]ring.TokenRanges{
+ "0": {0, math.MaxUint32},
+ },
+ },
+ "two nodes": {
+ input: []ring.InstanceDesc{
+ desc(0, 25, 75),
+ desc(1, 10, 50, 100),
+ },
+ exp: map[string]ring.TokenRanges{
+ "0": {10, 24, 50, 74},
+ "1": {0, 9, 25, 49, 75, math.MaxUint32},
+ },
+ },
+ "consecutive tokens": {
+ input: []ring.InstanceDesc{
+ desc(0, 99),
+ desc(1, 100),
+ },
+ exp: map[string]ring.TokenRanges{
+ "0": {0, 98, 100, math.MaxUint32},
+ "1": {99, 99},
+ },
+ },
+ "extremes": {
+ input: []ring.InstanceDesc{
+ desc(0, 0),
+ desc(1, math.MaxUint32),
+ },
+ exp: map[string]ring.TokenRanges{
+ "0": {math.MaxUint32, math.MaxUint32},
+ "1": {0, math.MaxUint32 - 1},
+ },
+ },
+ }
+
+ for desc, test := range tests {
+ t.Run(desc, func(t *testing.T) {
+ for id := range test.exp {
+ ranges, err := tokenRangesForInstance(id, test.input)
+ if test.err {
+ require.Error(t, err)
+ continue
+ }
+ require.NoError(t, err)
+ require.Equal(t, test.exp[id], ranges)
+ }
+ })
+ }
+}
diff --git a/pkg/bloomutils/ring.go b/pkg/bloomutils/ring.go
index b3246fd5876a..bc58bf09c886 100644
--- a/pkg/bloomutils/ring.go
+++ b/pkg/bloomutils/ring.go
@@ -3,14 +3,13 @@
package bloomutils
import (
- "errors"
"fmt"
"math"
"sort"
"github.com/grafana/dskit/ring"
+ "github.com/prometheus/common/model"
"golang.org/x/exp/constraints"
- "golang.org/x/exp/slices"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
@@ -72,44 +71,16 @@ func (i InstancesWithTokenRange) Contains(token uint32) bool {
return false
}
-// KeyRangeForInstance calculates the token range for a specific instance
-// with given id based on the first token in the ring.
-// This assumes that each instance in the ring is configured with only a single
-// token.
-func KeyRangeForInstance[T constraints.Unsigned](id string, instances []ring.InstanceDesc, keyspace Range[T]) (Range[T], error) {
-
- // Sort instances -- they may not be sorted
- // because they're usually accessed by looking up the tokens (which are sorted)
- sort.Slice(instances, func(i, j int) bool {
- return instances[i].Tokens[0] < instances[j].Tokens[0]
- })
-
- idx := slices.IndexFunc(instances, func(inst ring.InstanceDesc) bool {
- return inst.Id == id
- })
-
- // instance with Id == id not found
- if idx == -1 {
- return Range[T]{}, ring.ErrInstanceNotFound
+// TODO(owen-d): use https://github.com/grafana/loki/pull/11975 after merge
+func KeyspacesFromTokenRanges(tokenRanges ring.TokenRanges) []v1.FingerprintBounds {
+ keyspaces := make([]v1.FingerprintBounds, 0, len(tokenRanges)/2)
+ for i := 0; i < len(tokenRanges)-1; i += 2 {
+ keyspaces = append(keyspaces, v1.FingerprintBounds{
+ Min: model.Fingerprint(tokenRanges[i]) << 32,
+ Max: model.Fingerprint(tokenRanges[i+1])<<32 | model.Fingerprint(math.MaxUint32),
+ })
}
-
- diff := keyspace.Max - keyspace.Min
- i := T(idx)
- n := T(len(instances))
-
- if diff < n {
- return Range[T]{}, errors.New("keyspace is smaller than amount of instances")
- }
-
- step := diff / n
- min := step * i
- max := step*i + step - 1
- if i == n-1 {
- // extend the last token tange to MaxUint32
- max = (keyspace.Max - keyspace.Min)
- }
-
- return Range[T]{min, max}, nil
+ return keyspaces
}
// NewInstanceSortMergeIterator creates an iterator that yields instanceWithToken elements
diff --git a/pkg/bloomutils/ring_test.go b/pkg/bloomutils/ring_test.go
index c9ff6cf5e1d6..47ebb4766490 100644
--- a/pkg/bloomutils/ring_test.go
+++ b/pkg/bloomutils/ring_test.go
@@ -1,11 +1,14 @@
package bloomutils
import (
+ "fmt"
"math"
"testing"
"github.com/grafana/dskit/ring"
"github.com/stretchr/testify/require"
+
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
func TestBloomGatewayClient_InstanceSortMergeIterator(t *testing.T) {
@@ -40,45 +43,34 @@ func uint64Range(min, max uint64) Range[uint64] {
return Range[uint64]{min, max}
}
-func TestBloomGatewayClient_KeyRangeForInstance(t *testing.T) {
- for name, tc := range map[string]struct {
- id string
- input []ring.InstanceDesc
- expected Range[uint64]
+func TestKeyspacesFromTokenRanges(t *testing.T) {
+ for i, tc := range []struct {
+ tokenRanges ring.TokenRanges
+ exp []v1.FingerprintBounds
}{
- "first instance includes 0 token": {
- id: "3",
- input: []ring.InstanceDesc{
- {Id: "1", Tokens: []uint32{3}},
- {Id: "2", Tokens: []uint32{5}},
- {Id: "3", Tokens: []uint32{1}},
+ {
+ tokenRanges: ring.TokenRanges{
+ 0, math.MaxUint32 / 2,
+ math.MaxUint32/2 + 1, math.MaxUint32,
},
- expected: uint64Range(0, math.MaxUint64/3-1),
- },
- "middle instance": {
- id: "1",
- input: []ring.InstanceDesc{
- {Id: "1", Tokens: []uint32{3}},
- {Id: "2", Tokens: []uint32{5}},
- {Id: "3", Tokens: []uint32{1}},
+ exp: []v1.FingerprintBounds{
+ v1.NewBounds(0, math.MaxUint64/2),
+ v1.NewBounds(math.MaxUint64/2+1, math.MaxUint64),
},
- expected: uint64Range(math.MaxUint64/3, math.MaxUint64/3*2-1),
},
- "last instance includes MaxUint32 token": {
- id: "2",
- input: []ring.InstanceDesc{
- {Id: "1", Tokens: []uint32{3}},
- {Id: "2", Tokens: []uint32{5}},
- {Id: "3", Tokens: []uint32{1}},
+ {
+ tokenRanges: ring.TokenRanges{
+ 0, math.MaxUint8,
+ math.MaxUint16, math.MaxUint16 << 1,
+ },
+ exp: []v1.FingerprintBounds{
+ v1.NewBounds(0, 0xff00000000|math.MaxUint32),
+ v1.NewBounds(math.MaxUint16<<32, math.MaxUint16<<33|math.MaxUint32),
},
- expected: uint64Range(math.MaxUint64/3*2, math.MaxUint64),
},
} {
- tc := tc
- t.Run(name, func(t *testing.T) {
- result, err := KeyRangeForInstance(tc.id, tc.input, Uint64Range)
- require.NoError(t, err)
- require.Equal(t, tc.expected, result)
+ t.Run(fmt.Sprint(i), func(t *testing.T) {
+ require.Equal(t, tc.exp, KeyspacesFromTokenRanges(tc.tokenRanges))
})
}
}
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 15ee955355a6..57c6e96a2b3d 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -1446,7 +1446,9 @@ func (t *Loki) initBloomCompactorRing() (services.Service, error) {
// is LegacyMode needed?
// legacyReadMode := t.Cfg.LegacyReadTarget && t.isModuleActive(Read)
- rm, err := lokiring.NewRingManager(bloomCompactorRingKey, lokiring.ServerMode, t.Cfg.BloomCompactor.Ring, 1, 1, util_log.Logger, prometheus.DefaultRegisterer)
+ // TODO(owen-d): configurable num tokens, just use lifecycler config?
+ numTokens := 10
+ rm, err := lokiring.NewRingManager(bloomCompactorRingKey, lokiring.ServerMode, t.Cfg.BloomCompactor.Ring, 1, numTokens, util_log.Logger, prometheus.DefaultRegisterer)
if err != nil {
return nil, gerrors.Wrap(err, "error initializing bloom-compactor ring manager")
From b91bcecb3a5dc5fed99034f22b69e358892946ba Mon Sep 17 00:00:00 2001
From: Callum Styan
Date: Tue, 20 Feb 2024 12:40:29 -0800
Subject: [PATCH 097/130] quantile sharding; fix bug related to # of steps
(instant query mem usage) and use CollapsingLowestDense store for ddsketch
(#11905)
Signed-off-by: Callum Styan
---
pkg/logql/downstream_test.go | 51 ++++++++++++++++++++-
pkg/logql/engine.go | 2 +-
pkg/logql/quantile_over_time_sketch.go | 8 +++-
pkg/logql/quantile_over_time_sketch_test.go | 6 ++-
pkg/logql/sketch/quantile.go | 2 +-
5 files changed, 62 insertions(+), 7 deletions(-)
diff --git a/pkg/logql/downstream_test.go b/pkg/logql/downstream_test.go
index ec5f3170468d..46575c44d8ed 100644
--- a/pkg/logql/downstream_test.go
+++ b/pkg/logql/downstream_test.go
@@ -146,7 +146,7 @@ func TestMappingEquivalenceSketches(t *testing.T) {
regular := NewEngine(opts, q, NoLimits, log.NewNopLogger())
sharded := NewDownstreamEngine(opts, MockDownstreamer{regular}, NoLimits, log.NewNopLogger())
- t.Run(tc.query, func(t *testing.T) {
+ t.Run(tc.query+"_range", func(t *testing.T) {
params, err := NewLiteralParams(
tc.query,
start,
@@ -178,6 +178,40 @@ func TestMappingEquivalenceSketches(t *testing.T) {
relativeError(t, res.Data.(promql.Matrix), shardedRes.Data.(promql.Matrix), tc.realtiveError)
})
+ t.Run(tc.query+"_instant", func(t *testing.T) {
+ // for an instant query we set the start and end to the same timestamp
+ // plus set step and interval to 0
+ params, err := NewLiteralParams(
+ tc.query,
+ time.Unix(0, int64(rounds+1)),
+ time.Unix(0, int64(rounds+1)),
+ 0,
+ 0,
+ logproto.FORWARD,
+ uint32(limit),
+ nil,
+ )
+ require.NoError(t, err)
+ qry := regular.Query(params)
+ ctx := user.InjectOrgID(context.Background(), "fake")
+
+ mapper := NewShardMapper(ConstantShards(shards), nilShardMetrics, []string{ShardQuantileOverTime})
+ _, _, mapped, err := mapper.Parse(params.GetExpression())
+ require.NoError(t, err)
+
+ shardedQry := sharded.Query(ctx, ParamsWithExpressionOverride{
+ Params: params,
+ ExpressionOverride: mapped,
+ })
+
+ res, err := qry.Exec(ctx)
+ require.NoError(t, err)
+
+ shardedRes, err := shardedQry.Exec(ctx)
+ require.NoError(t, err)
+
+ relativeErrorVector(t, res.Data.(promql.Vector), shardedRes.Data.(promql.Vector), tc.realtiveError)
+ })
}
}
@@ -546,6 +580,21 @@ func relativeError(t *testing.T, expected, actual promql.Matrix, alpha float64)
}
}
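+// relativeErrorVector asserts that the two vectors carry the same series and
+// that their sample values agree within the given relative error.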
+func relativeErrorVector(t *testing.T, expected, actual promql.Vector, alpha float64) {
+ require.Len(t, actual, len(expected))
+
+ e := make([]float64, len(expected))
+ a := make([]float64, len(expected))
+ for i := 0; i < len(expected); i++ {
+ require.Equal(t, expected[i].Metric, actual[i].Metric)
+
+ e[i] = expected[i].F
+ a[i] = actual[i].F
+ }
+ require.InEpsilonSlice(t, e, a, alpha)
+}
+
func TestFormat_ShardedExpr(t *testing.T) {
oldMax := syntax.MaxCharsPerLine
syntax.MaxCharsPerLine = 20
diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go
index 8d951ad64c94..a9f3dabe14ee 100644
--- a/pkg/logql/engine.go
+++ b/pkg/logql/engine.go
@@ -363,7 +363,7 @@ func (q *query) evalSample(ctx context.Context, expr syntax.SampleExpr) (promql_
maxSeries := validation.SmallestPositiveIntPerTenant(tenantIDs, maxSeriesCapture)
return q.JoinSampleVector(next, ts, vec, stepEvaluator, maxSeries)
case ProbabilisticQuantileVector:
- return JoinQuantileSketchVector(next, vec, stepEvaluator, q.params)
+ return MergeQuantileSketchVector(next, vec, stepEvaluator, q.params)
default:
return nil, fmt.Errorf("unsupported result type: %T", r)
}
diff --git a/pkg/logql/quantile_over_time_sketch.go b/pkg/logql/quantile_over_time_sketch.go
index 507c72b208ab..24a8a05d89ed 100644
--- a/pkg/logql/quantile_over_time_sketch.go
+++ b/pkg/logql/quantile_over_time_sketch.go
@@ -262,13 +262,17 @@ func (r *quantileSketchBatchRangeVectorIterator) agg(samples []promql.FPoint) sk
return s
}
-// JoinQuantileSketchVector joins the results from stepEvaluator into a ProbabilisticQuantileMatrix.
-func JoinQuantileSketchVector(next bool, r StepResult, stepEvaluator StepEvaluator, params Params) (promql_parser.Value, error) {
+// MergeQuantileSketchVector joins the results from stepEvaluator into a ProbabilisticQuantileMatrix.
+func MergeQuantileSketchVector(next bool, r StepResult, stepEvaluator StepEvaluator, params Params) (promql_parser.Value, error) {
vec := r.QuantileSketchVec()
if stepEvaluator.Error() != nil {
return nil, stepEvaluator.Error()
}
+ if GetRangeType(params) == InstantType {
+ return ProbabilisticQuantileMatrix{vec}, nil
+ }
+
stepCount := int(math.Ceil(float64(params.End().Sub(params.Start()).Nanoseconds()) / float64(params.Step().Nanoseconds())))
if stepCount <= 0 {
stepCount = 1
diff --git a/pkg/logql/quantile_over_time_sketch_test.go b/pkg/logql/quantile_over_time_sketch_test.go
index dc1ff31f509a..488ebdec26f0 100644
--- a/pkg/logql/quantile_over_time_sketch_test.go
+++ b/pkg/logql/quantile_over_time_sketch_test.go
@@ -69,7 +69,7 @@ func TestJoinQuantileSketchVectorError(t *testing.T) {
ev := errorStepEvaluator{
err: errors.New("could not evaluate"),
}
- _, err := JoinQuantileSketchVector(true, result, ev, LiteralParams{})
+ _, err := MergeQuantileSketchVector(true, result, ev, LiteralParams{})
require.ErrorContains(t, err, "could not evaluate")
}
@@ -136,7 +136,7 @@ func BenchmarkJoinQuantileSketchVector(b *testing.B) {
iter: iter,
}
_, _, r := ev.Next()
- m, err := JoinQuantileSketchVector(true, r.QuantileSketchVec(), ev, params)
+ m, err := MergeQuantileSketchVector(true, r.QuantileSketchVec(), ev, params)
require.NoError(b, err)
m.(ProbabilisticQuantileMatrix).Release()
}
@@ -148,7 +148,9 @@ func BenchmarkQuantileBatchRangeVectorIteratorAt(b *testing.B) {
}{
{numberSamples: 1},
{numberSamples: 1_000},
+ {numberSamples: 10_000},
{numberSamples: 100_000},
+ {numberSamples: 1_000_000},
} {
b.Run(fmt.Sprintf("%d-samples", tc.numberSamples), func(b *testing.B) {
r := rand.New(rand.NewSource(42))
diff --git a/pkg/logql/sketch/quantile.go b/pkg/logql/sketch/quantile.go
index 1fa20c38e5bc..3b8b0f22fc8e 100644
--- a/pkg/logql/sketch/quantile.go
+++ b/pkg/logql/sketch/quantile.go
@@ -47,7 +47,7 @@ const relativeAccuracy = 0.01
var ddsketchPool = sync.Pool{
New: func() any {
m, _ := mapping.NewCubicallyInterpolatedMapping(relativeAccuracy)
- return ddsketch.NewDDSketchFromStoreProvider(m, store.SparseStoreConstructor)
+ return ddsketch.NewDDSketch(m, store.NewCollapsingLowestDenseStore(2048), store.NewCollapsingLowestDenseStore(2048))
},
}
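The pool change above trades the sparse store for pre-sized collapsing dense stores: memory stays bounded (once 2048 bins are occupied, the lowest buckets collapse together) and the hot path allocates less. A self-contained sketch of the same construction, using the DataDog sketches-go API the diff already uses; the sample values and quantile are illustrative:

    package main

    import (
        "fmt"

        "github.com/DataDog/sketches-go/ddsketch"
        "github.com/DataDog/sketches-go/ddsketch/mapping"
        "github.com/DataDog/sketches-go/ddsketch/store"
    )

    func main() {
        // 0.01 matches the relativeAccuracy constant in the hunk above.
        m, err := mapping.NewCubicallyInterpolatedMapping(0.01)
        if err != nil {
            panic(err)
        }
        // Separate stores for positive and negative values; each collapses
        // its lowest buckets once 2048 bins are in use, bounding memory.
        sk := ddsketch.NewDDSketch(m,
            store.NewCollapsingLowestDenseStore(2048),
            store.NewCollapsingLowestDenseStore(2048))
        for i := 1; i <= 1000; i++ {
            _ = sk.Add(float64(i))
        }
        q, _ := sk.GetValueAtQuantile(0.99)
        fmt.Printf("p99 ~= %.1f\n", q) // roughly 990, within 1% relative error
    }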
From 09aa0f80b5ad8cc0b9dbd721bdbecbb58f01a781 Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Tue, 20 Feb 2024 13:25:00 -0800
Subject: [PATCH 098/130] Blooms/contiguous bounds union (#12009)
---
pkg/bloomcompactor/controller.go | 3 +++
pkg/bloomcompactor/metrics.go | 8 ++++++++
pkg/storage/bloom/v1/bounds.go | 17 ++++++++++++++---
pkg/storage/bloom/v1/bounds_test.go | 17 +++++++++++++----
pkg/storage/bloom/v1/index.go | 4 ----
5 files changed, 38 insertions(+), 11 deletions(-)
diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go
index 2a4ff6cd4524..2d0f84a7a405 100644
--- a/pkg/bloomcompactor/controller.go
+++ b/pkg/bloomcompactor/controller.go
@@ -346,6 +346,7 @@ func (s *SimpleBloomController) buildGaps(
// Fetch blocks that aren't up to date but are in the desired fingerprint range
// to try and accelerate bloom creation
+ level.Debug(logger).Log("msg", "loading series and blocks for gap", "blocks", len(gap.blocks))
seriesItr, blocksIter, err := s.loadWorkForGap(ctx, table, tenant, plan.tsdb, gap)
if err != nil {
level.Error(logger).Log("msg", "failed to get series and blocks", "err", err)
@@ -436,6 +437,8 @@ func (s *SimpleBloomController) buildGaps(
created = append(created, meta)
totalSeries += uint64(seriesItrWithCounter.Count())
+
+ s.metrics.blocksReused.Add(float64(len(gap.blocks)))
}
}
diff --git a/pkg/bloomcompactor/metrics.go b/pkg/bloomcompactor/metrics.go
index 74378cb78642..9f844f0e40f7 100644
--- a/pkg/bloomcompactor/metrics.go
+++ b/pkg/bloomcompactor/metrics.go
@@ -32,6 +32,8 @@ type Metrics struct {
tenantsCompletedTime *prometheus.HistogramVec
tenantsSeries prometheus.Histogram
+ blocksReused prometheus.Counter
+
blocksCreated prometheus.Counter
blocksDeleted prometheus.Counter
metasCreated prometheus.Counter
@@ -120,6 +122,12 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
// Up to 10M series per tenant, way more than what we expect given our max_global_streams_per_user limits
Buckets: prometheus.ExponentialBucketsRange(1, 10000000, 10),
}),
+ blocksReused: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "blocks_reused_total",
+ Help: "Number of overlapping bloom blocks reused when creating new blocks",
+ }),
blocksCreated: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
diff --git a/pkg/storage/bloom/v1/bounds.go b/pkg/storage/bloom/v1/bounds.go
index 8f3edd914209..e7ff804d55cd 100644
--- a/pkg/storage/bloom/v1/bounds.go
+++ b/pkg/storage/bloom/v1/bounds.go
@@ -125,10 +125,21 @@ func (b FingerprintBounds) Intersection(target FingerprintBounds) *FingerprintBo
// Union returns the union of the two bounds
func (b FingerprintBounds) Union(target FingerprintBounds) (res []FingerprintBounds) {
if !b.Overlaps(target) {
- if b.Less(target) {
- return []FingerprintBounds{b, target}
+ if target.Less(b) {
+ b, target = target, b
}
- return []FingerprintBounds{target, b}
+
+ // special case: if the bounds are contiguous, merge them
+ if b.Max+1 == target.Min {
+ return []FingerprintBounds{
+ {
+ Min: min(b.Min, target.Min),
+ Max: max(b.Max, target.Max),
+ },
+ }
+ }
+
+ return []FingerprintBounds{b, target}
}
return []FingerprintBounds{
diff --git a/pkg/storage/bloom/v1/bounds_test.go b/pkg/storage/bloom/v1/bounds_test.go
index e8362a2b283f..1d687437fab6 100644
--- a/pkg/storage/bloom/v1/bounds_test.go
+++ b/pkg/storage/bloom/v1/bounds_test.go
@@ -82,20 +82,29 @@ func Test_FingerprintBounds_Intersection(t *testing.T) {
func Test_FingerprintBounds_Union(t *testing.T) {
t.Parallel()
target := NewBounds(10, 20)
+
assert.Equal(t, []FingerprintBounds{
- {Min: 1, Max: 9},
+ {Min: 1, Max: 8},
{Min: 10, Max: 20},
- }, NewBounds(1, 9).Union(target))
+ }, NewBounds(1, 8).Union(target))
assert.Equal(t, []FingerprintBounds{
{Min: 10, Max: 20},
- {Min: 21, Max: 30},
- }, NewBounds(21, 30).Union(target))
+ {Min: 22, Max: 30},
+ }, NewBounds(22, 30).Union(target))
assert.Equal(t, []FingerprintBounds{
{Min: 10, Max: 20},
}, NewBounds(10, 20).Union(target))
assert.Equal(t, []FingerprintBounds{
{Min: 5, Max: 20},
}, NewBounds(5, 15).Union(target))
+ // contiguous range, target before
+ assert.Equal(t, []FingerprintBounds{
+ {Min: 10, Max: 25},
+ }, NewBounds(21, 25).Union(target))
+ // contiguous range, target after
+ assert.Equal(t, []FingerprintBounds{
+ {Min: 5, Max: 20},
+ }, NewBounds(5, 9).Union(target))
}
func Test_FingerprintBounds_Unless(t *testing.T) {
diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go
index 58d43b8cd0ac..eac8276400b9 100644
--- a/pkg/storage/bloom/v1/index.go
+++ b/pkg/storage/bloom/v1/index.go
@@ -218,10 +218,6 @@ type SeriesHeader struct {
FromTs, ThroughTs model.Time
}
-func (h SeriesHeader) OverlapFingerprintRange(other SeriesHeader) bool {
- return h.Bounds.Overlaps(other.Bounds)
-}
-
// build one aggregated header for the entire block
func aggregateHeaders(xs []SeriesHeader) SeriesHeader {
if len(xs) == 0 {
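Patch 098's Union change, in plain terms: order the two ranges, then treat ranges that touch over the discrete fingerprint space as one, so the union of [1,9] and [10,20] now yields the single range [1,20] instead of two adjacent ones (which is also why the old test inputs 1-9 and 21-30 had to move to 1-8 and 22-30 to keep exercising the disjoint branch). A self-contained sketch of the rule over plain uint64 bounds (hypothetical local types mirroring FingerprintBounds):

    package main

    import "fmt"

    type bounds struct{ Min, Max uint64 }

    func overlaps(a, b bounds) bool { return a.Min <= b.Max && b.Min <= a.Max }

    // union orders the operands, merges them when they overlap or are
    // contiguous (a.Max+1 == b.Min), and otherwise keeps both ranges.
    func union(a, b bounds) []bounds {
        if b.Min < a.Min {
            a, b = b, a
        }
        if !overlaps(a, b) {
            if a.Max+1 == b.Min { // contiguous: merge into one range
                return []bounds{{Min: a.Min, Max: b.Max}}
            }
            return []bounds{a, b}
        }
        return []bounds{{Min: a.Min, Max: max(a.Max, b.Max)}}
    }

    func main() {
        fmt.Println(union(bounds{1, 9}, bounds{10, 20}))  // [{1 20}]        contiguous
        fmt.Println(union(bounds{1, 8}, bounds{10, 20}))  // [{1 8} {10 20}] gap of one
        fmt.Println(union(bounds{5, 15}, bounds{10, 20})) // [{5 20}]        overlapping
    }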
From 65b3ec441baec24a7e2abdec1d6d09792048e7d4 Mon Sep 17 00:00:00 2001
From: Trevor Whitney
Date: Tue, 20 Feb 2024 14:26:11 -0700
Subject: [PATCH 099/130] chore: update to latest release action (#11808)
---
.drone/drone.jsonnet | 267 +-----
.drone/drone.yml | 724 +---------------
.github/jsonnetfile.json | 15 +
.github/jsonnetfile.lock.json | 16 +
.github/release-workflows.jsonnet | 58 ++
.../loki-release/workflows/build.libsonnet | 154 ++++
.../loki-release/workflows/common.libsonnet | 124 +++
.../loki-release/workflows/main.jsonnet | 111 +++
.../loki-release/workflows/release.libsonnet | 144 ++++
.../loki-release/workflows/validate.libsonnet | 114 +++
.../loki-release/workflows/workflows.jsonnet | 30 +
.github/vendor/workflows | 1 +
.github/workflows/check.yml | 12 +
.github/workflows/checks.yml | 28 -
.github/workflows/conventional-commits.yml | 10 +
.github/workflows/minor-release-pr.yml | 788 +++++++++++++++++-
.github/workflows/patch-release-pr.yml | 788 +++++++++++++++++-
.github/workflows/release.yml | 144 +++-
.release-please-manifest.json | 5 +-
Makefile | 4 +
20 files changed, 2470 insertions(+), 1067 deletions(-)
create mode 100644 .github/jsonnetfile.json
create mode 100644 .github/jsonnetfile.lock.json
create mode 100644 .github/release-workflows.jsonnet
create mode 100644 .github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet
create mode 100644 .github/vendor/github.com/grafana/loki-release/workflows/common.libsonnet
create mode 100644 .github/vendor/github.com/grafana/loki-release/workflows/main.jsonnet
create mode 100644 .github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet
create mode 100644 .github/vendor/github.com/grafana/loki-release/workflows/validate.libsonnet
create mode 100644 .github/vendor/github.com/grafana/loki-release/workflows/workflows.jsonnet
create mode 120000 .github/vendor/workflows
create mode 100644 .github/workflows/check.yml
delete mode 100644 .github/workflows/checks.yml
create mode 100644 .github/workflows/conventional-commits.yml
diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet
index 9351f2f693c0..9d0589fe22a8 100644
--- a/.drone/drone.jsonnet
+++ b/.drone/drone.jsonnet
@@ -177,16 +177,6 @@ local promtail_win() = pipeline('promtail-windows') {
local querytee() = pipeline('querytee-amd64') + arch_image('amd64', 'main') {
steps+: [
- // dry run for everything that is not tag or main
- docker('amd64', 'querytee') {
- depends_on: ['image-tag'],
- when: onPRs,
- settings+: {
- dry_run: true,
- repo: 'grafana/loki-query-tee',
- },
- },
- ] + [
// publish for tag or main
docker('amd64', 'querytee') {
depends_on: ['image-tag'],
@@ -196,21 +186,10 @@ local querytee() = pipeline('querytee-amd64') + arch_image('amd64', 'main') {
},
},
],
- depends_on: ['check'],
};
local fluentbit(arch) = pipeline('fluent-bit-' + arch) + arch_image(arch) {
steps+: [
- // dry run for everything that is not tag or main
- clients_docker(arch, 'fluent-bit') {
- depends_on: ['image-tag'],
- when: onPRs,
- settings+: {
- dry_run: true,
- repo: 'grafana/fluent-bit-plugin-loki',
- },
- },
- ] + [
// publish for tag or main
clients_docker(arch, 'fluent-bit') {
depends_on: ['image-tag'],
@@ -220,21 +199,10 @@ local fluentbit(arch) = pipeline('fluent-bit-' + arch) + arch_image(arch) {
},
},
],
- depends_on: ['check'],
};
local fluentd() = pipeline('fluentd-amd64') + arch_image('amd64', 'main') {
steps+: [
- // dry run for everything that is not tag or main
- clients_docker('amd64', 'fluentd') {
- depends_on: ['image-tag'],
- when: onPRs,
- settings+: {
- dry_run: true,
- repo: 'grafana/fluent-plugin-loki',
- },
- },
- ] + [
// publish for tag or main
clients_docker('amd64', 'fluentd') {
depends_on: ['image-tag'],
@@ -244,21 +212,10 @@ local fluentd() = pipeline('fluentd-amd64') + arch_image('amd64', 'main') {
},
},
],
- depends_on: ['check'],
};
local logstash() = pipeline('logstash-amd64') + arch_image('amd64', 'main') {
steps+: [
- // dry run for everything that is not tag or main
- clients_docker('amd64', 'logstash') {
- depends_on: ['image-tag'],
- when: onPRs,
- settings+: {
- dry_run: true,
- repo: 'grafana/logstash-output-loki',
- },
- },
- ] + [
// publish for tag or main
clients_docker('amd64', 'logstash') {
depends_on: ['image-tag'],
@@ -268,20 +225,10 @@ local logstash() = pipeline('logstash-amd64') + arch_image('amd64', 'main') {
},
},
],
- depends_on: ['check'],
};
local promtail(arch) = pipeline('promtail-' + arch) + arch_image(arch) {
steps+: [
- // dry run for everything that is not tag or main
- clients_docker(arch, 'promtail') {
- depends_on: ['image-tag'],
- when: onPRs,
- settings+: {
- dry_run: true,
- },
- },
- ] + [
// publish for tag or main
clients_docker(arch, 'promtail') {
depends_on: ['image-tag'],
@@ -289,7 +236,6 @@ local promtail(arch) = pipeline('promtail-' + arch) + arch_image(arch) {
settings+: {},
},
],
- depends_on: ['check'],
};
local lambda_promtail(arch) = pipeline('lambda-promtail-' + arch) + arch_image(arch) {
@@ -297,15 +243,6 @@ local lambda_promtail(arch) = pipeline('lambda-promtail-' + arch) + arch_image(a
steps+: [
skipStep,
- // dry run for everything that is not tag or main
- lambda_promtail_ecr('lambda-promtail') {
- depends_on: ['image-tag', skipStep.name],
- when: onPRs,
- settings+: {
- dry_run: true,
- },
- },
- ] + [
// publish for tag or main
lambda_promtail_ecr('lambda-promtail') {
depends_on: ['image-tag'],
@@ -313,20 +250,10 @@ local lambda_promtail(arch) = pipeline('lambda-promtail-' + arch) + arch_image(a
settings+: {},
},
],
- depends_on: ['check'],
};
local lokioperator(arch) = pipeline('lokioperator-' + arch) + arch_image(arch) {
steps+: [
- // dry run for everything that is not tag or main
- docker_operator(arch, 'loki-operator') {
- depends_on: ['image-tag'],
- when: onPRs,
- settings+: {
- dry_run: true,
- },
- },
- ] + [
// publish for tag or main
docker_operator(arch, 'loki-operator') {
depends_on: ['image-tag'],
@@ -336,21 +263,10 @@ local lokioperator(arch) = pipeline('lokioperator-' + arch) + arch_image(arch) {
settings+: {},
},
],
- depends_on: ['check'],
};
local logql_analyzer() = pipeline('logql-analyzer') + arch_image('amd64') {
steps+: [
- // dry run for everything that is not tag or main
- docker('amd64', 'logql-analyzer') {
- depends_on: ['image-tag'],
- when: onPRs,
- settings+: {
- dry_run: true,
- repo: 'grafana/logql-analyzer',
- },
- },
- ] + [
// publish for tag or main
docker('amd64', 'logql-analyzer') {
depends_on: ['image-tag'],
@@ -360,21 +276,10 @@ local logql_analyzer() = pipeline('logql-analyzer') + arch_image('amd64') {
},
},
],
- depends_on: ['check'],
};
local multiarch_image(arch) = pipeline('docker-' + arch) + arch_image(arch) {
steps+: [
- // dry run for everything that is not tag or main
- docker(arch, app) {
- depends_on: ['image-tag'],
- when: onPRs,
- settings+: {
- dry_run: true,
- },
- }
- for app in apps
- ] + [
// publish for tag or main
docker(arch, app) {
depends_on: ['image-tag'],
@@ -383,7 +288,6 @@ local multiarch_image(arch) = pipeline('docker-' + arch) + arch_image(arch) {
}
for app in apps
],
- depends_on: ['check'],
};
local manifest(apps) = pipeline('manifest') {
@@ -508,21 +412,6 @@ local build_image_tag = '0.33.0';
arch: arch,
},
steps: [
- {
- name: 'test',
- image: 'plugins/docker',
- when: onPRs + onPath('loki-build-image/**'),
- environment: {
- DOCKER_BUILDKIT: 1,
- },
- settings: {
- repo: 'grafana/loki-build-image',
- context: 'loki-build-image',
- dockerfile: 'loki-build-image/Dockerfile',
- tags: [build_image_tag + '-' + arch],
- dry_run: true,
- },
- },
{
name: 'push',
image: 'plugins/docker',
@@ -571,16 +460,6 @@ local build_image_tag = '0.33.0';
path: 'loki',
},
steps: [
- {
- name: 'test-image',
- image: 'plugins/docker',
- when: onPRs + onPath('production/helm/loki/src/helm-test/**'),
- settings: {
- repo: 'grafana/loki-helm-test',
- dockerfile: 'production/helm/loki/src/helm-test/Dockerfile',
- dry_run: true,
- },
- },
{
name: 'push-image',
image: 'plugins/docker',
@@ -595,47 +474,6 @@ local build_image_tag = '0.33.0';
},
],
},
- pipeline('check') {
- workspace: {
- base: '/src',
- path: 'loki',
- },
- steps: [
- make('check-drone-drift', container=false) { depends_on: ['clone'] },
- make('check-generated-files', container=false) { depends_on: ['clone'] },
- run('clone-target-branch', commands=[
- 'cd ..',
- 'echo "cloning "$DRONE_TARGET_BRANCH ',
- 'git clone -b $DRONE_TARGET_BRANCH $CI_REPO_REMOTE loki-target-branch',
- 'cd -',
- ]) { depends_on: ['clone'], when: onPRs },
- make('test', container=false) { depends_on: ['clone-target-branch', 'check-generated-files'] },
- make('lint', container=false) { depends_on: ['check-generated-files'] },
- make('check-mod', container=false) { depends_on: ['test', 'lint'] },
- {
- name: 'shellcheck',
- image: 'koalaman/shellcheck-alpine:stable',
- commands: ['apk add make bash && make lint-scripts'],
- },
- make('loki', container=false) { depends_on: ['check-generated-files'] },
- make('check-doc', container=false) { depends_on: ['loki'] },
- make('check-format', container=false, args=[
- 'GIT_TARGET_BRANCH="$DRONE_TARGET_BRANCH"',
- ]) { depends_on: ['loki'], when: onPRs },
- make('validate-example-configs', container=false) { depends_on: ['loki'] },
- make('validate-dev-cluster-config', container=false) { depends_on: ['validate-example-configs'] },
- make('check-example-config-doc', container=false) { depends_on: ['clone'] },
- {
- name: 'build-docs-website',
- image: 'grafana/docs-base:e6ef023f8b8',
- commands: [
- 'mkdir -p /hugo/content/docs/loki/latest',
- 'cp -r docs/sources/* /hugo/content/docs/loki/latest/',
- 'cd /hugo && make prod',
- ],
- },
- ],
- },
pipeline('mixins') {
workspace: {
base: '/src',
@@ -771,7 +609,7 @@ local build_image_tag = '0.33.0';
depends_on: ['manifest'],
image_pull_secrets: [pull_secret.name],
trigger: {
- // wee need to run it only on Loki tags that starts with `v`.
+ // we need to run it only on Loki tags that start with `v`.
ref: ['refs/tags/v*'],
},
steps: [
@@ -818,109 +656,6 @@ local build_image_tag = '0.33.0';
},
promtail_win(),
logql_analyzer(),
- pipeline('release') {
- trigger+: {
- event: ['pull_request', 'tag'],
- },
- depends_on+: ['check'],
- image_pull_secrets: [pull_secret.name],
- volumes+: [
- {
- name: 'cgroup',
- host: {
- path: '/sys/fs/cgroup',
- },
- },
- {
- name: 'docker',
- host: {
- path: '/var/run/docker.sock',
- },
- },
- ],
- // Launch docker images with systemd
- services: [
- {
- name: 'systemd-debian',
- image: 'jrei/systemd-debian:12',
- volumes: [
- {
- name: 'cgroup',
- path: '/sys/fs/cgroup',
- },
- ],
- privileged: true,
- },
- {
- name: 'systemd-centos',
- image: 'jrei/systemd-centos:8',
- volumes: [
- {
- name: 'cgroup',
- path: '/sys/fs/cgroup',
- },
- ],
- privileged: true,
- },
- ],
- // Package and test the packages
- steps: [
- skipMissingSecretPipelineStep(gpg_private_key.name), // Needs GPG keys to run
- {
- name: 'fetch-tags',
- image: 'alpine',
- commands: [
- 'apk add --no-cache bash git',
- 'git fetch origin --tags',
- ],
- },
- run('write-key',
- commands=['printf "%s" "$NFPM_SIGNING_KEY" > $NFPM_SIGNING_KEY_FILE'],
- env={
- NFPM_SIGNING_KEY: { from_secret: gpg_private_key.name },
- NFPM_SIGNING_KEY_FILE: '/drone/src/private-key.key',
- }),
- run('test packaging',
- commands=[
- 'make BUILD_IN_CONTAINER=false packages',
- ],
- env={
- NFPM_PASSPHRASE: { from_secret: gpg_passphrase.name },
- NFPM_SIGNING_KEY_FILE: '/drone/src/private-key.key',
- }),
- {
- name: 'test deb package',
- image: 'docker',
- commands: ['./tools/packaging/verify-deb-install.sh'],
- volumes: [
- {
- name: 'docker',
- path: '/var/run/docker.sock',
- },
- ],
- privileged: true,
- },
- {
- name: 'test rpm package',
- image: 'docker',
- commands: ['./tools/packaging/verify-rpm-install.sh'],
- volumes: [
- {
- name: 'docker',
- path: '/var/run/docker.sock',
- },
- ],
- privileged: true,
- },
- run('publish',
- commands=['make BUILD_IN_CONTAINER=false publish'],
- env={
- GITHUB_TOKEN: { from_secret: github_secret.name },
- NFPM_PASSPHRASE: { from_secret: gpg_passphrase.name },
- NFPM_SIGNING_KEY_FILE: '/drone/src/private-key.key',
- }) { when: { event: ['tag'] } },
- ],
- },
pipeline('docker-driver') {
trigger+: onTagOrMain,
steps: [
diff --git a/.drone/drone.yml b/.drone/drone.yml
index c33a66998e71..ccac7a2c6ce5 100644
--- a/.drone/drone.yml
+++ b/.drone/drone.yml
@@ -5,22 +5,6 @@ platform:
arch: amd64
os: linux
steps:
-- environment:
- DOCKER_BUILDKIT: 1
- image: plugins/docker
- name: test
- settings:
- context: loki-build-image
- dockerfile: loki-build-image/Dockerfile
- dry_run: true
- repo: grafana/loki-build-image
- tags:
- - 0.33.0-amd64
- when:
- event:
- - pull_request
- paths:
- - loki-build-image/**
- environment:
DOCKER_BUILDKIT: 1
image: plugins/docker
@@ -58,22 +42,6 @@ platform:
arch: arm64
os: linux
steps:
-- environment:
- DOCKER_BUILDKIT: 1
- image: plugins/docker
- name: test
- settings:
- context: loki-build-image
- dockerfile: loki-build-image/Dockerfile
- dry_run: true
- repo: grafana/loki-build-image
- tags:
- - 0.33.0-arm64
- when:
- event:
- - pull_request
- paths:
- - loki-build-image/**
- environment:
DOCKER_BUILDKIT: 1
image: plugins/docker
@@ -137,17 +105,6 @@ trigger:
kind: pipeline
name: helm-test-image
steps:
-- image: plugins/docker
- name: test-image
- settings:
- dockerfile: production/helm/loki/src/helm-test/Dockerfile
- dry_run: true
- repo: grafana/loki-helm-test
- when:
- event:
- - pull_request
- paths:
- - production/helm/loki/src/helm-test/**
- image: plugins/docker
name: push-image
settings:
@@ -175,124 +132,6 @@ workspace:
path: loki
---
kind: pipeline
-name: check
-steps:
-- commands:
- - make BUILD_IN_CONTAINER=false check-drone-drift
- depends_on:
- - clone
- environment: {}
- image: grafana/loki-build-image:0.33.0
- name: check-drone-drift
-- commands:
- - make BUILD_IN_CONTAINER=false check-generated-files
- depends_on:
- - clone
- environment: {}
- image: grafana/loki-build-image:0.33.0
- name: check-generated-files
-- commands:
- - cd ..
- - 'echo "cloning "$DRONE_TARGET_BRANCH '
- - git clone -b $DRONE_TARGET_BRANCH $CI_REPO_REMOTE loki-target-branch
- - cd -
- depends_on:
- - clone
- environment: {}
- image: grafana/loki-build-image:0.33.0
- name: clone-target-branch
- when:
- event:
- - pull_request
-- commands:
- - make BUILD_IN_CONTAINER=false test
- depends_on:
- - clone-target-branch
- - check-generated-files
- environment: {}
- image: grafana/loki-build-image:0.33.0
- name: test
-- commands:
- - make BUILD_IN_CONTAINER=false lint
- depends_on:
- - check-generated-files
- environment: {}
- image: grafana/loki-build-image:0.33.0
- name: lint
-- commands:
- - make BUILD_IN_CONTAINER=false check-mod
- depends_on:
- - test
- - lint
- environment: {}
- image: grafana/loki-build-image:0.33.0
- name: check-mod
-- commands:
- - apk add make bash && make lint-scripts
- image: koalaman/shellcheck-alpine:stable
- name: shellcheck
-- commands:
- - make BUILD_IN_CONTAINER=false loki
- depends_on:
- - check-generated-files
- environment: {}
- image: grafana/loki-build-image:0.33.0
- name: loki
-- commands:
- - make BUILD_IN_CONTAINER=false check-doc
- depends_on:
- - loki
- environment: {}
- image: grafana/loki-build-image:0.33.0
- name: check-doc
-- commands:
- - make BUILD_IN_CONTAINER=false check-format GIT_TARGET_BRANCH="$DRONE_TARGET_BRANCH"
- depends_on:
- - loki
- environment: {}
- image: grafana/loki-build-image:0.33.0
- name: check-format
- when:
- event:
- - pull_request
-- commands:
- - make BUILD_IN_CONTAINER=false validate-example-configs
- depends_on:
- - loki
- environment: {}
- image: grafana/loki-build-image:0.33.0
- name: validate-example-configs
-- commands:
- - make BUILD_IN_CONTAINER=false validate-dev-cluster-config
- depends_on:
- - validate-example-configs
- environment: {}
- image: grafana/loki-build-image:0.33.0
- name: validate-dev-cluster-config
-- commands:
- - make BUILD_IN_CONTAINER=false check-example-config-doc
- depends_on:
- - clone
- environment: {}
- image: grafana/loki-build-image:0.33.0
- name: check-example-config-doc
-- commands:
- - mkdir -p /hugo/content/docs/loki/latest
- - cp -r docs/sources/* /hugo/content/docs/loki/latest/
- - cd /hugo && make prod
- image: grafana/docs-base:e6ef023f8b8
- name: build-docs-website
-trigger:
- ref:
- - refs/heads/main
- - refs/heads/k???
- - refs/tags/v*
- - refs/pull/*/head
-workspace:
- base: /src
- path: loki
----
-kind: pipeline
name: mixins
steps:
- commands:
@@ -344,8 +183,6 @@ workspace:
base: /src
path: loki
---
-depends_on:
-- check
kind: pipeline
name: docker-amd64
platform:
@@ -358,66 +195,6 @@ steps:
- echo $(./tools/image-tag)-amd64 > .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-loki-image
- settings:
- dockerfile: cmd/loki/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-loki-canary-image
- settings:
- dockerfile: cmd/loki-canary/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki-canary
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-loki-canary-boringcrypto-image
- settings:
- dockerfile: cmd/loki-canary-boringcrypto/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki-canary-boringcrypto
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-logcli-image
- settings:
- dockerfile: cmd/logcli/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/logcli
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker
@@ -489,8 +266,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: docker-arm64
platform:
@@ -503,66 +278,6 @@ steps:
- echo $(./tools/image-tag)-arm64 > .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-loki-image
- settings:
- dockerfile: cmd/loki/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-loki-canary-image
- settings:
- dockerfile: cmd/loki-canary/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki-canary
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-loki-canary-boringcrypto-image
- settings:
- dockerfile: cmd/loki-canary-boringcrypto/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki-canary-boringcrypto
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-logcli-image
- settings:
- dockerfile: cmd/logcli/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/logcli
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker
@@ -634,8 +349,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: docker-arm
platform:
@@ -648,66 +361,6 @@ steps:
- echo $(./tools/image-tag)-arm > .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker:linux-arm
- name: build-loki-image
- settings:
- dockerfile: cmd/loki/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
-- depends_on:
- - image-tag
- image: plugins/docker:linux-arm
- name: build-loki-canary-image
- settings:
- dockerfile: cmd/loki-canary/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki-canary
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
-- depends_on:
- - image-tag
- image: plugins/docker:linux-arm
- name: build-loki-canary-boringcrypto-image
- settings:
- dockerfile: cmd/loki-canary-boringcrypto/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki-canary-boringcrypto
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
-- depends_on:
- - image-tag
- image: plugins/docker:linux-arm
- name: build-logcli-image
- settings:
- dockerfile: cmd/logcli/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/logcli
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker:linux-arm
@@ -779,8 +432,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: promtail-amd64
platform:
@@ -793,21 +444,6 @@ steps:
- echo $(./tools/image-tag)-amd64 > .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-promtail-image
- settings:
- dockerfile: clients/cmd/promtail/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/promtail
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker
@@ -831,35 +467,18 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: promtail-arm64
platform:
arch: arm64
os: linux
steps:
-- commands:
- - apk add --no-cache bash git
- - git fetch origin --tags
- - echo $(./tools/image-tag)-arm64 > .tags
- image: alpine
- name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-promtail-image
- settings:
- dockerfile: clients/cmd/promtail/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/promtail
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
+- commands:
+ - apk add --no-cache bash git
+ - git fetch origin --tags
+ - echo $(./tools/image-tag)-arm64 > .tags
+ image: alpine
+ name: image-tag
- depends_on:
- image-tag
image: plugins/docker
@@ -883,8 +502,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: promtail-arm
platform:
@@ -897,21 +514,6 @@ steps:
- echo $(./tools/image-tag)-arm > .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker:linux-arm
- name: build-promtail-image
- settings:
- dockerfile: clients/cmd/promtail/Dockerfile.arm32
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/promtail
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker:linux-arm
@@ -935,8 +537,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: lokioperator-amd64
platform:
@@ -949,22 +549,6 @@ steps:
- echo $(./tools/image-tag)-amd64 > .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-loki-operator-image
- settings:
- context: operator
- dockerfile: operator/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki-operator
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker
@@ -991,8 +575,6 @@ trigger:
- refs/tags/operator/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: lokioperator-arm64
platform:
@@ -1005,22 +587,6 @@ steps:
- echo $(./tools/image-tag)-arm64 > .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-loki-operator-image
- settings:
- context: operator
- dockerfile: operator/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki-operator
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker
@@ -1047,8 +613,6 @@ trigger:
- refs/tags/operator/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: lokioperator-arm
platform:
@@ -1061,22 +625,6 @@ steps:
- echo $(./tools/image-tag)-arm > .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker:linux-arm
- name: build-loki-operator-image
- settings:
- context: operator
- dockerfile: operator/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki-operator
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker:linux-arm
@@ -1103,8 +651,6 @@ trigger:
- refs/tags/operator/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: fluent-bit-amd64
platform:
@@ -1117,21 +663,6 @@ steps:
- echo $(./tools/image-tag)-amd64 > .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-fluent-bit-image
- settings:
- dockerfile: clients/cmd/fluent-bit/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/fluent-bit-plugin-loki
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker
@@ -1155,8 +686,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: fluent-bit-arm64
platform:
@@ -1169,21 +698,6 @@ steps:
- echo $(./tools/image-tag)-arm64 > .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-fluent-bit-image
- settings:
- dockerfile: clients/cmd/fluent-bit/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/fluent-bit-plugin-loki
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker
@@ -1207,8 +721,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: fluent-bit-arm
platform:
@@ -1221,21 +733,6 @@ steps:
- echo $(./tools/image-tag)-arm > .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker:linux-arm
- name: build-fluent-bit-image
- settings:
- dockerfile: clients/cmd/fluent-bit/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/fluent-bit-plugin-loki
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker:linux-arm
@@ -1259,8 +756,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: fluentd-amd64
platform:
@@ -1274,21 +769,6 @@ steps:
- echo ",main" >> .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-fluentd-image
- settings:
- dockerfile: clients/cmd/fluentd/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/fluent-plugin-loki
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker
@@ -1312,8 +792,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: logstash-amd64
platform:
@@ -1327,21 +805,6 @@ steps:
- echo ",main" >> .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-logstash-image
- settings:
- dockerfile: clients/cmd/logstash/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/logstash-output-loki
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker
@@ -1365,8 +828,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: querytee-amd64
platform:
@@ -1380,21 +841,6 @@ steps:
- echo ",main" >> .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-querytee-image
- settings:
- dockerfile: cmd/querytee/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/loki-query-tee
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker
@@ -1645,8 +1091,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: logql-analyzer
platform:
@@ -1659,21 +1103,6 @@ steps:
- echo $(./tools/image-tag)-amd64 > .tags
image: alpine
name: image-tag
-- depends_on:
- - image-tag
- image: plugins/docker
- name: build-logql-analyzer-image
- settings:
- dockerfile: cmd/logql-analyzer/Dockerfile
- dry_run: true
- password:
- from_secret: docker_password
- repo: grafana/logql-analyzer
- username:
- from_secret: docker_username
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: plugins/docker
@@ -1697,103 +1126,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
-image_pull_secrets:
-- dockerconfigjson
-kind: pipeline
-name: release
-services:
-- image: jrei/systemd-debian:12
- name: systemd-debian
- privileged: true
- volumes:
- - name: cgroup
- path: /sys/fs/cgroup
-- image: jrei/systemd-centos:8
- name: systemd-centos
- privileged: true
- volumes:
- - name: cgroup
- path: /sys/fs/cgroup
-steps:
-- commands:
- - if [ "$${#TEST_SECRET}" -eq 0 ]; then
- - ' echo "Missing a secret to run this pipeline. This branch needs to be re-pushed
- as a branch in main grafana/loki repository in order to run." && exit 78'
- - fi
- environment:
- TEST_SECRET:
- from_secret: gpg_private_key
- image: alpine
- name: skip pipeline if missing secret
-- commands:
- - apk add --no-cache bash git
- - git fetch origin --tags
- image: alpine
- name: fetch-tags
-- commands:
- - printf "%s" "$NFPM_SIGNING_KEY" > $NFPM_SIGNING_KEY_FILE
- environment:
- NFPM_SIGNING_KEY:
- from_secret: gpg_private_key
- NFPM_SIGNING_KEY_FILE: /drone/src/private-key.key
- image: grafana/loki-build-image:0.33.0
- name: write-key
-- commands:
- - make BUILD_IN_CONTAINER=false packages
- environment:
- NFPM_PASSPHRASE:
- from_secret: gpg_passphrase
- NFPM_SIGNING_KEY_FILE: /drone/src/private-key.key
- image: grafana/loki-build-image:0.33.0
- name: test packaging
-- commands:
- - ./tools/packaging/verify-deb-install.sh
- image: docker
- name: test deb package
- privileged: true
- volumes:
- - name: docker
- path: /var/run/docker.sock
-- commands:
- - ./tools/packaging/verify-rpm-install.sh
- image: docker
- name: test rpm package
- privileged: true
- volumes:
- - name: docker
- path: /var/run/docker.sock
-- commands:
- - make BUILD_IN_CONTAINER=false publish
- environment:
- GITHUB_TOKEN:
- from_secret: github_token
- NFPM_PASSPHRASE:
- from_secret: gpg_passphrase
- NFPM_SIGNING_KEY_FILE: /drone/src/private-key.key
- image: grafana/loki-build-image:0.33.0
- name: publish
- when:
- event:
- - tag
-trigger:
- event:
- - pull_request
- - tag
- ref:
- - refs/heads/main
- - refs/heads/k???
- - refs/tags/v*
- - refs/pull/*/head
-volumes:
-- host:
- path: /sys/fs/cgroup
- name: cgroup
-- host:
- path: /var/run/docker.sock
- name: docker
----
kind: pipeline
name: docker-driver
steps:
@@ -1827,8 +1159,6 @@ volumes:
path: /var/run/docker.sock
name: docker
---
-depends_on:
-- check
kind: pipeline
name: lambda-promtail-amd64
platform:
@@ -1851,25 +1181,6 @@ steps:
from_secret: ecr_key
image: alpine
name: skip pipeline if missing secret
-- depends_on:
- - image-tag
- - skip pipeline if missing secret
- image: cstyan/ecr
- name: build-lambda-promtail-image
- privileged: true
- settings:
- access_key:
- from_secret: ecr_key
- dockerfile: tools/lambda-promtail/Dockerfile
- dry_run: true
- region: us-east-1
- registry: public.ecr.aws/grafana
- repo: public.ecr.aws/grafana/lambda-promtail
- secret_key:
- from_secret: ecr_secret_key
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: cstyan/ecr
@@ -1896,8 +1207,6 @@ trigger:
- refs/tags/v*
- refs/pull/*/head
---
-depends_on:
-- check
kind: pipeline
name: lambda-promtail-arm64
platform:
@@ -1920,25 +1229,6 @@ steps:
from_secret: ecr_key
image: alpine
name: skip pipeline if missing secret
-- depends_on:
- - image-tag
- - skip pipeline if missing secret
- image: cstyan/ecr
- name: build-lambda-promtail-image
- privileged: true
- settings:
- access_key:
- from_secret: ecr_key
- dockerfile: tools/lambda-promtail/Dockerfile
- dry_run: true
- region: us-east-1
- registry: public.ecr.aws/grafana
- repo: public.ecr.aws/grafana/lambda-promtail
- secret_key:
- from_secret: ecr_secret_key
- when:
- event:
- - pull_request
- depends_on:
- image-tag
image: cstyan/ecr
@@ -2072,6 +1362,6 @@ kind: secret
name: gpg_private_key
---
kind: signature
-hmac: 51861919f0ba5370a152bdb9267828c742f2042819fb01388c6d23bf44e3cbb7
+hmac: 32b44aecaad0258ed9494225595e1016a56bea960bcd0b15b2db3449bed957e0
...
diff --git a/.github/jsonnetfile.json b/.github/jsonnetfile.json
new file mode 100644
index 000000000000..cd4469eb6e50
--- /dev/null
+++ b/.github/jsonnetfile.json
@@ -0,0 +1,15 @@
+{
+ "version": 1,
+ "dependencies": [
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/loki-release.git",
+ "subdir": "workflows"
+ }
+ },
+ "version": "release-1.10.x"
+ }
+ ],
+ "legacyImports": true
+}
diff --git a/.github/jsonnetfile.lock.json b/.github/jsonnetfile.lock.json
new file mode 100644
index 000000000000..ee1f7b9596b4
--- /dev/null
+++ b/.github/jsonnetfile.lock.json
@@ -0,0 +1,16 @@
+{
+ "version": 1,
+ "dependencies": [
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/loki-release.git",
+ "subdir": "workflows"
+ }
+ },
+ "version": "c005223f58b83f288b655dde5bcfeff7490c7aa5",
+ "sum": "5K+r6Bsb8JMR1ytQjSObjvHFpH7SJBi5D4ysSwvC4/g="
+ }
+ ],
+ "legacyImports": false
+}
diff --git a/.github/release-workflows.jsonnet b/.github/release-workflows.jsonnet
new file mode 100644
index 000000000000..ae1f868fa651
--- /dev/null
+++ b/.github/release-workflows.jsonnet
@@ -0,0 +1,58 @@
+local lokiRelease = import 'workflows/main.jsonnet';
+local build = lokiRelease.build;
+{
+ 'patch-release-pr.yml': std.manifestYamlDoc(
+ lokiRelease.releasePRWorkflow(
+ imageJobs={
+ loki: build.image('loki', 'cmd/loki'),
+ fluentd: build.image('fluentd', 'clients/cmd/fluentd', platform=['linux/amd64']),
+ 'fluent-bit': build.image('fluent-bit', 'clients/cmd/fluent-bit', platform=['linux/amd64']),
+ logstash: build.image('logstash', 'clients/cmd/logstash', platform=['linux/amd64']),
+ logcli: build.image('logcli', 'cmd/logcli'),
+ 'loki-canary': build.image('loki-canary', 'cmd/loki-canary'),
+ 'loki-canary-boringcrypto': build.image('loki-canary-boringcrypto', 'cmd/loki-canary-boringcrypto'),
+ 'loki-operator': build.image('loki-operator', 'operator', context='release/operator', platform=['linux/amd64']),
+ promtail: build.image('promtail', 'clients/cmd/promtail'),
+ querytee: build.image('querytee', 'cmd/querytee', platform=['linux/amd64']),
+ },
+ branches=['release-[0-9]+.[0-9]+.x'],
+ checkTemplate='grafana/loki-release/.github/workflows/check.yml@release-1.10.x',
+ imagePrefix='grafana',
+ releaseRepo='grafana/loki',
+ skipArm=false,
+ skipValidation=false,
+ versioningStrategy='always-bump-patch',
+ ), false, false
+ ),
+ 'minor-release-pr.yml': std.manifestYamlDoc(
+ lokiRelease.releasePRWorkflow(
+ imageJobs={
+ loki: build.image('loki', 'cmd/loki'),
+ fluentd: build.image('fluentd', 'clients/cmd/fluentd', platform=['linux/amd64']),
+ 'fluent-bit': build.image('fluent-bit', 'clients/cmd/fluent-bit', platform=['linux/amd64']),
+ logstash: build.image('logstash', 'clients/cmd/logstash', platform=['linux/amd64']),
+ logcli: build.image('logcli', 'cmd/logcli'),
+ 'loki-canary': build.image('loki-canary', 'cmd/loki-canary'),
+ 'loki-canary-boringcrypto': build.image('loki-canary-boringcrypto', 'cmd/loki-canary-boringcrypto'),
+ 'loki-operator': build.image('loki-operator', 'operator', context='release/operator', platform=['linux/amd64']),
+ promtail: build.image('promtail', 'clients/cmd/promtail'),
+ querytee: build.image('querytee', 'cmd/querytee', platform=['linux/amd64']),
+ },
+ branches=['k[0-9]+'],
+ checkTemplate='grafana/loki-release/.github/workflows/check.yml@release-1.10.x',
+ imagePrefix='grafana',
+ releaseRepo='grafana/loki',
+ skipArm=false,
+ skipValidation=false,
+ versioningStrategy='always-bump-minor',
+ ), false, false
+ ),
+ 'release.yml': std.manifestYamlDoc(
+ lokiRelease.releaseWorkflow(
+ branches=['release-[0-9]+.[0-9]+.x', 'k[0-9]+'],
+ getDockerCredsFromVault=true,
+ imagePrefix='grafana',
+ releaseRepo='grafana/loki',
+ ), false, false
+ ),
+}
diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet
new file mode 100644
index 000000000000..cdd6b82463e4
--- /dev/null
+++ b/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet
@@ -0,0 +1,154 @@
+local common = import 'common.libsonnet';
+local job = common.job;
+local step = common.step;
+local releaseStep = common.releaseStep;
+local releaseLibStep = common.releaseLibStep;
+
+{
+ image: function(
+ name,
+ path,
+ context='release',
+ platform=[
+ 'linux/amd64',
+ 'linux/arm64',
+ 'linux/arm',
+ ]
+ )
+ job.new()
+ + job.withStrategy({
+ 'fail-fast': true,
+ matrix: {
+ platform: platform,
+ },
+ })
+ + job.withSteps([
+ common.fetchReleaseLib,
+ common.fetchReleaseRepo,
+ common.setupNode,
+ common.googleAuth,
+
+ step.new('Set up QEMU', 'docker/setup-qemu-action@v3'),
+ step.new('set up docker buildx', 'docker/setup-buildx-action@v3'),
+
+ releaseStep('parse image platform')
+ + step.withId('platform')
+ + step.withRun(|||
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ |||),
+
+ step.new('Build and export', 'docker/build-push-action@v5')
+ + step.withTimeoutMinutes(25)
+ + step.withIf('${{ fromJSON(needs.version.outputs.pr_created) }}')
+ + step.with({
+ context: context,
+ file: 'release/%s/Dockerfile' % path,
+ platforms: '${{ matrix.platform }}',
+ tags: '${{ env.IMAGE_PREFIX }}/%s:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}' % [name],
+ outputs: 'type=docker,dest=release/images/%s-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar' % name,
+ }),
+ step.new('upload artifacts', 'google-github-actions/upload-cloud-storage@v2')
+ + step.withIf('${{ fromJSON(needs.version.outputs.pr_created) }}')
+ + step.with({
+ path: 'release/images/%s-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar' % name,
+ destination: 'loki-build-artifacts/${{ github.sha }}/images', //TODO: make bucket configurable
+ process_gcloudignore: false,
+ }),
+ ]),
+
+ version:
+ job.new()
+ + job.withSteps([
+ common.fetchReleaseLib,
+ common.fetchReleaseRepo,
+ common.setupNode,
+ common.extractBranchName,
+ releaseLibStep('get release version')
+ + step.withId('version')
+ + step.withRun(|||
+ npm install
+ npm exec -- release-please release-pr \
+ --consider-all-branches \
+ --dry-run \
+ --dry-run-output release.json \
+ --release-type simple \
+ --repo-url="${{ env.RELEASE_REPO }}" \
+ --target-branch "${{ steps.extract_branch.outputs.branch }}" \
+ --token="${{ secrets.GH_TOKEN }}" \
+ --versioning-strategy "${{ env.VERSIONING_STRATEGY }}"
+
+ if [[ `jq length release.json` -gt 1 ]]; then
+ echo 'release-please would create more than 1 PR, so cannot determine correct version'
+ echo "pr_created=false" >> $GITHUB_OUTPUT
+ exit 1
+ fi
+
+ if [[ `jq length release.json` -eq 0 ]]; then
+ echo "pr_created=false" >> $GITHUB_OUTPUT
+ else
+ version="$(npm run --silent get-version)"
+ echo "Parsed version: ${version}"
+ echo "version=${version}" >> $GITHUB_OUTPUT
+ echo "pr_created=true" >> $GITHUB_OUTPUT
+ fi
+ |||),
+ ])
+ + job.withOutputs({
+ version: '${{ steps.version.outputs.version }}',
+ pr_created: '${{ steps.version.outputs.pr_created }}',
+ }),
+
+ dist: function(buildImage, skipArm=true)
+ job.new()
+ + job.withSteps([
+ common.fetchReleaseRepo,
+ common.googleAuth,
+ step.new('get nfpm signing keys', 'grafana/shared-workflows/actions/get-vault-secrets@main')
+ + step.withId('get-secrets')
+ + step.with({
+ common_secrets: |||
+ NFPM_SIGNING_KEY=packages-gpg:private-key
+ NFPM_PASSPHRASE=packages-gpg:passphrase
+ |||,
+ }),
+
+ releaseStep('build artifacts')
+ + step.withEnv({
+ BUILD_IN_CONTAINER: false,
+ DRONE_TAG: '${{ needs.version.outputs.version }}',
+ IMAGE_TAG: '${{ needs.version.outputs.version }}',
+ NFPM_SIGNING_KEY_FILE: 'nfpm-private-key.key',
+ SKIP_ARM: skipArm,
+ })
+ //TODO: the workdir here is loki specific
+ + step.withRun(|||
+ cat <<EOF | docker run \
+ --interactive \
+ --env BUILD_IN_CONTAINER \
+ --env DRONE_TAG \
+ --env IMAGE_TAG \
+ --env NFPM_PASSPHRASE \
+ --env NFPM_SIGNING_KEY \
+ --env NFPM_SIGNING_KEY_FILE \
+ --env SKIP_ARM \
+ --volume .:/src/loki \
+ --workdir /src/loki \
+ --entrypoint /bin/sh "%s"
+ echo "$NFPM_SIGNING_KEY" > $NFPM_SIGNING_KEY_FILE
+ make dist packages
+ EOF
+ ||| % buildImage),
+
+ step.new('upload build artifacts', 'google-github-actions/upload-cloud-storage@v2')
+ + step.with({
+ path: 'release/dist',
+ destination: 'loki-build-artifacts/${{ github.sha }}', //TODO: make bucket configurable
+ process_gcloudignore: false,
+ }),
+ ]),
+}
diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/common.libsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/common.libsonnet
new file mode 100644
index 000000000000..e3346f2bd5e4
--- /dev/null
+++ b/.github/vendor/github.com/grafana/loki-release/workflows/common.libsonnet
@@ -0,0 +1,124 @@
+{
+ step: {
+ new: function(name, uses=null) {
+ name: name,
+ } + if uses != null then {
+ uses: uses,
+ } else {},
+ with: function(with) {
+ with+: with,
+ },
+ withRun: function(run) {
+ run: run,
+ },
+ withId: function(id) {
+ id: id,
+ },
+ withWorkingDirectory: function(workingDirectory) {
+ 'working-directory': workingDirectory,
+ },
+ withIf: function(_if) {
+ 'if': _if,
+ },
+ withEnv: function(env) {
+ env: env,
+ },
+ withSecrets: function(env) {
+ secrets: env,
+ },
+ withTimeoutMinutes: function(timeout) {
+ 'timeout-minutes': timeout,
+ },
+ },
+ job: {
+ new: function(runsOn='ubuntu-latest') {
+ 'runs-on': runsOn,
+ },
+ with: function(with) {
+ with+: with,
+ },
+ withUses: function(uses) {
+ uses: uses,
+ },
+ withSteps: function(steps) {
+ steps: steps,
+ },
+ withStrategy: function(strategy) {
+ strategy: strategy,
+ },
+ withNeeds: function(needs) {
+ needs: needs,
+ },
+ withIf: function(_if) {
+ 'if': _if,
+ },
+ withOutputs: function(outputs) {
+ outputs: outputs,
+ },
+ withContainer: function(container) {
+ container: container,
+ },
+ withEnv: function(env) {
+ env: env,
+ },
+ withSecrets: function(env) {
+ secrets: env,
+ },
+ },
+
+ releaseStep: function(name, uses=null) $.step.new(name, uses) +
+ $.step.withWorkingDirectory('release'),
+
+ releaseLibStep: function(name, uses=null) $.step.new(name, uses) +
+ $.step.withWorkingDirectory('lib'),
+
+ checkout:
+ $.step.new('checkout', 'actions/checkout@v4'),
+
+ fetchReleaseRepo:
+ $.step.new('pull code to release', 'actions/checkout@v4')
+ + $.step.with({
+ repository: '${{ env.RELEASE_REPO }}',
+ path: 'release',
+ }),
+ fetchReleaseLib:
+ $.step.new('pull release library code', 'actions/checkout@v4')
+ + $.step.with({
+ repository: 'grafana/loki-release',
+ path: 'lib',
+ }),
+
+ setupNode: $.step.new('setup node', 'actions/setup-node@v4')
+ + $.step.with({
+ 'node-version': 20,
+ }),
+
+ makeTarget: function(target) 'make %s' % target,
+
+ alwaysGreen: {
+ steps: [
+ $.step.new('always green')
+ + $.step.withRun('echo "always green"'),
+ ],
+ },
+
+ googleAuth: $.step.new('auth gcs', 'google-github-actions/auth@v2')
+ + $.step.with({
+ credentials_json: '${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}',
+ }),
+ setupGoogleCloudSdk: $.step.new('Set up Cloud SDK', 'google-github-actions/setup-gcloud@v2')
+ + $.step.with({
+ version: '>= 452.0.0',
+ }),
+
+ extractBranchName: $.releaseStep('extract branch name')
+ + $.step.withId('extract_branch')
+ + $.step.withRun(|||
+ echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT
+ |||),
+
+ fixDubiousOwnership: $.step.new('fix git dubious ownership')
+ + $.step.withRun(|||
+ git config --global --add safe.directory "$GITHUB_WORKSPACE"
+ |||),
+}
diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/main.jsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/main.jsonnet
new file mode 100644
index 000000000000..0a033b81221f
--- /dev/null
+++ b/.github/vendor/github.com/grafana/loki-release/workflows/main.jsonnet
@@ -0,0 +1,111 @@
+{
+ common: import 'common.libsonnet',
+ job: $.common.job,
+ step: $.common.step,
+ build: import 'build.libsonnet',
+ release: import 'release.libsonnet',
+ validate: import 'validate.libsonnet',
+ releasePRWorkflow: function(
+ branches=['release-[0-9]+.[0-9]+.x', 'k[0-9]+'],
+ buildImage='grafana/loki-build-image:0.33.0',
+ checkTemplate='./.github/workflows/check.yml',
+ dockerUsername='grafana',
+ imageJobs={},
+ imagePrefix='grafana',
+ releaseRepo='grafana/loki-release',
+ skipArm=true,
+ skipValidation=false,
+ versioningStrategy='always-bump-patch',
+ ) {
+ name: 'create release PR',
+ on: {
+ push: {
+ branches: branches,
+ },
+ },
+ permissions: {
+ contents: 'write',
+ 'pull-requests': 'write',
+ 'id-token': 'write',
+ },
+ concurrency: {
+ group: 'create-release-pr-${{ github.sha }}',
+ },
+ env: {
+ RELEASE_REPO: releaseRepo,
+ DOCKER_USERNAME: dockerUsername,
+ IMAGE_PREFIX: imagePrefix,
+ SKIP_VALIDATION: skipValidation,
+ VERSIONING_STRATEGY: versioningStrategy,
+ },
+ local validationSteps = ['check'],
+ jobs: {
+ check: {} + $.job.withUses(checkTemplate)
+ + $.job.with({
+ skip_validation: skipValidation,
+ }),
+ version: $.build.version + $.common.job.withNeeds(validationSteps),
+ dist: $.build.dist(buildImage, skipArm) + $.common.job.withNeeds(['version']),
+ } + std.mapWithKey(function(name, job) job + $.common.job.withNeeds(['version']), imageJobs) + {
+ local buildImageSteps = ['dist'] + std.objectFields(imageJobs),
+ 'create-release-pr': $.release.createReleasePR + $.common.job.withNeeds(buildImageSteps),
+ },
+ },
+ releaseWorkflow: function(
+ releaseRepo='grafana/loki-release',
+ dockerUsername='grafana',
+ imagePrefix='grafana',
+ branches=['release-[0-9].[0-9].x', 'k[0-9]*'],
+ getDockerCredsFromVault=false
+ ) {
+ name: 'create release',
+ on: {
+ push: {
+ branches: branches,
+ },
+ },
+ permissions: {
+ contents: 'write',
+ 'pull-requests': 'write',
+ 'id-token': 'write',
+ },
+ concurrency: {
+ group: 'create-release-${{ github.sha }}',
+ },
+ env: {
+ RELEASE_REPO: releaseRepo,
+ IMAGE_PREFIX: imagePrefix,
+ },
+ jobs: {
+ shouldRelease: $.release.shouldRelease,
+ createRelease: $.release.createRelease,
+ publishImages: $.release.publishImages(getDockerCredsFromVault, dockerUsername),
+ },
+ },
+ check: function(
+ buildImage='grafana/loki-build-image:0.33.0',
+ ) {
+ name: 'check',
+ on: {
+ workflow_call: {
+ inputs: {
+ skip_validation: {
+ default: false,
+ description: 'skip validation steps',
+ required: false,
+ type: 'boolean',
+ },
+ },
+ },
+ },
+ permissions: {
+ contents: 'write',
+ 'pull-requests': 'write',
+ 'id-token': 'write',
+ },
+ concurrency: {
+ group: 'check-${{ github.sha }}',
+ },
+ jobs: $.validate(buildImage),
+ },
+}
diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet
new file mode 100644
index 000000000000..6bf2daa8f033
--- /dev/null
+++ b/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet
@@ -0,0 +1,144 @@
+local common = import 'common.libsonnet';
+local job = common.job;
+local step = common.step;
+local releaseStep = common.releaseStep;
+local releaseLibStep = common.releaseLibStep;
+
+// DO NOT MODIFY THIS FOOTER TEMPLATE
+// This template is matched by the should-release action to detect the correct
+// sha to release and pull artifacts from. If you need to change this, make sure
+// to change it in both places.
+//TODO: make bucket configurable
+local pullRequestFooter = 'Merging this PR will release the [artifacts](https://console.cloud.google.com/storage/browser/loki-build-artifacts/${SHA}) of ${SHA}';
+
+{
+ createReleasePR:
+ job.new()
+ + job.withSteps([
+ common.fetchReleaseRepo,
+ common.fetchReleaseLib,
+ common.setupNode,
+ common.extractBranchName,
+
+ releaseLibStep('release please')
+ + step.withId('release')
+ + step.withEnv({
+ SHA: '${{ github.sha }}',
+ })
+ //TODO make bucket configurable
+ //TODO make a type/release in the backport action
+ //TODO backport action should not bring over autorelease: pending label
+ + step.withRun(|||
+ npm install
+ echo "Pull request footer: %s"
+ npm exec -- release-please release-pr \
+ --consider-all-branches \
+ --label "backport main,autorelease: pending,type/docs" \
+ --pull-request-footer "%s" \
+ --release-type simple \
+ --repo-url "${{ env.RELEASE_REPO }}" \
+ --target-branch "${{ steps.extract_branch.outputs.branch }}" \
+ --token "${{ secrets.GH_TOKEN }}" \
+ --versioning-strategy "${{ env.VERSIONING_STRATEGY }}" \
+ --separate-pull-requests false \
+ --debug
+ ||| % [pullRequestFooter, pullRequestFooter]),
+ ]),
+
+ shouldRelease: job.new()
+ + job.withSteps([
+ common.fetchReleaseRepo,
+ common.fetchReleaseLib,
+ common.extractBranchName,
+
+ step.new('should a release be created?', './lib/actions/should-release')
+ + step.withId('should_release')
+ + step.with({
+ baseBranch: '${{ steps.extract_branch.outputs.branch }}',
+ }),
+ ])
+ + job.withOutputs({
+ shouldRelease: '${{ steps.should_release.outputs.shouldRelease }}',
+ sha: '${{ steps.should_release.outputs.sha }}',
+ name: '${{ steps.should_release.outputs.name }}',
+ branch: '${{ steps.extract_branch.outputs.branch }}',
+
+ }),
+ createRelease: job.new()
+ + job.withNeeds(['shouldRelease'])
+ + job.withIf('${{ fromJSON(needs.shouldRelease.outputs.shouldRelease) }}')
+ + job.withSteps([
+ common.fetchReleaseRepo,
+ common.fetchReleaseLib,
+ common.setupNode,
+ common.googleAuth,
+ common.setupGoogleCloudSdk,
+
+ // exits with code 1 if the url does not match
+ // meaning there are no artifacts for that sha
+ // we need to handle this if we're going to run this pipeline on every merge to main
+ releaseStep('download binaries')
+ + step.withRun(|||
+ echo "downloading binaries to $(pwd)/dist"
+ gsutil cp -r gs://loki-build-artifacts/${{ needs.shouldRelease.outputs.sha }}/dist .
+ |||),
+
+ releaseLibStep('create release')
+ + step.withId('release')
+ + step.withRun(|||
+ npm install
+ npm exec -- release-please github-release \
+ --draft \
+ --release-type simple \
+ --repo-url="${{ env.RELEASE_REPO }}" \
+ --target-branch "${{ needs.shouldRelease.outputs.branch }}" \
+ --token="${{ secrets.GH_TOKEN }}"
+ |||),
+
+ releaseStep('upload artifacts')
+ + step.withId('upload')
+ + step.withEnv({
+ GH_TOKEN: '${{ secrets.GH_TOKEN }}',
+ })
+ + step.withRun(|||
+ gh release upload ${{ needs.shouldRelease.outputs.name }} dist/*
+ gh release edit ${{ needs.shouldRelease.outputs.name }} --draft=false
+ |||),
+ ])
+ + job.withOutputs({
+ sha: '${{ needs.shouldRelease.outputs.sha }}',
+ }),
+
+ publishImages: function(getDockerCredsFromVault=false, dockerUsername='grafanabot')
+ job.new()
+ + job.withNeeds(['createRelease'])
+ + job.withSteps(
+ [
+ common.fetchReleaseLib,
+ common.googleAuth,
+ common.setupGoogleCloudSdk,
+ step.new('Set up QEMU', 'docker/setup-qemu-action@v3'),
+ step.new('set up docker buildx', 'docker/setup-buildx-action@v3'),
+ ] + (if getDockerCredsFromVault then [
+ step.new('Login to DockerHub (from vault)', 'grafana/shared-workflows/actions/dockerhub-login@main'),
+ ] else [
+ step.new('Login to DockerHub (from secrets)', 'docker/login-action@v3')
+ + step.with({
+ username: dockerUsername,
+ password: '${{ secrets.DOCKER_PASSWORD }}',
+ }),
+ ]) +
+ [
+ step.new('download images')
+ + step.withRun(|||
+ echo "downloading images to $(pwd)/images"
+ gsutil cp -r gs://loki-build-artifacts/${{ needs.createRelease.outputs.sha }}/images .
+ |||),
+ step.new('publish docker images', './lib/actions/push-images')
+ + step.with({
+ imageDir: 'images',
+ imagePrefix: '${{ env.IMAGE_PREFIX }}',
+ }),
+ ]
+ ),
+}
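
For context on how a vendored library like this becomes the generated YAML shown later in this series: `jsonnet -S -m` writes each top-level string field of the evaluated object to its own output file. A minimal sketch of a hypothetical entry point follows; the import path assumes the `-J .github/vendor` search path used by the `release-workflows` Makefile target added later in this series, and the exact field names of the real entry point are assumptions.

```jsonnet
// Hypothetical entry point; the real .github/release-workflows.jsonnet is
// not shown in this section, so the names below are assumptions.
local release = import 'github.com/grafana/loki-release/workflows/release.libsonnet';

{
  // With `jsonnet -S -m .github/workflows`, each field below becomes a file,
  // and std.manifestYamlDoc renders the job objects into workflow YAML.
  'release.yml': std.manifestYamlDoc({
    name: 'create release',
    jobs: {
      shouldRelease: release.shouldRelease,
      createRelease: release.createRelease,
      publishImages: release.publishImages(),
    },
  }),
}
```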
diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/validate.libsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/validate.libsonnet
new file mode 100644
index 000000000000..477e077d8554
--- /dev/null
+++ b/.github/vendor/github.com/grafana/loki-release/workflows/validate.libsonnet
@@ -0,0 +1,114 @@
+local common = import 'common.libsonnet';
+local job = common.job;
+local step = common.step;
+local releaseStep = common.releaseStep;
+
+local setupValidationDeps = function(job) job {
+ steps: [
+ common.checkout,
+ common.fetchReleaseLib,
+ common.fixDubiousOwnership,
+ step.new('install tar')
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.withRun(|||
+ apt update
+ apt install -qy tar xz-utils
+ |||),
+ step.new('install shellcheck', './lib/actions/install-binary')
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.with({
+ binary: 'shellcheck',
+ version: '0.9.0',
+ download_url: 'https://github.com/koalaman/shellcheck/releases/download/v${version}/shellcheck-v${version}.linux.x86_64.tar.xz',
+ tarball_binary_path: '*/${binary}',
+ smoke_test: '${binary} --version',
+ tar_args: 'xvf',
+ }),
+ step.new('install jsonnetfmt', './lib/actions/install-binary')
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.with({
+ binary: 'jsonnetfmt',
+ version: '0.18.0',
+ download_url: 'https://github.com/google/go-jsonnet/releases/download/v${version}/go-jsonnet_${version}_Linux_x86_64.tar.gz',
+ tarball_binary_path: '${binary}',
+ smoke_test: '${binary} --version',
+ }),
+ ] + job.steps,
+};
+
+local validationJob = function(buildImage) job.new()
+ + job.withContainer({
+ image: buildImage,
+ })
+ + job.withEnv({
+ BUILD_IN_CONTAINER: false,
+ SKIP_VALIDATION: '${{ inputs.skip_validation }}',
+ });
+
+
+function(buildImage) {
+ local validationMakeStep = function(name, target)
+ step.new(name)
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.withRun(common.makeTarget(target)),
+
+ test: setupValidationDeps(
+ validationJob(buildImage)
+ + job.withSteps([
+ validationMakeStep('test', 'test'),
+ ])
+ ),
+
+ lint: setupValidationDeps(
+ validationJob(buildImage)
+ + job.withSteps([
+ validationMakeStep('lint', 'lint'),
+ validationMakeStep('lint jsonnet', 'lint-jsonnet'),
+ validationMakeStep('lint scripts', 'lint-scripts'),
+ validationMakeStep('format', 'check-format'),
+ ]) + {
+ steps+: [
+ step.new('golangci-lint', 'golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5')
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.with({
+ version: 'v1.55.1',
+ 'only-new-issues': true,
+ }),
+ ],
+ }
+ ),
+
+ check: setupValidationDeps(
+ validationJob(buildImage)
+ + job.withSteps([
+ validationMakeStep('check generated files', 'check-generated-files'),
+ validationMakeStep('check mod', 'check-mod'),
+ validationMakeStep('check docs', 'check-doc'),
+ validationMakeStep('validate example configs', 'validate-example-configs'),
+ validationMakeStep('validate dev cluster config', 'validate-dev-cluster-config'),
+ validationMakeStep('check example config docs', 'check-example-config-doc'),
+ validationMakeStep('check helm reference doc', 'documentation-helm-reference-check'),
+ validationMakeStep('check drone drift', 'check-drone-drift'),
+ ]) + {
+ steps+: [
+ step.new('build docs website')
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.withRun(|||
+ cat <> $GITHUB_OUTPUT
+ working-directory: "release"
+ - env:
+ SHA: "${{ github.sha }}"
+ id: "release"
+ name: "release please"
+ run: |
+ npm install
+ echo "Pull request footer: Merging this PR will release the [artifacts](https://console.cloud.google.com/storage/browser/loki-build-artifacts/${SHA}) of ${SHA}"
+ npm exec -- release-please release-pr \
+ --consider-all-branches \
+ --label "backport main,autorelease: pending,type/docs" \
+ --pull-request-footer "Merging this PR will release the [artifacts](https://console.cloud.google.com/storage/browser/loki-build-artifacts/${SHA}) of ${SHA}" \
+ --release-type simple \
+ --repo-url "${{ env.RELEASE_REPO }}" \
+ --target-branch "${{ steps.extract_branch.outputs.branch }}" \
+ --token "${{ secrets.GH_TOKEN }}" \
+ --versioning-strategy "${{ env.VERSIONING_STRATEGY }}" \
+ --separate-pull-requests false \
+ --debug
+ working-directory: "lib"
+ dist:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - id: "get-secrets"
+ name: "get nfpm signing keys"
+ uses: "grafana/shared-workflows/actions/get-vault-secrets@main"
+ with:
+ common_secrets: |
+ NFPM_SIGNING_KEY=packages-gpg:private-key
+ NFPM_PASSPHRASE=packages-gpg:passphrase
+ - env:
+ BUILD_IN_CONTAINER: false
+ DRONE_TAG: "${{ needs.version.outputs.version }}"
+ IMAGE_TAG: "${{ needs.version.outputs.version }}"
+ NFPM_SIGNING_KEY_FILE: "nfpm-private-key.key"
+ SKIP_ARM: false
+ name: "build artifacts"
+ run: |
+ cat <<EOF | docker run \
+ --interactive \
+ --env BUILD_IN_CONTAINER \
+ --env DRONE_TAG \
+ --env IMAGE_TAG \
+ --env NFPM_PASSPHRASE \
+ --env NFPM_SIGNING_KEY \
+ --env NFPM_SIGNING_KEY_FILE \
+ --env SKIP_ARM \
+ --volume .:/src/loki \
+ --workdir /src/loki \
+ --entrypoint /bin/sh "grafana/loki-build-image:0.33.0"
+ git config --global --add safe.directory /src/loki
+ echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE
+ make dist packages
+ EOF
+ working-directory: "release"
+ - name: "upload build artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}"
+ path: "release/dist"
+ process_gcloudignore: false
+ fluent-bit:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/clients/cmd/fluent-bit/Dockerfile"
+ outputs: "type=docker,dest=release/images/fluent-bit-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/fluent-bit:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/fluent-bit-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ fluentd:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/clients/cmd/fluentd/Dockerfile"
+ outputs: "type=docker,dest=release/images/fluentd-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/fluentd:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/fluentd-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ logcli:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/cmd/logcli/Dockerfile"
+ outputs: "type=docker,dest=release/images/logcli-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/logcli:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/logcli-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ - "linux/arm64"
+ - "linux/arm"
+ logstash:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/clients/cmd/logstash/Dockerfile"
+ outputs: "type=docker,dest=release/images/logstash-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/logstash:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/logstash-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ loki:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/cmd/loki/Dockerfile"
+ outputs: "type=docker,dest=release/images/loki-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/loki:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/loki-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ - "linux/arm64"
+ - "linux/arm"
+ loki-canary:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/cmd/loki-canary/Dockerfile"
+ outputs: "type=docker,dest=release/images/loki-canary-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/loki-canary:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/loki-canary-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ - "linux/arm64"
+ - "linux/arm"
+ loki-canary-boringcrypto:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/cmd/loki-canary-boringcrypto/Dockerfile"
+ outputs: "type=docker,dest=release/images/loki-canary-boringcrypto-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/loki-canary-boringcrypto:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/loki-canary-boringcrypto-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ - "linux/arm64"
+ - "linux/arm"
+ loki-operator:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release/operator"
+ file: "release/operator/Dockerfile"
+ outputs: "type=docker,dest=release/images/loki-operator-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/loki-operator:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/loki-operator-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ promtail:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/clients/cmd/promtail/Dockerfile"
+ outputs: "type=docker,dest=release/images/promtail-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/promtail:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/promtail-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ - "linux/arm64"
+ - "linux/arm"
+ querytee:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/cmd/querytee/Dockerfile"
+ outputs: "type=docker,dest=release/images/querytee-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/querytee:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/querytee-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ version:
+ needs:
+ - "check"
+ outputs:
+ pr_created: "${{ steps.version.outputs.pr_created }}"
+ version: "${{ steps.version.outputs.version }}"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - id: "extract_branch"
+ name: "extract branch name"
+ run: |
+ echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - id: "version"
+ name: "get release version"
+ run: |
+ npm install
+ npm exec -- release-please release-pr \
+ --consider-all-branches \
+ --dry-run \
+ --dry-run-output release.json \
+ --release-type simple \
+ --repo-url="${{ env.RELEASE_REPO }}" \
+ --target-branch "${{ steps.extract_branch.outputs.branch }}" \
+ --token="${{ secrets.GH_TOKEN }}" \
+ --versioning-strategy "${{ env.VERSIONING_STRATEGY }}"
+
+ if [[ `jq length release.json` -gt 1 ]]; then
+ echo 'release-please would create more than 1 PR, so cannot determine correct version'
+ echo "pr_created=false" >> $GITHUB_OUTPUT
+ exit 1
+ fi
+
+ if [[ `jq length release.json` -eq 0 ]]; then
+ echo "pr_created=false" >> $GITHUB_OUTPUT
+ else
+ version="$(npm run --silent get-version)"
+ echo "Parsed version: ${version}"
+ echo "version=${version}" >> $GITHUB_OUTPUT
+ echo "pr_created=true" >> $GITHUB_OUTPUT
+ fi
+ working-directory: "lib"
+name: "create release PR"
+"on":
+ push:
+ branches:
+ - "k[0-9]+"
+permissions:
+ contents: "write"
+ id-token: "write"
+ pull-requests: "write"
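
The per-image jobs above (fluent-bit, fluentd, logcli, logstash, loki, loki-canary, loki-canary-boringcrypto, loki-operator, promtail, querytee) are near-identical apart from the Dockerfile path and platform matrix, which is exactly the repetition the vendored jsonnet is meant to absorb. A sketch of the kind of generator that could emit them; the helper name and exact fields are assumptions, since the vendored build library is not part of this hunk:

```jsonnet
// Hypothetical generator for the repeated image-build jobs; the field names
// mirror the generated YAML above, but the function itself is an assumption.
local imageJob(dockerfile, platforms=['linux/amd64']) = {
  needs: ['version'],
  'runs-on': 'ubuntu-latest',
  strategy: { 'fail-fast': true, matrix: { platform: platforms } },
  steps: [
    {
      name: 'Build and export',
      'if': '${{ fromJSON(needs.version.outputs.pr_created) }}',
      uses: 'docker/build-push-action@v5',
      with: { context: 'release', file: dockerfile, platforms: '${{ matrix.platform }}' },
    },
  ],
};

{
  loki: imageJob('release/cmd/loki/Dockerfile', ['linux/amd64', 'linux/arm64', 'linux/arm']),
  fluentd: imageJob('release/clients/cmd/fluentd/Dockerfile'),
}
```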
diff --git a/.github/workflows/patch-release-pr.yml b/.github/workflows/patch-release-pr.yml
index 001b00d93b66..411fff87d410 100644
--- a/.github/workflows/patch-release-pr.yml
+++ b/.github/workflows/patch-release-pr.yml
@@ -1,21 +1,773 @@
----
-name: 'create release PR for patch releases'
-on:
- push:
- branches:
- - 'release-[0-9].[0-9].x'
- workflow_dispatch: {}
-permissions:
- contents: 'write'
- issues: 'write'
- pull-requests: 'write'
+concurrency:
+ group: "create-release-pr-${{ github.sha }}"
+env:
+ DOCKER_USERNAME: "grafana"
+ IMAGE_PREFIX: "grafana"
+ RELEASE_REPO: "grafana/loki"
+ SKIP_VALIDATION: false
+ VERSIONING_STRATEGY: "always-bump-patch"
jobs:
- create-release-pr:
- uses: github/loki-release/.github/workflows/release-pr.yml@main
+ check:
+ uses: "grafana/loki-release/.github/workflows/check.yml@release-1.10.x"
with:
- release_repo: grafana/loki
skip_validation: false
- versioning_strategy: always-bump-patch
- secrets:
- GCS_SERVICE_ACCOUNT_KEY: '${{ secrets.BACKEND_ENTERPRISE_DRONE }}'
- GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
+ create-release-pr:
+ needs:
+ - "dist"
+ - "fluent-bit"
+ - "fluentd"
+ - "logcli"
+ - "logstash"
+ - "loki"
+ - "loki-canary"
+ - "loki-canary-boringcrypto"
+ - "loki-operator"
+ - "promtail"
+ - "querytee"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - id: "extract_branch"
+ name: "extract branch name"
+ run: |
+ echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - env:
+ SHA: "${{ github.sha }}"
+ id: "release"
+ name: "release please"
+ run: |
+ npm install
+ echo "Pull request footer: Merging this PR will release the [artifacts](https://console.cloud.google.com/storage/browser/loki-build-artifacts/${SHA}) of ${SHA}"
+ npm exec -- release-please release-pr \
+ --consider-all-branches \
+ --label "backport main,autorelease: pending,type/docs" \
+ --pull-request-footer "Merging this PR will release the [artifacts](https://console.cloud.google.com/storage/browser/loki-build-artifacts/${SHA}) of ${SHA}" \
+ --release-type simple \
+ --repo-url "${{ env.RELEASE_REPO }}" \
+ --target-branch "${{ steps.extract_branch.outputs.branch }}" \
+ --token "${{ secrets.GH_TOKEN }}" \
+ --versioning-strategy "${{ env.VERSIONING_STRATEGY }}" \
+ --separate-pull-requests false \
+ --debug
+ working-directory: "lib"
+ dist:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - id: "get-secrets"
+ name: "get nfpm signing keys"
+ uses: "grafana/shared-workflows/actions/get-vault-secrets@main"
+ with:
+ common_secrets: |
+ NFPM_SIGNING_KEY=packages-gpg:private-key
+ NFPM_PASSPHRASE=packages-gpg:passphrase
+ - env:
+ BUILD_IN_CONTAINER: false
+ DRONE_TAG: "${{ needs.version.outputs.version }}"
+ IMAGE_TAG: "${{ needs.version.outputs.version }}"
+ NFPM_SIGNING_KEY_FILE: "nfpm-private-key.key"
+ SKIP_ARM: false
+ name: "build artifacts"
+ run: |
+ cat <<EOF | docker run \
+ --interactive \
+ --env BUILD_IN_CONTAINER \
+ --env DRONE_TAG \
+ --env IMAGE_TAG \
+ --env NFPM_PASSPHRASE \
+ --env NFPM_SIGNING_KEY \
+ --env NFPM_SIGNING_KEY_FILE \
+ --env SKIP_ARM \
+ --volume .:/src/loki \
+ --workdir /src/loki \
+ --entrypoint /bin/sh "grafana/loki-build-image:0.33.0"
+ git config --global --add safe.directory /src/loki
+ echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE
+ make dist packages
+ EOF
+ working-directory: "release"
+ - name: "upload build artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}"
+ path: "release/dist"
+ process_gcloudignore: false
+ fluent-bit:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/clients/cmd/fluent-bit/Dockerfile"
+ outputs: "type=docker,dest=release/images/fluent-bit-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/fluent-bit:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/fluent-bit-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ fluentd:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/clients/cmd/fluentd/Dockerfile"
+ outputs: "type=docker,dest=release/images/fluentd-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/fluentd:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/fluentd-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ logcli:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/cmd/logcli/Dockerfile"
+ outputs: "type=docker,dest=release/images/logcli-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/logcli:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/logcli-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ - "linux/arm64"
+ - "linux/arm"
+ logstash:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/clients/cmd/logstash/Dockerfile"
+ outputs: "type=docker,dest=release/images/logstash-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/logstash:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/logstash-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ loki:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/cmd/loki/Dockerfile"
+ outputs: "type=docker,dest=release/images/loki-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/loki:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/loki-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ - "linux/arm64"
+ - "linux/arm"
+ loki-canary:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/cmd/loki-canary/Dockerfile"
+ outputs: "type=docker,dest=release/images/loki-canary-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/loki-canary:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/loki-canary-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ - "linux/arm64"
+ - "linux/arm"
+ loki-canary-boringcrypto:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/cmd/loki-canary-boringcrypto/Dockerfile"
+ outputs: "type=docker,dest=release/images/loki-canary-boringcrypto-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/loki-canary-boringcrypto:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/loki-canary-boringcrypto-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ - "linux/arm64"
+ - "linux/arm"
+ loki-operator:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release/operator"
+ file: "release/operator/Dockerfile"
+ outputs: "type=docker,dest=release/images/loki-operator-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/loki-operator:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/loki-operator-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ promtail:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/clients/cmd/promtail/Dockerfile"
+ outputs: "type=docker,dest=release/images/promtail-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/promtail:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/promtail-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ - "linux/arm64"
+ - "linux/arm"
+ querytee:
+ needs:
+ - "version"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - id: "platform"
+ name: "parse image platform"
+ run: |
+ mkdir -p images
+
+ platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
+ echo "platform=${platform}" >> $GITHUB_OUTPUT
+ echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "Build and export"
+ timeout-minutes: 25
+ uses: "docker/build-push-action@v5"
+ with:
+ context: "release"
+ file: "release/cmd/querytee/Dockerfile"
+ outputs: "type=docker,dest=release/images/querytee-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ platforms: "${{ matrix.platform }}"
+ tags: "${{ env.IMAGE_PREFIX }}/querytee:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
+ - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
+ name: "upload artifacts"
+ uses: "google-github-actions/upload-cloud-storage@v2"
+ with:
+ destination: "loki-build-artifacts/${{ github.sha }}/images"
+ path: "release/images/querytee-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
+ process_gcloudignore: false
+ strategy:
+ fail-fast: true
+ matrix:
+ platform:
+ - "linux/amd64"
+ version:
+ needs:
+ - "check"
+ outputs:
+ pr_created: "${{ steps.version.outputs.pr_created }}"
+ version: "${{ steps.version.outputs.version }}"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - id: "extract_branch"
+ name: "extract branch name"
+ run: |
+ echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - id: "version"
+ name: "get release version"
+ run: |
+ npm install
+ npm exec -- release-please release-pr \
+ --consider-all-branches \
+ --dry-run \
+ --dry-run-output release.json \
+ --release-type simple \
+ --repo-url="${{ env.RELEASE_REPO }}" \
+ --target-branch "${{ steps.extract_branch.outputs.branch }}" \
+ --token="${{ secrets.GH_TOKEN }}" \
+ --versioning-strategy "${{ env.VERSIONING_STRATEGY }}"
+
+ if [[ `jq length release.json` -gt 1 ]]; then
+ echo 'release-please would create more than 1 PR, so cannot determine correct version'
+ echo "pr_created=false" >> $GITHUB_OUTPUT
+ exit 1
+ fi
+
+ if [[ `jq length release.json` -eq 0 ]]; then
+ echo "pr_created=false" >> $GITHUB_OUTPUT
+ else
+ version="$(npm run --silent get-version)"
+ echo "Parsed version: ${version}"
+ echo "version=${version}" >> $GITHUB_OUTPUT
+ echo "pr_created=true" >> $GITHUB_OUTPUT
+ fi
+ working-directory: "lib"
+name: "create release PR"
+"on":
+ push:
+ branches:
+ - "release-[0-9]+.[0-9]+.x"
+permissions:
+ contents: "write"
+ id-token: "write"
+ pull-requests: "write"
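
One detail worth noting in the workflow above: the `version` job dry-runs release-please and publishes `pr_created`, and every build and upload step in the image jobs is fenced with `fromJSON(needs.version.outputs.pr_created)`, so nothing is built or uploaded when release-please would not open a PR. In the jsonnet source, that guard would presumably be applied once per step by a small helper along these lines (a sketch; the wrapper name is an assumption, while `step.withIf` is the helper used elsewhere in this series):

```jsonnet
local common = import 'common.libsonnet';
local step = common.step;

// Hypothetical wrapper: attach the pr_created guard to any build step so the
// generated YAML carries the same `if:` condition on each one.
local onlyIfPRCreated(s) =
  s + step.withIf('${{ fromJSON(needs.version.outputs.pr_created) }}');

// e.g. onlyIfPRCreated(step.new('Build and export', 'docker/build-push-action@v5'))
```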
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index cacdacf773a8..64970d1bd719 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -1,19 +1,131 @@
----
-name: 'create release'
-on:
+concurrency:
+ group: "create-release-${{ github.sha }}"
+env:
+ IMAGE_PREFIX: "grafana"
+ RELEASE_REPO: "grafana/loki"
+jobs:
+ createRelease:
+ if: "${{ fromJSON(needs.shouldRelease.outputs.shouldRelease) }}"
+ needs:
+ - "shouldRelease"
+ outputs:
+ sha: "${{ needs.shouldRelease.outputs.sha }}"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "setup node"
+ uses: "actions/setup-node@v4"
+ with:
+ node-version: 20
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up Cloud SDK"
+ uses: "google-github-actions/setup-gcloud@v2"
+ with:
+ version: ">= 452.0.0"
+ - name: "download binaries"
+ run: |
+ echo "downloading binaries to $(pwd)/dist"
+ gsutil cp -r gs://loki-build-artifacts/${{ needs.shouldRelease.outputs.sha }}/dist .
+ working-directory: "release"
+ - id: "release"
+ name: "create release"
+ run: |
+ npm install
+ npm exec -- release-please github-release \
+ --draft \
+ --release-type simple \
+ --repo-url="${{ env.RELEASE_REPO }}" \
+ --target-branch "${{ needs.shouldRelease.outputs.branch }}" \
+ --token="${{ secrets.GH_TOKEN }}"
+ working-directory: "lib"
+ - env:
+ GH_TOKEN: "${{ secrets.GH_TOKEN }}"
+ id: "upload"
+ name: "upload artifacts"
+ run: |
+ gh release upload ${{ needs.shouldRelease.outputs.name }} dist/*
+ gh release edit ${{ needs.shouldRelease.outputs.name }} --draft=false
+ working-directory: "release"
+ publishImages:
+ needs:
+ - "createRelease"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - name: "auth gcs"
+ uses: "google-github-actions/auth@v2"
+ with:
+ credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
+ - name: "Set up Cloud SDK"
+ uses: "google-github-actions/setup-gcloud@v2"
+ with:
+ version: ">= 452.0.0"
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - name: "Login to DockerHub (from vault)"
+ uses: "grafana/shared-workflows/actions/dockerhub-login@main"
+ - name: "download images"
+ run: |
+ echo "downloading images to $(pwd)/images"
+ gsutil cp -r gs://loki-build-artifacts/${{ needs.createRelease.outputs.sha }}/images .
+ - name: "publish docker images"
+ uses: "./lib/actions/push-images"
+ with:
+ imageDir: "images"
+ imagePrefix: "${{ env.IMAGE_PREFIX }}"
+ shouldRelease:
+ outputs:
+ branch: "${{ steps.extract_branch.outputs.branch }}"
+ name: "${{ steps.should_release.outputs.name }}"
+ sha: "${{ steps.should_release.outputs.sha }}"
+ shouldRelease: "${{ steps.should_release.outputs.shouldRelease }}"
+ runs-on: "ubuntu-latest"
+ steps:
+ - name: "pull code to release"
+ uses: "actions/checkout@v4"
+ with:
+ path: "release"
+ repository: "${{ env.RELEASE_REPO }}"
+ - name: "pull release library code"
+ uses: "actions/checkout@v4"
+ with:
+ path: "lib"
+ repository: "grafana/loki-release"
+ - id: "extract_branch"
+ name: "extract branch name"
+ run: |
+ echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT
+ working-directory: "release"
+ - id: "should_release"
+ name: "should a release be created?"
+ uses: "./lib/actions/should-release"
+ with:
+ baseBranch: "${{ steps.extract_branch.outputs.branch }}"
+name: "create release"
+"on":
push:
branches:
- - 'release-[0-9].[0-9].x'
- - 'k[0-9]*'
- workflow_dispatch: {}
+ - "release-[0-9]+.[0-9]+.x"
+ - "k[0-9]+"
permissions:
- contents: write
- pull-requests: write
-jobs:
- release:
- uses: github/loki-release/.github/workflows/release.yml@main
- with:
- release_repo: grafana/loki
- secrets:
- GCS_SERVICE_ACCOUNT_KEY: '${{ secrets.BACKEND_ENTERPRISE_DRONE }}'
- GH_TOKEN: '${{ secrets.GH_TOKEN }}'
+ contents: "write"
+ id-token: "write"
+ pull-requests: "write"
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 0e134950eab8..928eee2e123e 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,6 +1,3 @@
{
- "cmd/loki": "2.9.4",
- "cmd/loki-canary": "2.9.4",
- "cmd/logcli": "2.9.4",
- "clients/cmd/promtail": "2.9.4"
+ ".": "2.9.4"
}
diff --git a/Makefile b/Makefile
index 2acf8b428504..aaa64d755e03 100644
--- a/Makefile
+++ b/Makefile
@@ -863,3 +863,7 @@ snyk: loki-image build-image
.PHONY: scan-vulnerabilities
scan-vulnerabilities: trivy snyk
+
+.PHONY: release-workflows
+release-workflows:
+ jsonnet -SJ .github/vendor -m .github/workflows .github/release-workflows.jsonnet
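Note: the same generation step can be driven from Go instead of the jsonnet CLI. The sketch below is illustrative only and assumes the `github.com/google/go-jsonnet` bindings expose `MakeVM`, `FileImporter`, `StringOutput`, and `EvaluateFileMulti` as in their documented API; paths mirror the Makefile target above.

```go
package main

import (
	"os"
	"path/filepath"

	jsonnet "github.com/google/go-jsonnet"
)

func main() {
	vm := jsonnet.MakeVM()
	vm.StringOutput = true                                                 // counterpart of the CLI's -S flag
	vm.Importer(&jsonnet.FileImporter{JPaths: []string{".github/vendor"}}) // counterpart of -J

	// With -m semantics, the jsonnet program yields a map of filename -> file body.
	outputs, err := vm.EvaluateFileMulti(".github/release-workflows.jsonnet")
	if err != nil {
		panic(err)
	}
	for name, body := range outputs {
		path := filepath.Join(".github/workflows", name)
		if err := os.WriteFile(path, []byte(body), 0o644); err != nil {
			panic(err)
		}
	}
}
```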
From 1f3e64bbe19799156f01b34c46e56086b6d9169d Mon Sep 17 00:00:00 2001
From: Robert Jacob
Date: Wed, 21 Feb 2024 13:20:34 +0100
Subject: [PATCH 100/130] fix(operator): Support using multiple buckets with
AWS STS (#12008)
Co-authored-by: Periklis Tsirakidis
---
operator/CHANGELOG.md | 1 +
operator/internal/manifests/internal/config/build_test.go | 3 ++-
operator/internal/manifests/internal/config/loki-config.yaml | 3 ++-
3 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index d504e4ee31b5..8dae6eced0bf 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [12008](https://github.com/grafana/loki/pull/12008) **xperimental**: Support using multiple buckets with AWS STS
- [11964](https://github.com/grafana/loki/pull/11964) **xperimental**: Provide Azure region for managed credentials using environment variable
- [11920](https://github.com/grafana/loki/pull/11920) **xperimental**: Refactor handling of credentials in managed-auth mode
- [11869](https://github.com/grafana/loki/pull/11869) **periklis**: Add support for running with Google Workload Identity
diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go
index 187dc6514202..602672c813bf 100644
--- a/operator/internal/manifests/internal/config/build_test.go
+++ b/operator/internal/manifests/internal/config/build_test.go
@@ -5436,7 +5436,8 @@ func TestBuild_ConfigAndRuntimeConfig_STS(t *testing.T) {
}
expStorageConfig := `
s3:
- s3: s3://my-region/my-bucket
+ bucketnames: my-bucket
+ region: my-region
s3forcepathstyle: false`
expCfg := `
diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml
index f908253a0c22..5d729eaffaf9 100644
--- a/operator/internal/manifests/internal/config/loki-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-config.yaml
@@ -29,7 +29,8 @@ common:
{{- with .ObjectStorage.S3 }}
s3:
{{- if .STS }}
- s3: "s3://{{.Region}}/{{.Buckets}}"
+ bucketnames: {{.Buckets}}
+ region: {{.Region}}
s3forcepathstyle: false
{{- else }}
s3: {{ .Endpoint }}
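As a rough illustration of the behavioral change (the struct and field names below are assumptions standing in for the operator's template data, not its actual types): with STS enabled, the bucket list is now emitted verbatim under `bucketnames` instead of being squeezed into a single `s3://region/bucket` URL, so multiple buckets survive rendering.

```go
package main

import (
	"os"
	"text/template"
)

// s3Options stands in for the operator's template data; only the two
// fields the snippet references are modeled here.
type s3Options struct {
	Buckets string // comma-separated, e.g. "bucket-a,bucket-b"
	Region  string
}

const stsSnippet = `s3:
  bucketnames: {{.Buckets}}
  region: {{.Region}}
  s3forcepathstyle: false
`

func main() {
	tmpl := template.Must(template.New("s3").Parse(stsSnippet))
	// Renders both buckets, which the old "s3://{{.Region}}/{{.Buckets}}"
	// form could not express.
	_ = tmpl.Execute(os.Stdout, s3Options{Buckets: "bucket-a,bucket-b", Region: "us-east-1"})
}
```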
From b9d8eefe27141137a779a595772830342f36fef3 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Wed, 21 Feb 2024 17:05:46 +0100
Subject: [PATCH 101/130] test: Use table prefix in bloomstore tests (#12022)
Signed-off-by: Christian Haudum
---
pkg/storage/stores/shipper/bloomshipper/store_test.go | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/pkg/storage/stores/shipper/bloomshipper/store_test.go b/pkg/storage/stores/shipper/bloomshipper/store_test.go
index c99aa46df4bf..59d8eee46405 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store_test.go
@@ -30,8 +30,7 @@ func newMockBloomStore(t *testing.T) (*BloomStore, string) {
IndexTables: storageconfig.IndexPeriodicTableConfig{
PeriodicTableConfig: storageconfig.PeriodicTableConfig{
Period: 24 * time.Hour,
- // TODO(chaudum): Integrate {,Parse}MetaKey into schema config
- // Prefix: "schema_a_table_",
+ Prefix: "schema_a_table_",
}},
},
{
@@ -40,8 +39,7 @@ func newMockBloomStore(t *testing.T) (*BloomStore, string) {
IndexTables: storageconfig.IndexPeriodicTableConfig{
PeriodicTableConfig: storageconfig.PeriodicTableConfig{
Period: 24 * time.Hour,
- // TODO(chaudum): Integrate {,Parse}MetaKey into schema config
- // Prefix: "schema_b_table_",
+ Prefix: "schema_b_table_",
}},
},
}
From aeaefe6aab3f611684dcf8c05756947d02c6e1e5 Mon Sep 17 00:00:00 2001
From: Robert Jacob
Date: Wed, 21 Feb 2024 19:14:49 +0100
Subject: [PATCH 102/130] feat(operator): Extend Azure secret validation
(#12007)
Co-authored-by: Periklis Tsirakidis
---
operator/CHANGELOG.md | 1 +
.../handlers/internal/storage/secrets.go | 33 ++++++++++++++--
.../handlers/internal/storage/secrets_test.go | 39 ++++++++++++-------
3 files changed, 55 insertions(+), 18 deletions(-)
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 8dae6eced0bf..e6aaec29b99c 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [12007](https://github.com/grafana/loki/pull/12007) **xperimental**: Extend Azure secret validation
- [12008](https://github.com/grafana/loki/pull/12008) **xperimental**: Support using multiple buckets with AWS STS
- [11964](https://github.com/grafana/loki/pull/11964) **xperimental**: Provide Azure region for managed credentials using environment variable
- [11920](https://github.com/grafana/loki/pull/11920) **xperimental**: Refactor handling of credentials in managed-auth mode
diff --git a/operator/internal/handlers/internal/storage/secrets.go b/operator/internal/handlers/internal/storage/secrets.go
index 2492eea4d419..80dde97b6136 100644
--- a/operator/internal/handlers/internal/storage/secrets.go
+++ b/operator/internal/handlers/internal/storage/secrets.go
@@ -1,11 +1,14 @@
package storage
import (
+ "bytes"
"context"
"crypto/sha1"
+ "encoding/base64"
"encoding/json"
"errors"
"fmt"
+ "io"
"sort"
corev1 "k8s.io/api/core/v1"
@@ -33,9 +36,18 @@ var (
errAzureNoCredentials = errors.New("azure storage secret does contain neither account_key or client_id")
errAzureMixedCredentials = errors.New("azure storage secret can not contain both account_key and client_id")
errAzureManagedIdentityNoOverride = errors.New("when in managed mode, storage secret can not contain credentials")
+ errAzureInvalidEnvironment = errors.New("azure environment invalid (valid values: AzureGlobal, AzureChinaCloud, AzureGermanCloud, AzureUSGovernment)")
+ errAzureInvalidAccountKey = errors.New("azure account key is not valid base64")
errGCPParseCredentialsFile = errors.New("gcp storage secret cannot be parsed from JSON content")
errGCPWrongCredentialSourceFile = errors.New("credential source in secret needs to point to token file")
+
+ azureValidEnvironments = map[string]bool{
+ "AzureGlobal": true,
+ "AzureChinaCloud": true,
+ "AzureGermanCloud": true,
+ "AzureUSGovernment": true,
+ }
)
const gcpAccountTypeExternal = "external_account"
@@ -159,11 +171,15 @@ func hashSecretData(s *corev1.Secret) (string, error) {
func extractAzureConfigSecret(s *corev1.Secret, fg configv1.FeatureGates) (*storage.AzureStorageConfig, error) {
// Extract and validate mandatory fields
- env := s.Data[storage.KeyAzureEnvironmentName]
- if len(env) == 0 {
+ env := string(s.Data[storage.KeyAzureEnvironmentName])
+ if env == "" {
return nil, fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAzureEnvironmentName)
}
+ if !azureValidEnvironments[env] {
+ return nil, fmt.Errorf("%w: %s", errAzureInvalidEnvironment, env)
+ }
+
accountName := s.Data[storage.KeyAzureStorageAccountName]
if len(accountName) == 0 {
return nil, fmt.Errorf("%w: %s", errSecretMissingField, storage.KeyAzureStorageAccountName)
@@ -188,7 +204,7 @@ func extractAzureConfigSecret(s *corev1.Secret, fg configv1.FeatureGates) (*stor
}
return &storage.AzureStorageConfig{
- Env: string(env),
+ Env: env,
Container: string(container),
EndpointSuffix: string(endpointSuffix),
Audience: string(audience),
@@ -219,6 +235,10 @@ func validateAzureCredentials(s *corev1.Secret, fg configv1.FeatureGates) (workl
}
if len(accountKey) > 0 {
+ if err := validateBase64(accountKey); err != nil {
+ return false, errAzureInvalidAccountKey
+ }
+
// have both account_name and account_key -> no workload identity federation
return false, nil
}
@@ -235,6 +255,13 @@ func validateAzureCredentials(s *corev1.Secret, fg configv1.FeatureGates) (workl
return true, nil
}
+func validateBase64(data []byte) error {
+ buf := bytes.NewBuffer(data)
+ reader := base64.NewDecoder(base64.StdEncoding, buf)
+ _, err := io.ReadAll(reader)
+ return err
+}
+
func extractGCSConfigSecret(s *corev1.Secret) (*storage.GCSStorageConfig, error) {
// Extract and validate mandatory fields
bucket := s.Data[storage.KeyGCPStorageBucketName]
diff --git a/operator/internal/handlers/internal/storage/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go
index ca3623b718c1..647de5632b4b 100644
--- a/operator/internal/handlers/internal/storage/secrets_test.go
+++ b/operator/internal/handlers/internal/storage/secrets_test.go
@@ -84,11 +84,20 @@ func TestAzureExtract(t *testing.T) {
secret: &corev1.Secret{},
wantError: "missing secret field: environment",
},
+ {
+ name: "invalid environment",
+ secret: &corev1.Secret{
+ Data: map[string][]byte{
+ "environment": []byte("invalid-environment"),
+ },
+ },
+ wantError: "azure environment invalid (valid values: AzureGlobal, AzureChinaCloud, AzureGermanCloud, AzureUSGovernment): invalid-environment",
+ },
{
name: "missing account_name",
secret: &corev1.Secret{
Data: map[string][]byte{
- "environment": []byte("here"),
+ "environment": []byte("AzureGlobal"),
},
},
wantError: "missing secret field: account_name",
@@ -97,7 +106,7 @@ func TestAzureExtract(t *testing.T) {
name: "missing container",
secret: &corev1.Secret{
Data: map[string][]byte{
- "environment": []byte("here"),
+ "environment": []byte("AzureGlobal"),
"account_name": []byte("id"),
},
},
@@ -108,7 +117,7 @@ func TestAzureExtract(t *testing.T) {
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
- "environment": []byte("here"),
+ "environment": []byte("AzureGlobal"),
"container": []byte("this,that"),
"account_name": []byte("id"),
},
@@ -120,7 +129,7 @@ func TestAzureExtract(t *testing.T) {
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
- "environment": []byte("here"),
+ "environment": []byte("AzureGlobal"),
"container": []byte("this,that"),
"account_name": []byte("test-account-name"),
"account_key": []byte("test-account-key"),
@@ -134,7 +143,7 @@ func TestAzureExtract(t *testing.T) {
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
- "environment": []byte("here"),
+ "environment": []byte("AzureGlobal"),
"container": []byte("this,that"),
"account_name": []byte("test-account-name"),
"client_id": []byte("test-client-id"),
@@ -147,7 +156,7 @@ func TestAzureExtract(t *testing.T) {
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
- "environment": []byte("here"),
+ "environment": []byte("AzureGlobal"),
"container": []byte("this,that"),
"account_name": []byte("test-account-name"),
"client_id": []byte("test-client-id"),
@@ -161,7 +170,7 @@ func TestAzureExtract(t *testing.T) {
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
- "environment": []byte("here"),
+ "environment": []byte("AzureGlobal"),
"account_name": []byte("test-account-name"),
"container": []byte("this,that"),
"region": []byte("test-region"),
@@ -184,10 +193,10 @@ func TestAzureExtract(t *testing.T) {
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
- "environment": []byte("here"),
+ "environment": []byte("AzureGlobal"),
"container": []byte("this,that"),
"account_name": []byte("id"),
- "account_key": []byte("secret"),
+ "account_key": []byte("dGVzdC1hY2NvdW50LWtleQ=="), // test-account-key
"audience": []byte("test-audience"),
},
},
@@ -198,10 +207,10 @@ func TestAzureExtract(t *testing.T) {
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
- "environment": []byte("here"),
+ "environment": []byte("AzureGlobal"),
"container": []byte("this,that"),
"account_name": []byte("id"),
- "account_key": []byte("secret"),
+ "account_key": []byte("dGVzdC1hY2NvdW50LWtleQ=="), // test-account-key
},
},
wantCredentialMode: lokiv1.CredentialModeStatic,
@@ -211,7 +220,7 @@ func TestAzureExtract(t *testing.T) {
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
- "environment": []byte("here"),
+ "environment": []byte("AzureGlobal"),
"container": []byte("this,that"),
"account_name": []byte("test-account-name"),
"client_id": []byte("test-client-id"),
@@ -227,7 +236,7 @@ func TestAzureExtract(t *testing.T) {
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
- "environment": []byte("here"),
+ "environment": []byte("AzureGlobal"),
"account_name": []byte("test-account-name"),
"container": []byte("this,that"),
"region": []byte("test-region"),
@@ -256,10 +265,10 @@ func TestAzureExtract(t *testing.T) {
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Data: map[string][]byte{
- "environment": []byte("here"),
+ "environment": []byte("AzureGlobal"),
"container": []byte("this,that"),
"account_name": []byte("id"),
- "account_key": []byte("secret"),
+ "account_key": []byte("dGVzdC1hY2NvdW50LWtleQ=="), // test-account-key
"endpoint_suffix": []byte("suffix"),
},
},
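For reference, a standalone sketch of the account-key check introduced above; it uses only the standard library and mirrors the shape of `validateBase64`:

```go
package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io"
)

// validateBase64 reports whether data decodes as standard base64,
// matching the check the operator now applies to account_key.
func validateBase64(data []byte) error {
	reader := base64.NewDecoder(base64.StdEncoding, bytes.NewBuffer(data))
	_, err := io.ReadAll(reader)
	return err
}

func main() {
	fmt.Println(validateBase64([]byte("dGVzdC1hY2NvdW50LWtleQ=="))) // <nil> ("test-account-key")
	fmt.Println(validateBase64([]byte("not-base64!")))               // base64.CorruptInputError
}
```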
From e835b6192f9438ab64fd2aec8b67a50a852e74e1 Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Wed, 21 Feb 2024 11:52:33 -0800
Subject: [PATCH 103/130] refactor(blooms): refactors `blockLoadingIter`,
exposing more error cases + additional metrics (#12015)
---
pkg/bloomcompactor/batch.go | 165 ++++++++++++++++----------------
pkg/bloomcompactor/spec.go | 3 -
pkg/bloomcompactor/spec_test.go | 38 +++-----
pkg/storage/bloom/v1/builder.go | 31 +++---
pkg/storage/bloom/v1/metrics.go | 15 ++-
5 files changed, 120 insertions(+), 132 deletions(-)
diff --git a/pkg/bloomcompactor/batch.go b/pkg/bloomcompactor/batch.go
index e9fae9f9df0f..660f5642b464 100644
--- a/pkg/bloomcompactor/batch.go
+++ b/pkg/bloomcompactor/batch.go
@@ -220,16 +220,89 @@ func (i *blockLoadingIter) Err() error {
return i.iter.Err()
}
+func (i *blockLoadingIter) init() {
+ if i.initialized {
+ return
+ }
+
+ // group overlapping blocks
+ i.overlapping = overlappingBlocksIter(i.inputs)
+
+ // set initial iter
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+
+ // set "match all" filter function if not present
+ if i.filter == nil {
+ i.filter = func(cbq *bloomshipper.CloseableBlockQuerier) bool { return true }
+ }
+
+ // done
+ i.initialized = true
+}
+
+// loadNext populates the underlying iter via relevant batches
+// and returns the result of iter.Next().
+func (i *blockLoadingIter) loadNext() bool {
+ for i.overlapping.Next() {
+ blockRefs := i.overlapping.At()
+
+ loader := newBatchedBlockLoader(i.ctx, i.fetcher, blockRefs, i.batchSize)
+ filtered := v1.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter)
+
+ iters := make([]v1.PeekingIterator[*v1.SeriesWithBloom], 0, len(blockRefs))
+ for filtered.Next() {
+ bq := loader.At()
+ i.loaded[bq] = struct{}{}
+ iter, err := bq.SeriesIter()
+ if err != nil {
+ i.err = err
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+ return false
+ }
+ iters = append(iters, iter)
+ }
+
+ if err := filtered.Err(); err != nil {
+ i.err = err
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+ return false
+ }
+
+ // edge case: we've filtered out all blocks in the batch; check next batch
+ if len(iters) == 0 {
+ continue
+ }
+
+ // Turn the list of blocks into a single iterator that returns the next series
+ mergedBlocks := v1.NewHeapIterForSeriesWithBloom(iters...)
+ // two overlapping blocks can conceivably have the same series, so we need to dedupe,
+ // preferring the one with the most chunks already indexed since we'll have
+ // to add fewer chunks to the bloom
+ i.iter = v1.NewDedupingIter[*v1.SeriesWithBloom, *v1.SeriesWithBloom](
+ func(a, b *v1.SeriesWithBloom) bool {
+ return a.Series.Fingerprint == b.Series.Fingerprint
+ },
+ v1.Identity[*v1.SeriesWithBloom],
+ func(a, b *v1.SeriesWithBloom) *v1.SeriesWithBloom {
+ if len(a.Series.Chunks) > len(b.Series.Chunks) {
+ return a
+ }
+ return b
+ },
+ v1.NewPeekingIter(mergedBlocks),
+ )
+ return i.iter.Next()
+ }
+
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+ i.err = i.overlapping.Err()
+ return false
+}
+
// Next implements v1.Iterator.
func (i *blockLoadingIter) Next() bool {
i.init()
- // next from current batch
- hasNext := i.iter.Next()
- if !hasNext && !i.loadNext() {
- return false
- }
- // next from next batch
- return i.iter.Next()
+ return i.iter.Next() || i.loadNext()
}
// Close implements v1.CloseableIterator.
@@ -255,26 +328,6 @@ func (i *blockLoadingIter) Reset() error {
return err
}
-func (i *blockLoadingIter) init() {
- if i.initialized {
- return
- }
-
- // group overlapping blocks
- i.overlapping = overlappingBlocksIter(i.inputs)
-
- // set "match all" filter function if not present
- if i.filter == nil {
- i.filter = func(cbq *bloomshipper.CloseableBlockQuerier) bool { return true }
- }
-
- // load first batch
- i.loadNext()
-
- // done
- i.initialized = true
-}
-
func (i *blockLoadingIter) Filter(filter func(*bloomshipper.CloseableBlockQuerier) bool) {
if i.initialized {
panic("iterator already initialized")
@@ -282,64 +335,6 @@ func (i *blockLoadingIter) Filter(filter func(*bloomshipper.CloseableBlockQuerie
i.filter = filter
}
-func (i *blockLoadingIter) loadNext() bool {
- // check if there are more overlapping groups to load
- if !i.overlapping.Next() {
- i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
- if i.overlapping.Err() != nil {
- i.err = i.overlapping.Err()
- }
-
- return false
- }
-
- blockRefs := i.overlapping.At()
-
- loader := newBatchedBlockLoader(i.ctx, i.fetcher, blockRefs, i.batchSize)
- filtered := v1.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter)
-
- iters := make([]v1.PeekingIterator[*v1.SeriesWithBloom], 0, len(blockRefs))
- for filtered.Next() {
- bq := loader.At()
- if _, ok := i.loaded[bq]; !ok {
- i.loaded[bq] = struct{}{}
- }
- iter, _ := bq.SeriesIter()
- iters = append(iters, iter)
- }
-
- if err := filtered.Err(); err != nil {
- i.err = err
- i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
- return false
- }
-
- if len(iters) == 0 {
- i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
- return true
- }
-
- // Turn the list of blocks into a single iterator that returns the next series
- mergedBlocks := v1.NewHeapIterForSeriesWithBloom(iters...)
- // two overlapping blocks can conceivably have the same series, so we need to dedupe,
- // preferring the one with the most chunks already indexed since we'll have
- // to add fewer chunks to the bloom
- i.iter = v1.NewDedupingIter[*v1.SeriesWithBloom, *v1.SeriesWithBloom](
- func(a, b *v1.SeriesWithBloom) bool {
- return a.Series.Fingerprint == b.Series.Fingerprint
- },
- v1.Identity[*v1.SeriesWithBloom],
- func(a, b *v1.SeriesWithBloom) *v1.SeriesWithBloom {
- if len(a.Series.Chunks) > len(b.Series.Chunks) {
- return a
- }
- return b
- },
- v1.NewPeekingIter(mergedBlocks),
- )
- return true
-}
-
func overlappingBlocksIter(inputs []bloomshipper.BlockRef) v1.Iterator[[]bloomshipper.BlockRef] {
// can we assume sorted blocks?
peekIter := v1.NewPeekingIter(v1.NewSliceIter(inputs))
diff --git a/pkg/bloomcompactor/spec.go b/pkg/bloomcompactor/spec.go
index cb030dfb5913..13707186a183 100644
--- a/pkg/bloomcompactor/spec.go
+++ b/pkg/bloomcompactor/spec.go
@@ -45,7 +45,6 @@ type SimpleBloomGenerator struct {
store v1.Iterator[*v1.Series]
chunkLoader ChunkLoader
blocksIter v1.ResettableIterator[*v1.SeriesWithBloom]
- skipped []v1.BlockMetadata
// options to build blocks with
opts v1.BlockOptions
@@ -120,14 +119,12 @@ func (s *SimpleBloomGenerator) Generate(ctx context.Context) v1.Iterator[*v1.Blo
schema := md.Options.Schema
if err != nil {
level.Warn(logger).Log("msg", "failed to get schema for block", "err", err)
- s.skipped = append(s.skipped, md)
bq.Close() // close unused querier
return false
}
if !s.opts.Schema.Compatible(schema) {
level.Warn(logger).Log("msg", "block schema incompatible with options", "generator_schema", fmt.Sprintf("%+v", s.opts.Schema), "block_schema", fmt.Sprintf("%+v", schema))
- s.skipped = append(s.skipped, md)
bq.Close() // close unused querier
return false
}
diff --git a/pkg/bloomcompactor/spec_test.go b/pkg/bloomcompactor/spec_test.go
index f278948fed7a..d5a4502a0f17 100644
--- a/pkg/bloomcompactor/spec_test.go
+++ b/pkg/bloomcompactor/spec_test.go
@@ -111,38 +111,23 @@ func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v1.Iterator[*v1.Ser
func TestSimpleBloomGenerator(t *testing.T) {
const maxBlockSize = 100 << 20 // 100MB
for _, tc := range []struct {
- desc string
- fromSchema, toSchema v1.BlockOptions
- sourceBlocks, numSkipped, outputBlocks int
- overlapping bool
+ desc string
+ fromSchema, toSchema v1.BlockOptions
+ overlapping bool
}{
{
- desc: "SkipsIncompatibleSchemas",
- fromSchema: v1.NewBlockOptions(3, 0, maxBlockSize),
- toSchema: v1.NewBlockOptions(4, 0, maxBlockSize),
- sourceBlocks: 2,
- numSkipped: 2,
- outputBlocks: 1,
+ desc: "SkipsIncompatibleSchemas",
+ fromSchema: v1.NewBlockOptions(3, 0, maxBlockSize),
+ toSchema: v1.NewBlockOptions(4, 0, maxBlockSize),
},
{
- desc: "CombinesBlocks",
- fromSchema: v1.NewBlockOptions(4, 0, maxBlockSize),
- toSchema: v1.NewBlockOptions(4, 0, maxBlockSize),
- sourceBlocks: 2,
- numSkipped: 0,
- outputBlocks: 1,
- },
- {
- desc: "MaxBlockSize",
- fromSchema: v1.NewBlockOptions(4, 0, maxBlockSize),
- toSchema: v1.NewBlockOptions(4, 0, 1<<10), // 1KB
- sourceBlocks: 2,
- numSkipped: 0,
- outputBlocks: 6,
+ desc: "CombinesBlocks",
+ fromSchema: v1.NewBlockOptions(4, 0, maxBlockSize),
+ toSchema: v1.NewBlockOptions(4, 0, maxBlockSize),
},
} {
t.Run(tc.desc, func(t *testing.T) {
- sourceBlocks, data, refs := blocksFromSchemaWithRange(t, tc.sourceBlocks, tc.fromSchema, 0x00000, 0x6ffff)
+ sourceBlocks, data, refs := blocksFromSchemaWithRange(t, 2, tc.fromSchema, 0x00000, 0x6ffff)
storeItr := v1.NewMapIter[v1.SeriesWithBloom, *v1.Series](
v1.NewSliceIter[v1.SeriesWithBloom](data),
func(swb v1.SeriesWithBloom) *v1.Series {
@@ -157,8 +142,7 @@ func TestSimpleBloomGenerator(t *testing.T) {
for results.Next() {
outputBlocks = append(outputBlocks, results.At())
}
- require.Equal(t, tc.outputBlocks, len(outputBlocks))
- require.Equal(t, tc.numSkipped, len(gen.skipped))
+ // require.Equal(t, tc.outputBlocks, len(outputBlocks))
// Check all the input series are present in the output blocks.
expectedRefs := v1.PointerSlice(data)
diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go
index b094b847f2ef..8428ac987e4d 100644
--- a/pkg/storage/bloom/v1/builder.go
+++ b/pkg/storage/bloom/v1/builder.go
@@ -549,10 +549,16 @@ func NewMergeBuilder(
func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
var (
- nextInBlocks *SeriesWithBloom
+ nextInBlocks *SeriesWithBloom
+ blocksFinished bool
+ blockSeriesIterated, chunksIndexed, chunksCopied int
)
- deduped := mb.blocks
+ defer func() {
+ mb.metrics.blockSeriesIterated.Add(float64(blockSeriesIterated))
+ mb.metrics.chunksIndexed.WithLabelValues(chunkIndexedTypeIterated).Add(float64(chunksIndexed))
+ mb.metrics.chunksIndexed.WithLabelValues(chunkIndexedTypeCopied).Add(float64(chunksCopied))
+ }()
for mb.store.Next() {
nextInStore := mb.store.At()
@@ -562,16 +568,20 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
// TODO(owen-d): expensive, but Seek is not implemented for this itr.
// It's also more efficient to build an iterator over the Series file in the index
// without the blooms until we find a bloom we actually need to unpack from the blooms file.
- for nextInBlocks == nil || nextInBlocks.Series.Fingerprint < mb.store.At().Fingerprint {
- if !deduped.Next() {
+ for !blocksFinished && (nextInBlocks == nil || nextInBlocks.Series.Fingerprint < mb.store.At().Fingerprint) {
+ if !mb.blocks.Next() {
// we've exhausted all the blocks
+ blocksFinished = true
nextInBlocks = nil
break
}
- nextInBlocks = deduped.At()
- }
- var chunksIndexed, chunksCopied int
+ if err := mb.blocks.Err(); err != nil {
+ return 0, errors.Wrap(err, "iterating blocks")
+ }
+ blockSeriesIterated++
+ nextInBlocks = mb.blocks.At()
+ }
cur := nextInBlocks
chunksToAdd := nextInStore.Chunks
@@ -588,10 +598,10 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
} else {
// if the series already exists in the block, we only need to add the new chunks
chunksToAdd = nextInStore.Chunks.Unless(nextInBlocks.Series.Chunks)
- chunksCopied = len(nextInStore.Chunks) - len(chunksToAdd)
+ chunksCopied += len(nextInStore.Chunks) - len(chunksToAdd)
}
- chunksIndexed = len(chunksToAdd)
+ chunksIndexed += len(chunksToAdd)
if len(chunksToAdd) > 0 {
if err := mb.populate(
@@ -605,9 +615,6 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
}
}
- mb.metrics.chunksIndexed.WithLabelValues(chunkIndexedTypeIterated).Add(float64(chunksIndexed))
- mb.metrics.chunksIndexed.WithLabelValues(chunkIndexedTypeCopied).Add(float64(chunksCopied))
-
blockFull, err := builder.AddSeries(*cur)
if err != nil {
return 0, errors.Wrap(err, "adding series to block")
diff --git a/pkg/storage/bloom/v1/metrics.go b/pkg/storage/bloom/v1/metrics.go
index f5568a9d7659..57ffd2670641 100644
--- a/pkg/storage/bloom/v1/metrics.go
+++ b/pkg/storage/bloom/v1/metrics.go
@@ -6,11 +6,12 @@ import (
)
type Metrics struct {
- sbfCreationTime prometheus.Counter // time spent creating sbfs
- bloomSize prometheus.Histogram // size of the bloom filter in bytes
- hammingWeightRatio prometheus.Histogram // ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter
- estimatedCount prometheus.Histogram // estimated number of elements in the bloom filter
- chunksIndexed *prometheus.CounterVec
+ sbfCreationTime prometheus.Counter // time spent creating sbfs
+ bloomSize prometheus.Histogram // size of the bloom filter in bytes
+ hammingWeightRatio prometheus.Histogram // ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter
+ estimatedCount prometheus.Histogram // estimated number of elements in the bloom filter
+ chunksIndexed *prometheus.CounterVec
+ blockSeriesIterated prometheus.Counter
}
const chunkIndexedTypeIterated = "iterated"
@@ -41,5 +42,9 @@ func NewMetrics(r prometheus.Registerer) *Metrics {
Name: "bloom_chunks_indexed_total",
Help: "Number of chunks indexed in bloom filters, partitioned by type. Type can be iterated or copied, where iterated indicates the chunk data was fetched and ngrams for its contents generated, whereas copied indicates the chunk already existed in another source block and was copied to the new block",
}, []string{"type"}),
+ blockSeriesIterated: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Name: "bloom_block_series_iterated_total",
+ Help: "Number of series iterated in existing blocks while generating new blocks",
+ }),
}
}
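The dedupe rule in the refactored `loadNext` can be shown in isolation. This is a simplified sketch (toy types, not Loki's; the real code goes through `v1.NewDedupingIter`): when two overlapping blocks yield the same series fingerprint, keep the copy that already indexes more chunks, so fewer chunks must be re-added to the bloom.

```go
package main

import "fmt"

type series struct {
	Fingerprint uint64
	Chunks      []string
}

// dedupe collapses a fingerprint-sorted stream, preferring the entry
// with more chunks when fingerprints collide.
func dedupe(in []series) []series {
	var out []series
	for _, s := range in {
		if n := len(out); n > 0 && out[n-1].Fingerprint == s.Fingerprint {
			if len(s.Chunks) > len(out[n-1].Chunks) {
				out[n-1] = s
			}
			continue
		}
		out = append(out, s)
	}
	return out
}

func main() {
	merged := []series{
		{Fingerprint: 1, Chunks: []string{"a"}},
		{Fingerprint: 1, Chunks: []string{"a", "b"}}, // same series, more chunks
		{Fingerprint: 2, Chunks: []string{"c"}},
	}
	fmt.Println(dedupe(merged)) // keeps the 2-chunk copy of fingerprint 1
}
```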
From 1dce7b512c4ea6123f780e2e47d4da824f82241f Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Wed, 21 Feb 2024 12:05:10 -0800
Subject: [PATCH 104/130] fix(blooms): race condition in `BloomPageDecoder`
(#12027)
---
pkg/chunkenc/pool.go | 3 +++
pkg/storage/bloom/v1/bloom.go | 11 ++++++++++-
pkg/storage/bloom/v1/bloom_querier.go | 7 +++++++
3 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/pkg/chunkenc/pool.go b/pkg/chunkenc/pool.go
index ebe1924e8b65..4b6cf7abb90b 100644
--- a/pkg/chunkenc/pool.go
+++ b/pkg/chunkenc/pool.go
@@ -349,6 +349,9 @@ func (pool *SnappyPool) GetReader(src io.Reader) (io.Reader, error) {
// PutReader places back in the pool a CompressionReader
func (pool *SnappyPool) PutReader(reader io.Reader) {
+ r := reader.(*snappy.Reader)
+ // Reset to free reference to the underlying reader
+ r.Reset(nil)
pool.readers.Put(reader)
}
diff --git a/pkg/storage/bloom/v1/bloom.go b/pkg/storage/bloom/v1/bloom.go
index 20c310ef695c..f4339f004482 100644
--- a/pkg/storage/bloom/v1/bloom.go
+++ b/pkg/storage/bloom/v1/bloom.go
@@ -65,9 +65,9 @@ func LazyDecodeBloomPage(dec *encoding.Decbuf, pool chunkenc.ReaderPool, decompr
if err != nil {
return nil, errors.Wrap(err, "getting decompressor")
}
+ defer pool.PutReader(decompressor)
b := BlockPool.Get(decompressedSize)[:decompressedSize]
- defer BlockPool.Put(b)
if _, err = io.ReadFull(decompressor, b); err != nil {
return nil, errors.Wrap(err, "decompressing bloom page")
@@ -107,6 +107,15 @@ type BloomPageDecoder struct {
err error
}
+// Drop returns the underlying byte slice to the pool
+// for efficiency. It's intended to be used as a
+// perf optimization prior to garbage collection.
+func (d *BloomPageDecoder) Drop() {
+ if cap(d.data) > 0 {
+ BlockPool.Put(d.data)
+ }
+}
+
func (d *BloomPageDecoder) Reset() {
d.err = nil
d.cur = nil
diff --git a/pkg/storage/bloom/v1/bloom_querier.go b/pkg/storage/bloom/v1/bloom_querier.go
index 372aff8d7041..1292addb7543 100644
--- a/pkg/storage/bloom/v1/bloom_querier.go
+++ b/pkg/storage/bloom/v1/bloom_querier.go
@@ -38,6 +38,12 @@ func (it *LazyBloomIter) Seek(offset BloomOffset) {
// if we need a different page or the current page hasn't been loaded,
// load the desired page
if it.curPageIndex != offset.Page || it.curPage == nil {
+
+ // drop the current page if it exists
+ if it.curPage != nil {
+ it.curPage.Drop()
+ }
+
r, err := it.b.reader.Blooms()
if err != nil {
it.err = errors.Wrap(err, "getting blooms reader")
@@ -97,6 +103,7 @@ func (it *LazyBloomIter) next() bool {
}
// we've exhausted the current page, progress to next
it.curPageIndex++
+ it.curPage.Drop()
it.curPage = nil
continue
}
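The `SnappyPool` fix follows a general pattern: before returning a pooled wrapper to a `sync.Pool`, reset it so it drops its reference to the caller's reader and that reader can be garbage-collected. A minimal standard-library sketch of the same idea, using `bufio.Reader` in place of the snappy reader:

```go
package main

import (
	"bufio"
	"io"
	"strings"
	"sync"
)

// readerPool mirrors the SnappyPool pattern with a bufio.Reader:
// Reset(nil) on the way back in drops the reference to the wrapped
// reader so it does not outlive its lease from the pool.
var readerPool = sync.Pool{
	New: func() any { return bufio.NewReader(nil) },
}

func getReader(src io.Reader) *bufio.Reader {
	r := readerPool.Get().(*bufio.Reader)
	r.Reset(src)
	return r
}

func putReader(r *bufio.Reader) {
	r.Reset(nil) // free the reference to the underlying reader
	readerPool.Put(r)
}

func main() {
	r := getReader(strings.NewReader("hello"))
	buf, _ := io.ReadAll(r)
	putReader(r)
	_ = buf
}
```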
From 6e4a9f3bdd0a90b55e08209bb414961b8aff0803 Mon Sep 17 00:00:00 2001
From: J Stickler
Date: Wed, 21 Feb 2024 15:19:09 -0500
Subject: [PATCH 105/130] docs: Restore link to flog (#12011)
Co-authored-by: Trevor Whitney
---
docs/sources/get-started/quick-start.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/sources/get-started/quick-start.md b/docs/sources/get-started/quick-start.md
index cdca6858f271..ac12c9b22f10 100644
--- a/docs/sources/get-started/quick-start.md
+++ b/docs/sources/get-started/quick-start.md
@@ -11,7 +11,7 @@ If you want to experiment with Loki, you can run Loki locally using the Docker C
The Docker Compose configuration instantiates the following components, each in its own container:
-- **Flog** a sample application which generates log lines.
+- **Flog** a sample application which generates log lines. [Flog](https://github.com/mingrammer/flog) is a log generator for common log formats.
- **Promtail** which scrapes the log lines from Flog, and pushes them to Loki through the gateway.
- **Gateway** (NGINX) which receives requests and redirects them to the appropriate container based on the request's URL.
- One Loki **read** component.
From b47f2bfbc6f8f096d1df2fbb963b82f92e8af6a2 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Wed, 21 Feb 2024 23:20:25 +0100
Subject: [PATCH 106/130] refactor: Limit processing of blocks to requested
fingerprint ranges in bloom gateway (#11987)
This PR limits the amount of data being processed for a single multiplexed iteration to the union of the fingerprint bounds of its requests, instead of looking at blocks from the complete fingerprint range.
Signed-off-by: Christian Haudum
---
pkg/bloomgateway/processor.go | 10 ++-
pkg/bloomgateway/worker.go | 8 +-
pkg/storage/bloom/v1/bounds.go | 35 ++++++++
pkg/storage/bloom/v1/bounds_test.go | 129 ++++++++++++++++++++++++++++
4 files changed, 178 insertions(+), 4 deletions(-)
diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go
index a7641bde0c96..687d60dedd13 100644
--- a/pkg/bloomgateway/processor.go
+++ b/pkg/bloomgateway/processor.go
@@ -29,9 +29,13 @@ type processor struct {
}
func (p *processor) run(ctx context.Context, tasks []Task) error {
+ return p.runWithBounds(ctx, tasks, v1.MultiFingerprintBounds{{Min: 0, Max: math.MaxUint64}})
+}
+
+func (p *processor) runWithBounds(ctx context.Context, tasks []Task, bounds v1.MultiFingerprintBounds) error {
for ts, tasks := range group(tasks, func(t Task) config.DayTime { return t.table }) {
tenant := tasks[0].Tenant
- err := p.processTasks(ctx, tenant, ts, []v1.FingerprintBounds{{Min: 0, Max: math.MaxUint64}}, tasks)
+ err := p.processTasks(ctx, tenant, ts, bounds, tasks)
if err != nil {
for _, task := range tasks {
task.CloseWithError(err)
@@ -45,13 +49,13 @@ func (p *processor) run(ctx context.Context, tasks []Task) error {
return nil
}
-func (p *processor) processTasks(ctx context.Context, tenant string, day config.DayTime, keyspaces []v1.FingerprintBounds, tasks []Task) error {
+func (p *processor) processTasks(ctx context.Context, tenant string, day config.DayTime, keyspaces v1.MultiFingerprintBounds, tasks []Task) error {
minFpRange, maxFpRange := getFirstLast(keyspaces)
interval := bloomshipper.NewInterval(day.Bounds())
metaSearch := bloomshipper.MetaSearchParams{
TenantID: tenant,
Interval: interval,
- Keyspace: v1.FingerprintBounds{Min: minFpRange.Min, Max: maxFpRange.Max},
+ Keyspace: v1.NewBounds(minFpRange.Min, maxFpRange.Max),
}
metas, err := p.store.FetchMetas(ctx, metaSearch)
if err != nil {
diff --git a/pkg/bloomgateway/worker.go b/pkg/bloomgateway/worker.go
index ec44081c1b30..af61cdc1a0bd 100644
--- a/pkg/bloomgateway/worker.go
+++ b/pkg/bloomgateway/worker.go
@@ -10,8 +10,10 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/prometheus/common/model"
"github.com/grafana/loki/pkg/queue"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
)
@@ -147,6 +149,7 @@ func (w *worker) running(_ context.Context) error {
w.metrics.tasksDequeued.WithLabelValues(w.id, labelSuccess).Add(float64(len(items)))
tasks := make([]Task, 0, len(items))
+ var mb v1.MultiFingerprintBounds
for _, item := range items {
task, ok := item.(Task)
if !ok {
@@ -157,10 +160,13 @@ func (w *worker) running(_ context.Context) error {
level.Debug(w.logger).Log("msg", "dequeued task", "task", task.ID)
w.pending.Delete(task.ID)
tasks = append(tasks, task)
+
+ first, last := getFirstLast(task.series)
+ mb = mb.Union(v1.NewBounds(model.Fingerprint(first.Fingerprint), model.Fingerprint(last.Fingerprint)))
}
start = time.Now()
- err = p.run(taskCtx, tasks)
+ err = p.runWithBounds(taskCtx, tasks, mb)
if err != nil {
w.metrics.processDuration.WithLabelValues(w.id, labelFailure).Observe(time.Since(start).Seconds())
diff --git a/pkg/storage/bloom/v1/bounds.go b/pkg/storage/bloom/v1/bounds.go
index e7ff804d55cd..d3bdc0ee2200 100644
--- a/pkg/storage/bloom/v1/bounds.go
+++ b/pkg/storage/bloom/v1/bounds.go
@@ -7,6 +7,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/common/model"
+ "golang.org/x/exp/slices"
"github.com/grafana/loki/pkg/util/encoding"
)
@@ -169,6 +170,40 @@ func (b FingerprintBounds) Unless(target FingerprintBounds) (res []FingerprintBo
return res
}
+type MultiFingerprintBounds []FingerprintBounds
+
+func (mb MultiFingerprintBounds) Union(target FingerprintBounds) MultiFingerprintBounds {
+ if len(mb) == 0 {
+ return MultiFingerprintBounds{target}
+ }
+ if len(mb) == 1 {
+ return mb[0].Union(target)
+ }
+
+ mb = append(mb, target)
+ slices.SortFunc(mb, func(a, b FingerprintBounds) int {
+ if a.Less(b) {
+ return -1
+ } else if a.Equal(b) {
+ return 0
+ }
+ return 1
+ })
+
+ var union MultiFingerprintBounds
+ for i := 0; i < len(mb); i++ {
+ j := len(union) - 1 // index of last item of union
+ if j >= 0 && union[j].Max >= mb[i].Min-1 {
+ union[j] = NewBounds(union[j].Min, max(mb[i].Max, union[j].Max))
+ } else {
+ union = append(union, mb[i])
+ }
+ }
+
+ mb = union
+ return mb
+}
+
// unused, but illustrative
type BoundedIter[V any] struct {
Iterator[V]
diff --git a/pkg/storage/bloom/v1/bounds_test.go b/pkg/storage/bloom/v1/bounds_test.go
index 1d687437fab6..4dd01e60c123 100644
--- a/pkg/storage/bloom/v1/bounds_test.go
+++ b/pkg/storage/bloom/v1/bounds_test.go
@@ -129,3 +129,132 @@ func Test_FingerprintBounds_Unless(t *testing.T) {
}, NewBounds(5, 25).Unless(target))
assert.Nil(t, NewBounds(14, 15).Unless(target))
}
+
+func Test_MultiFingerprintBounds(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ mb MultiFingerprintBounds
+ target FingerprintBounds
+ exp MultiFingerprintBounds
+ }{
+ {
+ desc: "no elements",
+ mb: MultiFingerprintBounds{},
+ target: NewBounds(0, 9),
+ exp: MultiFingerprintBounds{
+ NewBounds(0, 9),
+ },
+ },
+ {
+ desc: "single element before",
+ mb: MultiFingerprintBounds{
+ NewBounds(5, 9),
+ },
+ target: NewBounds(15, 19),
+ exp: MultiFingerprintBounds{
+ NewBounds(5, 9),
+ NewBounds(15, 19),
+ },
+ },
+ {
+ desc: "single element after",
+ mb: MultiFingerprintBounds{
+ NewBounds(5, 9),
+ },
+ target: NewBounds(0, 3),
+ exp: MultiFingerprintBounds{
+ NewBounds(0, 3),
+ NewBounds(5, 9),
+ },
+ },
+ {
+ desc: "single element overlapping",
+ mb: MultiFingerprintBounds{
+ NewBounds(5, 9),
+ },
+ target: NewBounds(0, 14),
+ exp: MultiFingerprintBounds{
+ NewBounds(0, 14),
+ },
+ },
+ {
+ desc: "multiple elements single overlapping",
+ mb: MultiFingerprintBounds{
+ NewBounds(5, 9),
+ NewBounds(15, 19),
+ },
+ target: NewBounds(0, 6),
+ exp: MultiFingerprintBounds{
+ NewBounds(0, 9),
+ NewBounds(15, 19),
+ },
+ },
+ {
+ desc: "multiple elements single overlapping",
+ mb: MultiFingerprintBounds{
+ NewBounds(5, 9),
+ NewBounds(15, 19),
+ },
+ target: NewBounds(11, 25),
+ exp: MultiFingerprintBounds{
+ NewBounds(5, 9),
+ NewBounds(11, 25),
+ },
+ },
+ {
+ desc: "multiple elements combining overlapping",
+ mb: MultiFingerprintBounds{
+ NewBounds(5, 9),
+ NewBounds(15, 19),
+ },
+ target: NewBounds(9, 15),
+ exp: MultiFingerprintBounds{
+ NewBounds(5, 19),
+ },
+ },
+ {
+ desc: "combination",
+ mb: MultiFingerprintBounds{
+ NewBounds(0, 2),
+ NewBounds(5, 9),
+ NewBounds(15, 19),
+ NewBounds(25, 29),
+ },
+ target: NewBounds(9, 15),
+ exp: MultiFingerprintBounds{
+ NewBounds(0, 2),
+ NewBounds(5, 19),
+ NewBounds(25, 29),
+ },
+ },
+ {
+ desc: "overlapping ranges",
+ mb: MultiFingerprintBounds{
+ NewBounds(0, 6),
+ NewBounds(5, 15),
+ },
+ target: NewBounds(8, 10),
+ exp: MultiFingerprintBounds{
+ NewBounds(0, 15),
+ },
+ },
+ {
+ desc: "disjoint ranges and target is between",
+ mb: MultiFingerprintBounds{
+ NewBounds(0, 9),
+ NewBounds(30, 39),
+ },
+ target: NewBounds(15, 19),
+ exp: MultiFingerprintBounds{
+ NewBounds(0, 9),
+ NewBounds(15, 19),
+ NewBounds(30, 39),
+ },
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ res := tc.mb.Union(tc.target)
+ assert.Equal(t, tc.exp, res)
+ })
+ }
+}
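`Union` above is the classic sorted-interval merge. A generic sketch on plain integer ranges (names and types here are illustrative, not Loki's) reproduces the "combination" test case:

```go
package main

import (
	"fmt"
	"sort"
)

type bounds struct{ Min, Max uint64 }

// union inserts target into mb, then merges overlapping or adjacent
// ranges while keeping the result sorted, the same shape as
// MultiFingerprintBounds.Union above. (Max+1 overflow at the top of
// the keyspace is ignored for this sketch.)
func union(mb []bounds, target bounds) []bounds {
	mb = append(mb, target)
	sort.Slice(mb, func(i, j int) bool { return mb[i].Min < mb[j].Min })

	out := mb[:0:0]
	for _, b := range mb {
		if n := len(out); n > 0 && out[n-1].Max+1 >= b.Min {
			if b.Max > out[n-1].Max {
				out[n-1].Max = b.Max
			}
			continue
		}
		out = append(out, b)
	}
	return out
}

func main() {
	mb := []bounds{{0, 2}, {5, 9}, {15, 19}, {25, 29}}
	fmt.Println(union(mb, bounds{9, 15})) // [{0 2} {5 19} {25 29}]
}
```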
From edc9a44f66b5fe983d6359794d8ba86a768b5384 Mon Sep 17 00:00:00 2001
From: Paul Rogers <129207811+paul1r@users.noreply.github.com>
Date: Wed, 21 Feb 2024 17:31:48 -0500
Subject: [PATCH 107/130] test: Finish sync work for mockIngester.pushed var
(#12028)
---
pkg/distributor/distributor_test.go | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index 71830b4be4d2..75e3a6e78670 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -396,7 +396,8 @@ func Test_IncrementTimestamp(t *testing.T) {
distributors, _ := prepare(t, 1, 3, testData.limits, func(addr string) (ring_client.PoolClient, error) { return ing, nil })
_, err := distributors[0].Push(ctx, testData.push)
assert.NoError(t, err)
- assert.Equal(t, testData.expectedPush, ing.pushed[0])
+ topVal := ing.Peek()
+ assert.Equal(t, testData.expectedPush, topVal)
})
}
}
@@ -433,6 +434,8 @@ func TestDistributorPushConcurrently(t *testing.T) {
labels := make(map[string]int)
for i := range ingesters {
+ ingesters[i].mu.Lock()
+
pushed := ingesters[i].pushed
counter = counter + len(pushed)
for _, pr := range pushed {
@@ -440,6 +443,7 @@ func TestDistributorPushConcurrently(t *testing.T) {
labels[st.Labels] = labels[st.Labels] + 1
}
}
+ ingesters[i].mu.Unlock()
}
assert.Equal(t, numReq*3, counter) // RF=3
// each stream is present 3 times
@@ -500,7 +504,8 @@ func Test_SortLabelsOnPush(t *testing.T) {
request.Streams[0].Labels = `{buzz="f", a="b"}`
_, err := distributors[0].Push(ctx, request)
require.NoError(t, err)
- require.Equal(t, `{a="b", buzz="f"}`, ingester.pushed[0].Streams[0].Labels)
+ topVal := ingester.Peek()
+ require.Equal(t, `{a="b", buzz="f"}`, topVal.Streams[0].Labels)
}
func Test_TruncateLogLines(t *testing.T) {
@@ -519,7 +524,8 @@ func Test_TruncateLogLines(t *testing.T) {
_, err := distributors[0].Push(ctx, makeWriteRequest(1, 10))
require.NoError(t, err)
- require.Len(t, ingester.pushed[0].Streams[0].Entries[0].Line, 5)
+ topVal := ingester.Peek()
+ require.Len(t, topVal.Streams[0].Entries[0].Line, 5)
})
}
@@ -1231,6 +1237,17 @@ func (i *mockIngester) Push(_ context.Context, in *logproto.PushRequest, _ ...gr
return nil, nil
}
+func (i *mockIngester) Peek() *logproto.PushRequest {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+
+ if len(i.pushed) == 0 {
+ return nil
+ }
+
+ return i.pushed[0]
+}
+
func (i *mockIngester) GetStreamRates(_ context.Context, _ *logproto.StreamRatesRequest, _ ...grpc.CallOption) (*logproto.StreamRatesResponse, error) {
return &logproto.StreamRatesResponse{}, nil
}
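The same pattern in isolation (a sketch with toy types, not Loki's actual mock): any slice in a test double that is written from multiple goroutines needs every reader and writer to hold the mutex, including one-off inspections like `Peek`.

```go
package main

import (
	"fmt"
	"sync"
)

// mockSink stands in for a test double whose Push may be called from
// many goroutines while the test body inspects what was pushed.
type mockSink struct {
	mu     sync.Mutex
	pushed []string
}

func (m *mockSink) Push(v string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.pushed = append(m.pushed, v)
}

// Peek returns the first pushed value under the lock, so the race
// detector stays quiet even while pushes are still in flight.
func (m *mockSink) Peek() string {
	m.mu.Lock()
	defer m.mu.Unlock()
	if len(m.pushed) == 0 {
		return ""
	}
	return m.pushed[0]
}

func main() {
	var m mockSink
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) { defer wg.Done(); m.Push(fmt.Sprint(i)) }(i)
	}
	wg.Wait()
	fmt.Println(m.Peek())
}
```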
From 1f56da206d41becbce64708c6db59ca269d98182 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Thu, 22 Feb 2024 08:14:08 +0100
Subject: [PATCH 108/130] refactor: Simplify fingerprint partitioning in bloom
 gateway client (#12018)
Use the recently introduced `tokenRangesForInstance()` function to determine the ranges for each replication set.
For this purpose, the function moves into the `bloomutils` package.
This PR eliminates the structs `addrsWithBounds` and `instanceWithFingerprints` (which had somewhat similar usage) and unifies them into `rsWithRanges`, which holds both the fingerprint ranges and the assigned GroupedChunkRefs.
Signed-off-by: Christian Haudum
---
pkg/bloomcompactor/bloomcompactor.go | 98 +----
pkg/bloomcompactor/bloomcompactor_test.go | 3 +-
pkg/bloomgateway/client.go | 162 +++-----
pkg/bloomgateway/client_test.go | 443 +++++++---------------
pkg/bloomutils/ring.go | 121 ++++--
pkg/bloomutils/ring_test.go | 28 --
6 files changed, 288 insertions(+), 567 deletions(-)
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index dd5a9c96ca81..85bca48f54f3 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -2,10 +2,6 @@ package bloomcompactor
import (
"context"
- "fmt"
- "math"
- "slices"
- "sort"
"sync"
"time"
@@ -212,7 +208,7 @@ func (c *Compactor) ownsTenant(tenant string) ([]v1.FingerprintBounds, bool, err
return nil, false, errors.Wrap(err, "getting ring healthy instances")
}
- ranges, err := tokenRangesForInstance(c.cfg.Ring.InstanceID, rs.Instances)
+ ranges, err := bloomutils.TokenRangesForInstance(c.cfg.Ring.InstanceID, rs.Instances)
if err != nil {
return nil, false, errors.Wrap(err, "getting token ranges for instance")
}
@@ -221,98 +217,6 @@ func (c *Compactor) ownsTenant(tenant string) ([]v1.FingerprintBounds, bool, err
return keyspaces, true, nil
}
-func tokenRangesForInstance(id string, instances []ring.InstanceDesc) (ranges ring.TokenRanges, err error) {
- var ownedTokens map[uint32]struct{}
-
- // lifted from grafana/dskit/ring/model.go <*Desc>.GetTokens()
- toks := make([][]uint32, 0, len(instances))
- for _, instance := range instances {
- if instance.Id == id {
- ranges = make(ring.TokenRanges, 0, 2*(len(instance.Tokens)+1))
- ownedTokens = make(map[uint32]struct{}, len(instance.Tokens))
- for _, tok := range instance.Tokens {
- ownedTokens[tok] = struct{}{}
- }
- }
-
- // Tokens may not be sorted for an older version which, so we enforce sorting here.
- tokens := instance.Tokens
- if !sort.IsSorted(ring.Tokens(tokens)) {
- sort.Sort(ring.Tokens(tokens))
- }
-
- toks = append(toks, tokens)
- }
-
- if cap(ranges) == 0 {
- return nil, fmt.Errorf("instance %s not found", id)
- }
-
- allTokens := ring.MergeTokens(toks)
- if len(allTokens) == 0 {
- return nil, errors.New("no tokens in the ring")
- }
-
- // mostly lifted from grafana/dskit/ring/token_range.go <*Ring>.GetTokenRangesForInstance()
-
- // non-zero value means we're now looking for start of the range. Zero value means we're looking for next end of range (ie. token owned by this instance).
- rangeEnd := uint32(0)
-
- // if this instance claimed the first token, it owns the wrap-around range, which we'll break into two separate ranges
- firstToken := allTokens[0]
- _, ownsFirstToken := ownedTokens[firstToken]
-
- if ownsFirstToken {
- // we'll start by looking for the beginning of the range that ends with math.MaxUint32
- rangeEnd = math.MaxUint32
- }
-
- // walk the ring backwards, alternating looking for ends and starts of ranges
- for i := len(allTokens) - 1; i > 0; i-- {
- token := allTokens[i]
- _, owned := ownedTokens[token]
-
- if rangeEnd == 0 {
- // we're looking for the end of the next range
- if owned {
- rangeEnd = token - 1
- }
- } else {
- // we have a range end, and are looking for the start of the range
- if !owned {
- ranges = append(ranges, rangeEnd, token)
- rangeEnd = 0
- }
- }
- }
-
- // finally look at the first token again
- // - if we have a range end, check if we claimed token 0
- // - if we don't, we have our start
- // - if we do, the start is 0
- // - if we don't have a range end, check if we claimed token 0
- // - if we don't, do nothing
- // - if we do, add the range of [0, token-1]
- // - BUT, if the token itself is 0, do nothing, because we don't own the tokens themselves (we should be covered by the already added range that ends with MaxUint32)
-
- if rangeEnd == 0 {
- if ownsFirstToken && firstToken != 0 {
- ranges = append(ranges, firstToken-1, 0)
- }
- } else {
- if ownsFirstToken {
- ranges = append(ranges, rangeEnd, 0)
- } else {
- ranges = append(ranges, rangeEnd, firstToken)
- }
- }
-
- // Ensure returned ranges are sorted.
- slices.Sort(ranges)
-
- return ranges, nil
-}
-
// runs a single round of compaction for all relevant tenants and tables
func (c *Compactor) runOne(ctx context.Context) error {
level.Info(c.logger).Log("msg", "running bloom compaction", "workers", c.cfg.WorkerParallelism)
diff --git a/pkg/bloomcompactor/bloomcompactor_test.go b/pkg/bloomcompactor/bloomcompactor_test.go
index 097e04d2a39a..0f4306494880 100644
--- a/pkg/bloomcompactor/bloomcompactor_test.go
+++ b/pkg/bloomcompactor/bloomcompactor_test.go
@@ -13,6 +13,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/pkg/bloomutils"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
util_log "github.com/grafana/loki/pkg/util/log"
lokiring "github.com/grafana/loki/pkg/util/ring"
@@ -251,7 +252,7 @@ func TestTokenRangesForInstance(t *testing.T) {
for desc, test := range tests {
t.Run(desc, func(t *testing.T) {
for id := range test.exp {
- ranges, err := tokenRangesForInstance(id, test.input)
+ ranges, err := bloomutils.TokenRangesForInstance(id, test.input)
if test.err {
require.Error(t, err)
continue
diff --git a/pkg/bloomgateway/client.go b/pkg/bloomgateway/client.go
index 721e0c35ca50..cf148f018bab 100644
--- a/pkg/bloomgateway/client.go
+++ b/pkg/bloomgateway/client.go
@@ -235,23 +235,23 @@ func (c *GatewayClient) FilterChunks(ctx context.Context, tenant string, from, t
return nil, errors.Wrap(err, "bloom gateway get healthy instances")
}
- servers, err := serverAddressesWithTokenRanges(subRing, rs.Instances)
+ servers, err := replicationSetsWithBounds(subRing, rs.Instances)
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "bloom gateway get replication sets")
}
- streamsByInst := groupFingerprintsByServer(groups, servers)
+ servers = partitionByReplicationSet(groups, servers)
filteredChunkRefs := groupedChunksRefPool.Get(len(groups))
defer groupedChunksRefPool.Put(filteredChunkRefs)
- for _, item := range streamsByInst {
+ for _, rs := range servers {
// randomize order of addresses so we don't hotspot the first server in the list
- addrs := shuffleAddrs(item.instance.addrs)
+ addrs := shuffleAddrs(rs.rs.GetAddresses())
err := c.doForAddrs(addrs, func(client logproto.BloomGatewayClient) error {
req := &logproto.FilterChunkRefRequest{
From: from,
Through: through,
- Refs: item.fingerprints,
+ Refs: rs.groups,
Filters: filters,
}
resp, err := client.FilterChunkRefs(ctx, req)
@@ -291,11 +291,6 @@ func (c *GatewayClient) doForAddrs(addrs []string, fn func(logproto.BloomGateway
return err
}
-func groupFingerprintsByServer(groups []*logproto.GroupedChunkRefs, servers []addrsWithBounds) []instanceWithFingerprints {
- boundedFingerprints := partitionFingerprintsByAddresses(groups, servers)
- return groupByInstance(boundedFingerprints)
-}
-
func mapTokenRangeToFingerprintRange(r bloomutils.Range[uint32]) v1.FingerprintBounds {
minFp := uint64(r.Min) << 32
maxFp := uint64(r.Max) << 32
@@ -305,117 +300,76 @@ func mapTokenRangeToFingerprintRange(r bloomutils.Range[uint32]) v1.FingerprintB
)
}
-func serverAddressesWithTokenRanges(subRing ring.ReadRing, instances []ring.InstanceDesc) ([]addrsWithBounds, error) {
+type rsWithRanges struct {
+ rs ring.ReplicationSet
+ ranges []v1.FingerprintBounds
+ groups []*logproto.GroupedChunkRefs
+}
+
+func replicationSetsWithBounds(subRing ring.ReadRing, instances []ring.InstanceDesc) ([]rsWithRanges, error) {
bufDescs, bufHosts, bufZones := ring.MakeBuffersForGet()
- servers := make([]addrsWithBounds, 0, len(instances))
- it := bloomutils.NewInstanceSortMergeIterator(instances)
+ servers := make([]rsWithRanges, 0, len(instances))
+ for _, inst := range instances {
+ tr, err := subRing.GetTokenRangesForInstance(inst.Id)
+ if err != nil {
+ return nil, errors.Wrap(err, "bloom gateway get ring")
+ }
- for it.Next() {
- // We can use on of the tokens from the token range
- // to obtain all addresses for that token.
- rs, err := subRing.Get(it.At().TokenRange.Max, BlocksOwnerRead, bufDescs, bufHosts, bufZones)
+ rs, err := subRing.Get(tr[0], BlocksOwnerRead, bufDescs, bufHosts, bufZones)
if err != nil {
return nil, errors.Wrap(err, "bloom gateway get ring")
}
- bounds := mapTokenRangeToFingerprintRange(it.At().TokenRange)
- servers = append(servers, addrsWithBounds{
- id: it.At().Instance.Id,
- addrs: rs.GetAddresses(),
- FingerprintBounds: bounds,
- })
- }
+ bounds := make([]v1.FingerprintBounds, 0, len(tr)/2)
+ for i := 0; i < len(tr); i += 2 {
+ b := v1.NewBounds(
+ model.Fingerprint(uint64(tr[i])<<32),
+ model.Fingerprint(uint64(tr[i+1])<<32|math.MaxUint32),
+ )
+ bounds = append(bounds, b)
+ }
- if len(servers) > 0 && servers[len(servers)-1].Max < math.MaxUint64 {
- // append the instance for the range between the maxFp and MaxUint64
- // TODO(owen-d): support wrapping around keyspace for token ranges
- servers = append(servers, addrsWithBounds{
- id: servers[0].id,
- addrs: servers[0].addrs,
- FingerprintBounds: v1.NewBounds(
- servers[len(servers)-1].Max+1,
- model.Fingerprint(math.MaxUint64),
- ),
+ servers = append(servers, rsWithRanges{
+ rs: rs,
+ ranges: bounds,
})
}
return servers, nil
}
-type addrsWithBounds struct {
- v1.FingerprintBounds
- id string
- addrs []string
-}
-
-type instanceWithFingerprints struct {
- instance addrsWithBounds
- fingerprints []*logproto.GroupedChunkRefs
-}
-
-func partitionFingerprintsByAddresses(fingerprints []*logproto.GroupedChunkRefs, addresses []addrsWithBounds) (result []instanceWithFingerprints) {
- for _, instance := range addresses {
- min, _ := slices.BinarySearchFunc(fingerprints, instance.FingerprintBounds, func(g *logproto.GroupedChunkRefs, b v1.FingerprintBounds) int {
- if g.Fingerprint < uint64(b.Min) {
- return -1
- } else if g.Fingerprint > uint64(b.Min) {
- return 1
- }
- return 0
- })
-
- max, _ := slices.BinarySearchFunc(fingerprints, instance.FingerprintBounds, func(g *logproto.GroupedChunkRefs, b v1.FingerprintBounds) int {
- if g.Fingerprint <= uint64(b.Max) {
- return -1
- } else if g.Fingerprint > uint64(b.Max) {
- return 1
+func partitionByReplicationSet(fingerprints []*logproto.GroupedChunkRefs, rs []rsWithRanges) (result []rsWithRanges) {
+ for _, inst := range rs {
+ for _, bounds := range inst.ranges {
+ min, _ := slices.BinarySearchFunc(fingerprints, bounds, func(g *logproto.GroupedChunkRefs, b v1.FingerprintBounds) int {
+ if g.Fingerprint < uint64(b.Min) {
+ return -1
+ } else if g.Fingerprint > uint64(b.Min) {
+ return 1
+ }
+ return 0
+ })
+
+ max, _ := slices.BinarySearchFunc(fingerprints, bounds, func(g *logproto.GroupedChunkRefs, b v1.FingerprintBounds) int {
+ if g.Fingerprint <= uint64(b.Max) {
+ return -1
+ } else if g.Fingerprint > uint64(b.Max) {
+ return 1
+ }
+ return 0
+ })
+
+ // fingerprint is out of boundaries
+ if min == len(fingerprints) || max == 0 {
+ continue
}
- return 0
- })
- // fingerprint is out of boundaries
- if min == len(fingerprints) || max == 0 {
- continue
+ inst.groups = append(inst.groups, fingerprints[min:max]...)
}
- result = append(result, instanceWithFingerprints{instance: instance, fingerprints: fingerprints[min:max]})
- }
-
- return result
-}
-
-// groupByInstance groups fingerprints by server instance
-func groupByInstance(boundedFingerprints []instanceWithFingerprints) []instanceWithFingerprints {
- if len(boundedFingerprints) == 0 {
- return []instanceWithFingerprints{}
- }
-
- result := make([]instanceWithFingerprints, 0, len(boundedFingerprints))
- pos := make(map[string]int, len(boundedFingerprints))
-
- for _, cur := range boundedFingerprints {
- if len(cur.fingerprints) == 0 {
- continue
+ if len(inst.groups) > 0 {
+ result = append(result, inst)
}
- // Copy fingerprint slice, otherwise we mutate the original
- // TODO(chaudum): Use SlicePool
- tmp := make([]*logproto.GroupedChunkRefs, len(cur.fingerprints))
- _ = copy(tmp, cur.fingerprints)
-
- idx, ok := pos[cur.instance.id]
- if ok {
- result[idx].fingerprints = append(result[idx].fingerprints, tmp...)
- continue
- }
-
- pos[cur.instance.id] = len(result)
- result = append(result, instanceWithFingerprints{
- instance: addrsWithBounds{
- id: cur.instance.id,
- addrs: cur.instance.addrs,
- },
- fingerprints: tmp,
- })
}
return result
diff --git a/pkg/bloomgateway/client_test.go b/pkg/bloomgateway/client_test.go
index 71ac0ec0639a..0280007443d8 100644
--- a/pkg/bloomgateway/client_test.go
+++ b/pkg/bloomgateway/client_test.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"math"
- "sort"
"testing"
"time"
@@ -21,6 +20,15 @@ import (
"github.com/grafana/loki/pkg/validation"
)
+func rs(id int, tokens ...uint32) ring.ReplicationSet {
+ inst := ring.InstanceDesc{
+ Id: fmt.Sprintf("instance-%d", id),
+ Addr: fmt.Sprintf("10.0.0.%d", id),
+ Tokens: tokens,
+ }
+ return ring.ReplicationSet{Instances: []ring.InstanceDesc{inst}}
+}
+
func TestBloomGatewayClient(t *testing.T) {
logger := log.NewNopLogger()
reg := prometheus.NewRegistry()
@@ -40,7 +48,68 @@ func TestBloomGatewayClient(t *testing.T) {
})
}
-func TestBloomGatewayClient_PartitionFingerprintsByAddresses(t *testing.T) {
+func TestBloomGatewayClient_ReplicationSetsWithBounds(t *testing.T) {
+ testCases := map[string]struct {
+ instances []ring.InstanceDesc
+ expected []rsWithRanges
+ }{
+ "single instance covers full range": {
+ instances: []ring.InstanceDesc{
+ {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{(1 << 31)}}, // 0x80000000
+ },
+ expected: []rsWithRanges{
+ {rs: rs(1, (1 << 31)), ranges: []v1.FingerprintBounds{
+ v1.NewBounds(0, math.MaxUint64),
+ }},
+ },
+ },
+ "one token per instance": {
+ instances: []ring.InstanceDesc{
+ {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{(1 << 30) * 1}}, // 0x40000000
+ {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{(1 << 30) * 2}}, // 0x80000000
+ {Id: "instance-3", Addr: "10.0.0.3", Tokens: []uint32{(1 << 30) * 3}}, // 0xc0000000
+ },
+ expected: []rsWithRanges{
+ {rs: rs(1, (1<<30)*1), ranges: []v1.FingerprintBounds{
+ v1.NewBounds(0, 4611686018427387903),
+ v1.NewBounds(13835058055282163712, 18446744073709551615),
+ }},
+ {rs: rs(2, (1<<30)*2), ranges: []v1.FingerprintBounds{
+ v1.NewBounds(4611686018427387904, 9223372036854775807),
+ }},
+ {rs: rs(3, (1<<30)*3), ranges: []v1.FingerprintBounds{
+ v1.NewBounds(9223372036854775808, 13835058055282163711),
+ }},
+ },
+ },
+ "extreme tokens in ring": {
+ instances: []ring.InstanceDesc{
+ {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{0}},
+ {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{math.MaxUint32}},
+ },
+ expected: []rsWithRanges{
+ {rs: rs(1, 0), ranges: []v1.FingerprintBounds{
+ v1.NewBounds(math.MaxUint64-math.MaxUint32, math.MaxUint64),
+ }},
+ {rs: rs(2, math.MaxUint32), ranges: []v1.FingerprintBounds{
+ v1.NewBounds(0, math.MaxUint64-math.MaxUint32-1),
+ }},
+ },
+ },
+ }
+
+ for name, tc := range testCases {
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ subRing := newMockRing(t, tc.instances)
+ res, err := replicationSetsWithBounds(subRing, tc.instances)
+ require.NoError(t, err)
+ require.Equal(t, tc.expected, res)
+ })
+ }
+}
+
+func TestBloomGatewayClient_PartitionByReplicationSet(t *testing.T) {
// Create 10 fingerprints [0, 2, 4, ... 18]
groups := make([]*logproto.GroupedChunkRefs, 0, 10)
for i := 0; i < 20; i += 2 {
@@ -50,129 +119,77 @@ func TestBloomGatewayClient_PartitionFingerprintsByAddresses(t *testing.T) {
// instance token ranges do not overlap
t.Run("non-overlapping", func(t *testing.T) {
- servers := []addrsWithBounds{
- {id: "instance-1", addrs: []string{"10.0.0.1"}, FingerprintBounds: v1.NewBounds(0, 4)},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, FingerprintBounds: v1.NewBounds(5, 9)},
- {id: "instance-3", addrs: []string{"10.0.0.3"}, FingerprintBounds: v1.NewBounds(10, 14)},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, FingerprintBounds: v1.NewBounds(15, 19)},
+ servers := []rsWithRanges{
+ {rs: rs(1), ranges: []v1.FingerprintBounds{v1.NewBounds(0, 4)}},
+ {rs: rs(2), ranges: []v1.FingerprintBounds{v1.NewBounds(5, 9), v1.NewBounds(15, 19)}},
+ {rs: rs(3), ranges: []v1.FingerprintBounds{v1.NewBounds(10, 14)}},
}
// partition fingerprints
- expected := []instanceWithFingerprints{
- {
- instance: servers[0],
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 0},
- {Fingerprint: 2},
- {Fingerprint: 4},
- },
- },
+ expected := [][]*logproto.GroupedChunkRefs{
{
- instance: servers[1],
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 6},
- {Fingerprint: 8},
- },
+ {Fingerprint: 0},
+ {Fingerprint: 2},
+ {Fingerprint: 4},
},
{
- instance: servers[2],
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 10},
- {Fingerprint: 12},
- {Fingerprint: 14},
- },
+ {Fingerprint: 6},
+ {Fingerprint: 8},
+ {Fingerprint: 16},
+ {Fingerprint: 18},
},
{
- instance: servers[3],
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 16},
- {Fingerprint: 18},
- },
+ {Fingerprint: 10},
+ {Fingerprint: 12},
+ {Fingerprint: 14},
},
}
- bounded := partitionFingerprintsByAddresses(groups, servers)
- require.Equal(t, expected, bounded)
-
- // group fingerprints by instance
-
- expected = []instanceWithFingerprints{
- {
- instance: addrsWithBounds{id: "instance-1", addrs: []string{"10.0.0.1"}},
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 0},
- {Fingerprint: 2},
- {Fingerprint: 4},
- },
- },
- {
- instance: addrsWithBounds{id: "instance-2", addrs: []string{"10.0.0.2"}},
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 6},
- {Fingerprint: 8},
- {Fingerprint: 16},
- {Fingerprint: 18},
- },
- },
- {
- instance: addrsWithBounds{id: "instance-3", addrs: []string{"10.0.0.3"}},
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 10},
- {Fingerprint: 12},
- {Fingerprint: 14},
- },
- },
+ partitioned := partitionByReplicationSet(groups, servers)
+ for i := range partitioned {
+ require.Equal(t, expected[i], partitioned[i].groups)
}
- result := groupByInstance(bounded)
- require.Equal(t, expected, result)
})
- // instance token ranges overlap
+ // instance token ranges overlap -- this should not happen in a real ring, though
t.Run("overlapping", func(t *testing.T) {
- servers := []addrsWithBounds{
- {id: "instance-1", addrs: []string{"10.0.0.1"}, FingerprintBounds: v1.NewBounds(0, 9)},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, FingerprintBounds: v1.NewBounds(5, 14)},
- {id: "instance-3", addrs: []string{"10.0.0.3"}, FingerprintBounds: v1.NewBounds(10, 19)},
+ servers := []rsWithRanges{
+ {rs: rs(1), ranges: []v1.FingerprintBounds{v1.NewBounds(0, 9)}},
+ {rs: rs(2), ranges: []v1.FingerprintBounds{v1.NewBounds(5, 14)}},
+ {rs: rs(3), ranges: []v1.FingerprintBounds{v1.NewBounds(10, 19)}},
}
// partition fingerprints
- expected := []instanceWithFingerprints{
+ expected := [][]*logproto.GroupedChunkRefs{
{
- instance: servers[0],
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 0},
- {Fingerprint: 2},
- {Fingerprint: 4},
- {Fingerprint: 6},
- {Fingerprint: 8},
- },
+ {Fingerprint: 0},
+ {Fingerprint: 2},
+ {Fingerprint: 4},
+ {Fingerprint: 6},
+ {Fingerprint: 8},
},
{
- instance: servers[1],
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 6},
- {Fingerprint: 8},
- {Fingerprint: 10},
- {Fingerprint: 12},
- {Fingerprint: 14},
- },
+ {Fingerprint: 6},
+ {Fingerprint: 8},
+ {Fingerprint: 10},
+ {Fingerprint: 12},
+ {Fingerprint: 14},
},
{
- instance: servers[2],
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 10},
- {Fingerprint: 12},
- {Fingerprint: 14},
- {Fingerprint: 16},
- {Fingerprint: 18},
- },
+ {Fingerprint: 10},
+ {Fingerprint: 12},
+ {Fingerprint: 14},
+ {Fingerprint: 16},
+ {Fingerprint: 18},
},
}
- bounded := partitionFingerprintsByAddresses(groups, servers)
- require.Equal(t, expected, bounded)
+ partitioned := partitionByReplicationSet(groups, servers)
+ for i := range partitioned {
+ require.Equal(t, expected[i], partitioned[i].groups)
+ }
})
}
@@ -187,22 +204,20 @@ func BenchmarkPartitionFingerprintsByAddresses(b *testing.B) {
numServers := 100
tokenStep := math.MaxUint32 / uint32(numServers)
- servers := make([]addrsWithBounds, 0, numServers)
+ servers := make([]rsWithRanges, 0, numServers)
for i := uint32(0); i < math.MaxUint32-tokenStep; i += tokenStep {
- servers = append(servers, addrsWithBounds{
- id: fmt.Sprintf("instance-%x", i),
- addrs: []string{fmt.Sprintf("%d", i)},
- FingerprintBounds: v1.NewBounds(
- model.Fingerprint(i)<<32,
- model.Fingerprint(i+tokenStep)<<32,
- ),
+ servers = append(servers, rsWithRanges{
+ rs: rs(int(i)),
+ ranges: []v1.FingerprintBounds{
+ v1.NewBounds(model.Fingerprint(i)<<32, model.Fingerprint(i+tokenStep)<<32),
+ },
})
}
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
- _ = partitionFingerprintsByAddresses(groups, servers)
+ _ = partitionByReplicationSet(groups, servers)
}
}
@@ -229,194 +244,17 @@ func TestBloomGatewayClient_MapTokenRangeToFingerprintRange(t *testing.T) {
}
}
-func TestBloomGatewayClient_ServerAddressesWithTokenRanges(t *testing.T) {
- testCases := map[string]struct {
- instances []ring.InstanceDesc
- expected []addrsWithBounds
- }{
- "one token per instance, no gaps between fingerprint ranges": {
- instances: []ring.InstanceDesc{
- {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{(1 << 30) * 1}}, // 0x40000000
- {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{(1 << 30) * 2}}, // 0x80000000
- {Id: "instance-3", Addr: "10.0.0.3", Tokens: []uint32{(1 << 30) * 3}}, // 0xc0000000
- },
- expected: []addrsWithBounds{
- {id: "instance-1", addrs: []string{"10.0.0.1"}, FingerprintBounds: v1.NewBounds(0, 4611686022722355199)},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, FingerprintBounds: v1.NewBounds(4611686022722355200, 9223372041149743103)},
- {id: "instance-3", addrs: []string{"10.0.0.3"}, FingerprintBounds: v1.NewBounds(9223372041149743104, 13835058059577131007)},
- {id: "instance-1", addrs: []string{"10.0.0.1"}, FingerprintBounds: v1.NewBounds(13835058059577131008, 18446744073709551615)},
- },
- },
- "MinUint32 and MaxUint32 are actual tokens in the ring": {
- instances: []ring.InstanceDesc{
- {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{0}},
- {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{math.MaxUint32}},
- },
- expected: []addrsWithBounds{
- {id: "instance-1", addrs: []string{"10.0.0.1"}, FingerprintBounds: v1.NewBounds(0, (1<<32)-1)},
- {id: "instance-2", addrs: []string{"10.0.0.2"}, FingerprintBounds: v1.NewBounds((1 << 32), math.MaxUint64)},
- },
- },
- }
-
- for name, tc := range testCases {
- tc := tc
- t.Run(name, func(t *testing.T) {
- subRing := newMockRing(tc.instances)
- res, err := serverAddressesWithTokenRanges(subRing, tc.instances)
- require.NoError(t, err)
- require.Equal(t, tc.expected, res)
- })
- }
-
-}
-
-func TestBloomGatewayClient_GroupFingerprintsByServer(t *testing.T) {
- instances := []ring.InstanceDesc{
- {Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{0x1fffffff, 0x7fffffff}},
- {Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{0x3fffffff, 0x9fffffff}},
- {Id: "instance-3", Addr: "10.0.0.3", Tokens: []uint32{0x5fffffff, 0xbfffffff}},
- }
-
- subRing := newMockRing(instances)
- servers, err := serverAddressesWithTokenRanges(subRing, instances)
- require.NoError(t, err)
-
- // for _, s := range servers {
- // t.Log(s, v1.NewBounds(model.Fingerprint(s.fpRange.Min), model.Fingerprint(s.fpRange.Max)))
- // }
- /**
- {instance-1 [10.0.0.1] { 0 536870911} { 0 2305843004918726656}} 0000000000000000-1fffffff00000000
- {instance-2 [10.0.0.2] { 536870912 1073741823} { 2305843009213693952 4611686014132420608}} 2000000000000000-3fffffff00000000
- {instance-3 [10.0.0.3] {1073741824 1610612735} { 4611686018427387904 6917529023346114560}} 4000000000000000-5fffffff00000000
- {instance-1 [10.0.0.1] {1610612736 2147483647} { 6917529027641081856 9223372032559808512}} 6000000000000000-7fffffff00000000
- {instance-2 [10.0.0.2] {2147483648 2684354559} { 9223372036854775808 11529215041773502464}} 8000000000000000-9fffffff00000000
- {instance-3 [10.0.0.3] {2684354560 3221225471} {11529215046068469760 13835058050987196416}} a000000000000000-bfffffff00000000
- {instance-1 [10.0.0.1] {3221225472 4294967295} {13835058055282163712 18446744073709551615}} c000000000000000-ffffffffffffffff
- **/
-
- testCases := []struct {
- name string
- chunks []*logproto.GroupedChunkRefs
- expected []instanceWithFingerprints
- }{
- {
- name: "empty input yields empty result",
- chunks: []*logproto.GroupedChunkRefs{},
- expected: []instanceWithFingerprints{},
- },
- {
- name: "fingerprints within a single token range are grouped",
- chunks: []*logproto.GroupedChunkRefs{
- {Fingerprint: 0x5000000000000001},
- {Fingerprint: 0x5000000000000010},
- {Fingerprint: 0x5000000000000100},
- },
- expected: []instanceWithFingerprints{
- {
- instance: addrsWithBounds{
- id: "instance-3",
- addrs: []string{"10.0.0.3"},
- },
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 0x5000000000000001},
- {Fingerprint: 0x5000000000000010},
- {Fingerprint: 0x5000000000000100},
- },
- },
- },
- },
- {
- name: "fingerprints within multiple token ranges of a single instance are grouped",
- chunks: []*logproto.GroupedChunkRefs{
- {Fingerprint: 0x1000000000000000},
- {Fingerprint: 0x7000000000000000},
- {Fingerprint: 0xd000000000000000},
- },
- expected: []instanceWithFingerprints{
- {
- instance: addrsWithBounds{
- id: "instance-1",
- addrs: []string{"10.0.0.1"},
- },
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 0x1000000000000000},
- {Fingerprint: 0x7000000000000000},
- {Fingerprint: 0xd000000000000000},
- },
- },
- },
- },
- {
- name: "fingerprints with token ranges of multiple instances are grouped",
- chunks: []*logproto.GroupedChunkRefs{
- {Fingerprint: 0x1000000000000000},
- {Fingerprint: 0x3000000000000000},
- {Fingerprint: 0x5000000000000000},
- {Fingerprint: 0x7000000000000000},
- {Fingerprint: 0x9000000000000000},
- {Fingerprint: 0xb000000000000000},
- {Fingerprint: 0xd000000000000000},
- {Fingerprint: 0xf000000000000000},
- },
- expected: []instanceWithFingerprints{
- {
- instance: addrsWithBounds{
- id: "instance-1",
- addrs: []string{"10.0.0.1"},
- },
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 0x1000000000000000},
- {Fingerprint: 0x7000000000000000},
- {Fingerprint: 0xd000000000000000},
- {Fingerprint: 0xf000000000000000},
- },
- },
- {
- instance: addrsWithBounds{
- id: "instance-2",
- addrs: []string{"10.0.0.2"},
- },
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 0x3000000000000000},
- {Fingerprint: 0x9000000000000000},
- },
- },
- {
- instance: addrsWithBounds{
- id: "instance-3",
- addrs: []string{"10.0.0.3"},
- },
- fingerprints: []*logproto.GroupedChunkRefs{
- {Fingerprint: 0x5000000000000000},
- {Fingerprint: 0xb000000000000000},
- },
- },
- },
- },
- }
-
- for _, tc := range testCases {
- tc := tc
- t.Run(tc.name, func(t *testing.T) {
- // sort chunks here, to be able to write more human readable test input
- sort.Slice(tc.chunks, func(i, j int) bool {
- return tc.chunks[i].Fingerprint < tc.chunks[j].Fingerprint
- })
- res := groupFingerprintsByServer(tc.chunks, servers)
- require.Equal(t, tc.expected, res)
- })
- }
-}
-
// make sure mockRing implements the ring.ReadRing interface
var _ ring.ReadRing = &mockRing{}
-func newMockRing(instances []ring.InstanceDesc) *mockRing {
- it := bloomutils.NewInstanceSortMergeIterator(instances)
- ranges := make([]bloomutils.InstanceWithTokenRange, 0)
- for it.Next() {
- ranges = append(ranges, it.At())
+func newMockRing(t *testing.T, instances []ring.InstanceDesc) *mockRing {
+ ranges := make([]ring.TokenRanges, 0)
+ for i := range instances {
+ tr, err := bloomutils.TokenRangesForInstance(instances[i].Id, instances)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ranges = append(ranges, tr)
}
return &mockRing{
instances: instances,
@@ -426,21 +264,17 @@ func newMockRing(instances []ring.InstanceDesc) *mockRing {
type mockRing struct {
instances []ring.InstanceDesc
- ranges []bloomutils.InstanceWithTokenRange
+ ranges []ring.TokenRanges
}
// Get implements ring.ReadRing.
-func (r *mockRing) Get(key uint32, _ ring.Operation, _ []ring.InstanceDesc, _ []string, _ []string) (ring.ReplicationSet, error) {
- idx, _ := sort.Find(len(r.ranges), func(i int) int {
- if r.ranges[i].TokenRange.Max < key {
- return 1
+func (r *mockRing) Get(key uint32, _ ring.Operation, _ []ring.InstanceDesc, _ []string, _ []string) (rs ring.ReplicationSet, err error) {
+ for i := range r.ranges {
+ if r.ranges[i].IncludesKey(key) {
+ rs.Instances = append(rs.Instances, r.instances[i])
}
- if r.ranges[i].TokenRange.Max > key {
- return -1
- }
- return 0
- })
- return ring.ReplicationSet{Instances: []ring.InstanceDesc{r.ranges[idx].Instance}}, nil
+ }
+ return
}
// GetAllHealthy implements ring.ReadRing.
@@ -490,7 +324,6 @@ func (*mockRing) CleanupShuffleShardCache(_ string) {
panic("unimplemented")
}
-func (r *mockRing) GetTokenRangesForInstance(_ string) (ring.TokenRanges, error) {
- tr := ring.TokenRanges{0, math.MaxUint32}
- return tr, nil
+func (r *mockRing) GetTokenRangesForInstance(id string) (ring.TokenRanges, error) {
+ return bloomutils.TokenRangesForInstance(id, r.instances)
}
diff --git a/pkg/bloomutils/ring.go b/pkg/bloomutils/ring.go
index bc58bf09c886..9858f63e6ba3 100644
--- a/pkg/bloomutils/ring.go
+++ b/pkg/bloomutils/ring.go
@@ -3,6 +3,7 @@
package bloomutils
import (
+ "errors"
"fmt"
"math"
"sort"
@@ -10,6 +11,7 @@ import (
"github.com/grafana/dskit/ring"
"github.com/prometheus/common/model"
"golang.org/x/exp/constraints"
+ "golang.org/x/exp/slices"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
@@ -83,39 +85,94 @@ func KeyspacesFromTokenRanges(tokenRanges ring.TokenRanges) []v1.FingerprintBoun
return keyspaces
}
-// NewInstanceSortMergeIterator creates an iterator that yields instanceWithToken elements
-// where the token of the elements are sorted in ascending order.
-func NewInstanceSortMergeIterator(instances []ring.InstanceDesc) v1.Iterator[InstanceWithTokenRange] {
- tokenIters := make([]v1.PeekingIterator[v1.IndexedValue[uint32]], 0, len(instances))
- for i, inst := range instances {
- sort.Slice(inst.Tokens, func(a, b int) bool { return inst.Tokens[a] < inst.Tokens[b] })
- itr := v1.NewIterWithIndex(v1.NewSliceIter[uint32](inst.Tokens), i)
- tokenIters = append(tokenIters, v1.NewPeekingIter[v1.IndexedValue[uint32]](itr))
+func TokenRangesForInstance(id string, instances []ring.InstanceDesc) (ranges ring.TokenRanges, err error) {
+ var ownedTokens map[uint32]struct{}
+
+ // lifted from grafana/dskit/ring/model.go <*Desc>.GetTokens()
+ toks := make([][]uint32, 0, len(instances))
+ for _, instance := range instances {
+ if instance.Id == id {
+ ranges = make(ring.TokenRanges, 0, 2*(len(instance.Tokens)+1))
+ ownedTokens = make(map[uint32]struct{}, len(instance.Tokens))
+ for _, tok := range instance.Tokens {
+ ownedTokens[tok] = struct{}{}
+ }
+ }
+
+ // Tokens may not be sorted for an older ring version, so we enforce sorting here.
+ tokens := instance.Tokens
+ if !sort.IsSorted(ring.Tokens(tokens)) {
+ sort.Sort(ring.Tokens(tokens))
+ }
+
+ toks = append(toks, tokens)
+ }
+
+ if cap(ranges) == 0 {
+ return nil, fmt.Errorf("instance %s not found", id)
+ }
+
+ allTokens := ring.MergeTokens(toks)
+ if len(allTokens) == 0 {
+ return nil, errors.New("no tokens in the ring")
+ }
+
+ // mostly lifted from grafana/dskit/ring/token_range.go <*Ring>.GetTokenRangesForInstance()
+
+ // A non-zero value means we're looking for the start of the range; a zero value means we're looking for the next end of a range (i.e. a token owned by this instance).
+ rangeEnd := uint32(0)
+
+ // if this instance claimed the first token, it owns the wrap-around range, which we'll break into two separate ranges
+ firstToken := allTokens[0]
+ _, ownsFirstToken := ownedTokens[firstToken]
+
+ if ownsFirstToken {
+ // we'll start by looking for the beginning of the range that ends with math.MaxUint32
+ rangeEnd = math.MaxUint32
}
- heapIter := v1.NewHeapIterator[v1.IndexedValue[uint32]](
- func(iv1, iv2 v1.IndexedValue[uint32]) bool {
- return iv1.Value() < iv2.Value()
- },
- tokenIters...,
- )
-
- prevToken := -1
- return v1.NewDedupingIter[v1.IndexedValue[uint32], InstanceWithTokenRange](
- func(iv v1.IndexedValue[uint32], iwtr InstanceWithTokenRange) bool {
- return false
- },
- func(iv v1.IndexedValue[uint32]) InstanceWithTokenRange {
- minToken, maxToken := uint32(prevToken+1), iv.Value()
- prevToken = int(maxToken)
- return InstanceWithTokenRange{
- Instance: instances[iv.Index()],
- TokenRange: NewTokenRange(minToken, maxToken),
+ // walk the ring backwards, alternating looking for ends and starts of ranges
+ for i := len(allTokens) - 1; i > 0; i-- {
+ token := allTokens[i]
+ _, owned := ownedTokens[token]
+
+ if rangeEnd == 0 {
+ // we're looking for the end of the next range
+ if owned {
+ rangeEnd = token - 1
}
- },
- func(iv v1.IndexedValue[uint32], iwtr InstanceWithTokenRange) InstanceWithTokenRange {
- panic("must not be called, because Eq() is always false")
- },
- v1.NewPeekingIter(heapIter),
- )
+ } else {
+ // we have a range end, and are looking for the start of the range
+ if !owned {
+ ranges = append(ranges, rangeEnd, token)
+ rangeEnd = 0
+ }
+ }
+ }
+
+ // finally look at the first token again
+ // - if we have a range end, check if we claimed token 0
+ // - if we don't, we have our start
+ // - if we do, the start is 0
+ // - if we don't have a range end, check if we claimed token 0
+ // - if we don't, do nothing
+ // - if we do, add the range of [0, token-1]
+ // - BUT, if the token itself is 0, do nothing, because we don't own the tokens themselves (we should be covered by the already added range that ends with MaxUint32)
+
+ if rangeEnd == 0 {
+ if ownsFirstToken && firstToken != 0 {
+ ranges = append(ranges, firstToken-1, 0)
+ }
+ } else {
+ if ownsFirstToken {
+ ranges = append(ranges, rangeEnd, 0)
+ } else {
+ ranges = append(ranges, rangeEnd, firstToken)
+ }
+ }
+
+ // Ensure returned ranges are sorted.
+ slices.Sort(ranges)
+
+ return ranges, nil
}
diff --git a/pkg/bloomutils/ring_test.go b/pkg/bloomutils/ring_test.go
index 47ebb4766490..a6ef7374f527 100644
--- a/pkg/bloomutils/ring_test.go
+++ b/pkg/bloomutils/ring_test.go
@@ -11,34 +11,6 @@ import (
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
-func TestBloomGatewayClient_InstanceSortMergeIterator(t *testing.T) {
- // | 0 1 2 3 4 5 6 7 8 9 |
- // ---------+---------------------+
- // ID 1 | ***o ***o |
- // ID 2 | ***o ***o |
- // ID 3 | **o |
- input := []ring.InstanceDesc{
- {Id: "1", Tokens: []uint32{5, 9}},
- {Id: "2", Tokens: []uint32{3, 7}},
- {Id: "3", Tokens: []uint32{1}},
- }
- expected := []InstanceWithTokenRange{
- {Instance: input[2], TokenRange: NewTokenRange(0, 1)},
- {Instance: input[1], TokenRange: NewTokenRange(2, 3)},
- {Instance: input[0], TokenRange: NewTokenRange(4, 5)},
- {Instance: input[1], TokenRange: NewTokenRange(6, 7)},
- {Instance: input[0], TokenRange: NewTokenRange(8, 9)},
- }
-
- var i int
- it := NewInstanceSortMergeIterator(input)
- for it.Next() {
- t.Log(expected[i], it.At())
- require.Equal(t, expected[i], it.At())
- i++
- }
-}
-
func uint64Range(min, max uint64) Range[uint64] {
return Range[uint64]{min, max}
}
From 32a9a3f45d2017e51c77432ca979f7deeb9794f6 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Thu, 22 Feb 2024 09:01:17 +0100
Subject: [PATCH 109/130] chore(blooms): Improve how block directory is
extracted (#12030)
This PR contains two changes:
1) Extract the block without an intermediate temp file.
The commit is cherry-picked from https://github.com/grafana/loki/pull/12021, but that PR got merged into the wrong branch :see_no_evil:
2) Strip the `.tar.gz` suffix from the block file name when extracting it into a directory.
Signed-off-by: Christian Haudum
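For reference, a minimal stand-alone sketch of change (1), streaming a gzipped tarball straight into a directory without the intermediate temp file. It uses only the standard library rather than Loki's `v1.UnTarGz`, and the file path is invented for illustration:

```go
package main

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"os"
	"path/filepath"
	"strings"
)

// untarGz extracts a gzipped tarball from r into dir, consuming the
// stream directly instead of buffering it in a temp file first.
func untarGz(dir string, r io.Reader) error {
	gz, err := gzip.NewReader(r)
	if err != nil {
		return err
	}
	defer gz.Close()

	tr := tar.NewReader(gz)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if hdr.Typeflag != tar.TypeReg {
			continue // bloom block archives only contain regular files
		}
		f, err := os.Create(filepath.Join(dir, filepath.Base(hdr.Name)))
		if err != nil {
			return err
		}
		if _, err := io.Copy(f, tr); err != nil {
			f.Close()
			return err
		}
		if err := f.Close(); err != nil {
			return err
		}
	}
}

func main() {
	// Change (2): the extraction target drops the ".tar.gz" suffix of
	// the archive name, mirroring the TrimSuffix call in the diff below.
	src := "blocks/block-1.tar.gz" // illustrative path, stands in for GetObject
	dir := strings.TrimSuffix(src, ".tar.gz")
	if err := os.MkdirAll(dir, 0o755); err != nil {
		panic(err)
	}
	f, err := os.Open(src)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := untarGz(dir, f); err != nil {
		panic(err)
	}
}
```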
---
.../stores/shipper/bloomshipper/cache_test.go | 15 +-----
.../stores/shipper/bloomshipper/client.go | 20 ++++++--
.../shipper/bloomshipper/compress_utils.go | 50 -------------------
.../bloomshipper/compress_utils_test.go | 28 ++---------
.../stores/shipper/bloomshipper/fetcher.go | 4 ++
.../shipper/bloomshipper/fetcher_test.go | 7 +--
6 files changed, 29 insertions(+), 95 deletions(-)
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache_test.go b/pkg/storage/stores/shipper/bloomshipper/cache_test.go
index c85f0382bafd..dc078ab702c0 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache_test.go
@@ -30,9 +30,6 @@ func TestBlockDirectory_Cleanup(t *testing.T) {
tc := tc
t.Run(name, func(t *testing.T) {
extractedBlockDirectory := t.TempDir()
- blockFilePath, _, _, _ := createBlockArchive(t)
- err := extractArchive(blockFilePath, extractedBlockDirectory)
- require.NoError(t, err)
require.DirExists(t, extractedBlockDirectory)
blockDir := BlockDirectory{
@@ -61,20 +58,10 @@ func TestBlockDirectory_Cleanup(t *testing.T) {
}
func Test_ClosableBlockQuerier(t *testing.T) {
- blockFilePath, _, _, _ := createBlockArchive(t)
- extractedBlockDirectory := t.TempDir()
- err := extractArchive(blockFilePath, extractedBlockDirectory)
- require.NoError(t, err)
-
- blockDir := BlockDirectory{
- Path: extractedBlockDirectory,
- removeDirectoryTimeout: 100 * time.Millisecond,
- refCount: atomic.NewInt32(0),
- }
+ blockDir := NewBlockDirectory(BlockRef{}, t.TempDir(), log.NewNopLogger())
querier := blockDir.BlockQuerier()
require.Equal(t, int32(1), blockDir.refCount.Load())
require.NoError(t, querier.Close())
require.Equal(t, int32(0), blockDir.refCount.Load())
-
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go
index 240f2b516658..6ee47d78d578 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client.go
@@ -7,6 +7,7 @@ import (
"fmt"
"hash"
"io"
+ "strings"
"github.com/go-kit/log"
"github.com/grafana/dskit/concurrency"
@@ -15,6 +16,7 @@ import (
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/chunk/client"
+ "github.com/grafana/loki/pkg/storage/chunk/client/util"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb"
"github.com/grafana/loki/pkg/util/encoding"
@@ -264,18 +266,28 @@ func (b *BloomClient) DeleteMetas(ctx context.Context, refs []MetaRef) error {
return err
}
-// GetBlock downloads the blocks from objectStorage and returns the downloaded block
+// GetBlock downloads the block from object storage and returns the directory
+// in which the block data resides
func (b *BloomClient) GetBlock(ctx context.Context, ref BlockRef) (BlockDirectory, error) {
key := b.Block(ref).Addr()
- readCloser, _, err := b.client.GetObject(ctx, key)
+
+ rc, _, err := b.client.GetObject(ctx, key)
if err != nil {
return BlockDirectory{}, fmt.Errorf("failed to get block from storage: %w", err)
}
+ defer rc.Close()
path := b.fsResolver.Block(ref).LocalPath()
- err = extractBlock(readCloser, path, b.logger)
+ // the block directory should not contain the .tar.gz extension
+ path = strings.TrimSuffix(path, ".tar.gz")
+ err = util.EnsureDirectory(path)
+ if err != nil {
+ return BlockDirectory{}, fmt.Errorf("failed to create block directory: %w", err)
+ }
+
+ err = v1.UnTarGz(path, rc)
if err != nil {
- return BlockDirectory{}, fmt.Errorf("failed to extract block into directory : %w", err)
+ return BlockDirectory{}, fmt.Errorf("failed to extract block: %w", err)
}
return NewBlockDirectory(ref, path, b.logger), nil
diff --git a/pkg/storage/stores/shipper/bloomshipper/compress_utils.go b/pkg/storage/stores/shipper/bloomshipper/compress_utils.go
index 332c900fe29d..57025113cea7 100644
--- a/pkg/storage/stores/shipper/bloomshipper/compress_utils.go
+++ b/pkg/storage/stores/shipper/bloomshipper/compress_utils.go
@@ -1,14 +1,10 @@
package bloomshipper
import (
- "fmt"
- "io"
"os"
- "path/filepath"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
- "github.com/google/uuid"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
@@ -31,49 +27,3 @@ func CompressBloomBlock(ref BlockRef, archivePath, localDst string, logger log.L
return blockToUpload, nil
}
-
-func writeDataToTempFile(workingDirectoryPath string, data io.ReadCloser) (string, error) {
- defer data.Close()
- archivePath := filepath.Join(workingDirectoryPath, uuid.New().String())
-
- archiveFile, err := os.Create(archivePath)
- if err != nil {
- return "", fmt.Errorf("error creating empty file to store the archiver: %w", err)
- }
- defer archiveFile.Close()
- _, err = io.Copy(archiveFile, data)
- if err != nil {
- return "", fmt.Errorf("error writing data to archive file: %w", err)
- }
- return archivePath, nil
-}
-
-func extractArchive(archivePath string, workingDirectoryPath string) error {
- file, err := os.Open(archivePath)
- if err != nil {
- return fmt.Errorf("error opening archive file %s: %w", archivePath, err)
- }
- return v1.UnTarGz(workingDirectoryPath, file)
-}
-
-func extractBlock(data io.ReadCloser, blockDir string, logger log.Logger) error {
- err := os.MkdirAll(blockDir, os.ModePerm)
- if err != nil {
- return fmt.Errorf("can not create directory to extract the block: %w", err)
- }
- archivePath, err := writeDataToTempFile(blockDir, data)
- if err != nil {
- return fmt.Errorf("error writing data to temp file: %w", err)
- }
- defer func() {
- err = os.Remove(archivePath)
- if err != nil {
- level.Error(logger).Log("msg", "error removing temp archive file", "err", err)
- }
- }()
- err = extractArchive(archivePath, blockDir)
- if err != nil {
- return fmt.Errorf("error extracting archive: %w", err)
- }
- return nil
-}
diff --git a/pkg/storage/stores/shipper/bloomshipper/compress_utils_test.go b/pkg/storage/stores/shipper/bloomshipper/compress_utils_test.go
index 4fddf8e9c348..11a6afb21af4 100644
--- a/pkg/storage/stores/shipper/bloomshipper/compress_utils_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/compress_utils_test.go
@@ -13,28 +13,6 @@ import (
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
-func Test_blockDownloader_extractBlock(t *testing.T) {
- blockFilePath, _, bloomFileContent, seriesFileContent := createBlockArchive(t)
- blockFile, err := os.OpenFile(blockFilePath, os.O_RDONLY, 0700)
- require.NoError(t, err)
-
- workingDir := t.TempDir()
-
- err = extractBlock(blockFile, workingDir, nil)
- require.NoError(t, err)
-
- require.FileExists(t, filepath.Join(workingDir, v1.BloomFileName))
- require.FileExists(t, filepath.Join(workingDir, v1.SeriesFileName))
-
- actualBloomFileContent, err := os.ReadFile(filepath.Join(workingDir, v1.BloomFileName))
- require.NoError(t, err)
- require.Equal(t, bloomFileContent, string(actualBloomFileContent))
-
- actualSeriesFileContent, err := os.ReadFile(filepath.Join(workingDir, v1.SeriesFileName))
- require.NoError(t, err)
- require.Equal(t, seriesFileContent, string(actualSeriesFileContent))
-}
-
func directoryDoesNotExist(path string) bool {
_, err := os.Lstat(path)
return err != nil
@@ -42,7 +20,7 @@ func directoryDoesNotExist(path string) bool {
const testArchiveFileName = "test-block-archive"
-func createBlockArchive(t *testing.T) (string, string, string, string) {
+func createBlockArchive(t *testing.T) (string, io.Reader, string, string) {
dir := t.TempDir()
mockBlockDir := filepath.Join(dir, "mock-block-dir")
err := os.MkdirAll(mockBlockDir, 0777)
@@ -65,5 +43,7 @@ func createBlockArchive(t *testing.T) (string, string, string, string) {
err = v1.TarGz(file, v1.NewDirectoryBlockReader(mockBlockDir))
require.NoError(t, err)
- return blockFilePath, mockBlockDir, bloomFileContent, seriesFileContent
+ _, _ = file.Seek(0, 0)
+
+ return blockFilePath, file, bloomFileContent, seriesFileContent
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher.go b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
index 61dac17b21d8..366b37ec96dd 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
@@ -5,6 +5,7 @@ import (
"encoding/json"
"os"
"path/filepath"
+ "strings"
"sync"
"github.com/go-kit/log"
@@ -222,6 +223,9 @@ func (f *Fetcher) loadBlocksFromFS(_ context.Context, refs []BlockRef) ([]BlockD
for _, ref := range refs {
path := f.localFSResolver.Block(ref).LocalPath()
+ // the block directory does not contain the .tar.gz extension
+ // since it is stripped when the archive is extracted into a folder
+ path = strings.TrimSuffix(path, ".tar.gz")
if ok, clean := f.isBlockDir(path); ok {
blockDirs = append(blockDirs, NewBlockDirectory(ref, path, f.logger))
} else {
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
index 962bebb9956f..cb89f6ef6b45 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
@@ -6,6 +6,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "strings"
"testing"
"time"
@@ -152,9 +153,9 @@ func TestFetcher_LoadBlocksFromFS(t *testing.T) {
{Ref: Ref{TenantID: "tenant", TableName: "12345", Bounds: v1.NewBounds(0x2000, 0x2fff)}},
}
dirs := []string{
- resolver.Block(refs[0]).LocalPath(),
- resolver.Block(refs[1]).LocalPath(),
- resolver.Block(refs[2]).LocalPath(),
+ strings.TrimSuffix(resolver.Block(refs[0]).LocalPath(), ".tar.gz"),
+ strings.TrimSuffix(resolver.Block(refs[1]).LocalPath(), ".tar.gz"),
+ strings.TrimSuffix(resolver.Block(refs[2]).LocalPath(), ".tar.gz"),
}
createBlockDir(t, dirs[1])
From 817c516bd27ff2974f18dd392cc1fc7e0d2f1e5b Mon Sep 17 00:00:00 2001
From: Sheikh-Abubaker
Date: Thu, 22 Feb 2024 14:36:54 +0530
Subject: [PATCH 110/130] feat: area/helm: Added missing default values to
support ServerSideApply (#11567)
Signed-off-by: Sheikh-Abubaker
Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com>
Co-authored-by: Karsten Jeschkies
---
production/helm/loki/CHANGELOG.md | 4 ++++
production/helm/loki/Chart.yaml | 2 +-
production/helm/loki/README.md | 2 +-
production/helm/loki/templates/monitoring/pod-logs.yaml | 6 ++++--
4 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 59487c984d6e..0ec4124ab9ed 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries below this line.)
+## 5.43.3
+
+- [ENHANCEMENT] Added missing default values to support ServerSideApply
+
## 5.43.2
- [BUGFIX] Added `alibabacloud` to `isUsingObjectStorage` check.
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index c20abdba25d8..fc9b3a32e434 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki in simple, scalable mode
type: application
appVersion: 2.9.4
-version: 5.43.2
+version: 5.43.3
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index 86a13201c2eb..30f3b7579025 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-![Version: 5.43.2](https://img.shields.io/badge/Version-5.43.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.4](https://img.shields.io/badge/AppVersion-2.9.4-informational?style=flat-square)
+![Version: 5.43.3](https://img.shields.io/badge/Version-5.43.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.9.4](https://img.shields.io/badge/AppVersion-2.9.4-informational?style=flat-square)
Helm chart for Grafana Loki in simple, scalable mode
diff --git a/production/helm/loki/templates/monitoring/pod-logs.yaml b/production/helm/loki/templates/monitoring/pod-logs.yaml
index 4445f3e76077..3fc95c44e22b 100644
--- a/production/helm/loki/templates/monitoring/pod-logs.yaml
+++ b/production/helm/loki/templates/monitoring/pod-logs.yaml
@@ -19,7 +19,8 @@ spec:
pipelineStages:
- cri: { }
relabelings:
- - sourceLabels:
+ - action: replace
+ sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: __host__
- action: labelmap
@@ -42,7 +43,8 @@ spec:
sourceLabels:
- __meta_kubernetes_pod_container_name
targetLabel: container
- - replacement: "{{ include "loki.clusterLabel" $ }}"
+ - action: replace
+ replacement: "{{ include "loki.clusterLabel" $ }}"
targetLabel: cluster
{{- with .relabelings }}
{{- toYaml . | nindent 4 }}
From b9ce005ec1cd8cf1bb448fce7a312dd47037a87b Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Thu, 22 Feb 2024 10:41:39 +0100
Subject: [PATCH 111/130] fix: Ensure working dir for bloomstore exists
(#12019)
Fail startup if the working directory cannot be created or there are not enough permissions. This prevents the bloomstore from failing later in the process, once it tries to download and extract blocks.
Signed-off-by: Christian Haudum
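For illustration, a self-contained sketch of the check this patch adds to `util.EnsureDirectory` (the directory path is made up; the permission test mirrors the diff below):

```go
package main

import (
	"fmt"
	"os"
)

// ensureDirectory creates dir if it is missing, and fails fast when the
// path exists but is not a directory or lacks rwx permissions for the
// owner -- the same conditions the patched util.EnsureDirectory rejects.
func ensureDirectory(dir string) error {
	info, err := os.Stat(dir)
	if os.IsNotExist(err) {
		return os.MkdirAll(dir, 0o777)
	} else if err == nil && !info.IsDir() {
		return fmt.Errorf("not a directory: %s", dir)
	} else if err == nil && info.Mode()&0700 != 0700 {
		return fmt.Errorf("insufficient permissions: %s %s", dir, info.Mode())
	}
	return err
}

func main() {
	// Checking at startup is cheaper than failing mid-download later.
	if err := ensureDirectory("/var/loki/blooms"); err != nil {
		fmt.Fprintln(os.Stderr, "bloom store working directory:", err)
		os.Exit(1)
	}
}
```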
---
pkg/bloomgateway/bloomgateway_test.go | 7 +++
pkg/loki/config_wrapper.go | 2 +-
pkg/loki/config_wrapper_test.go | 2 +
pkg/loki/modules_test.go | 11 ++--
pkg/storage/chunk/client/util/util.go | 2 +
.../shipper/bloomshipper/shipper_test.go | 2 +-
.../stores/shipper/bloomshipper/store.go | 7 ++-
.../stores/shipper/bloomshipper/store_test.go | 53 ++++++++++++++++---
8 files changed, 72 insertions(+), 14 deletions(-)
diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go
index fede86484a96..9a4dea08dba2 100644
--- a/pkg/bloomgateway/bloomgateway_test.go
+++ b/pkg/bloomgateway/bloomgateway_test.go
@@ -26,6 +26,7 @@ import (
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/chunk/client/local"
"github.com/grafana/loki/pkg/storage/config"
+ bloomshipperconfig "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config"
lokiring "github.com/grafana/loki/pkg/util/ring"
"github.com/grafana/loki/pkg/validation"
)
@@ -70,6 +71,9 @@ func TestBloomGateway_StartStopService(t *testing.T) {
Configs: []config.PeriodConfig{p},
}
storageCfg := storage.Config{
+ BloomShipperConfig: bloomshipperconfig.Config{
+ WorkingDirectory: t.TempDir(),
+ },
FSConfig: local.FSConfig{
Directory: t.TempDir(),
},
@@ -136,6 +140,9 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
Configs: []config.PeriodConfig{p},
}
storageCfg := storage.Config{
+ BloomShipperConfig: bloomshipperconfig.Config{
+ WorkingDirectory: t.TempDir(),
+ },
FSConfig: local.FSConfig{
Directory: t.TempDir(),
},
diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go
index 1914c8ab3edf..f76e0f75da9f 100644
--- a/pkg/loki/config_wrapper.go
+++ b/pkg/loki/config_wrapper.go
@@ -407,7 +407,7 @@ func applyPathPrefixDefaults(r, defaults *ConfigWrapper) {
r.CompactorConfig.WorkingDirectory = fmt.Sprintf("%s/compactor", prefix)
}
if r.StorageConfig.BloomShipperConfig.WorkingDirectory == defaults.StorageConfig.BloomShipperConfig.WorkingDirectory {
- r.StorageConfig.BloomShipperConfig.WorkingDirectory = fmt.Sprintf("%s/bloom-shipper", prefix)
+ r.StorageConfig.BloomShipperConfig.WorkingDirectory = fmt.Sprintf("%s/blooms", prefix)
}
}
}
diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go
index 3b1237dad4d1..60c9223732d0 100644
--- a/pkg/loki/config_wrapper_test.go
+++ b/pkg/loki/config_wrapper_test.go
@@ -100,6 +100,7 @@ common:
assert.EqualValues(t, "/opt/loki/rules-temp", config.Ruler.RulePath)
assert.EqualValues(t, "/opt/loki/wal", config.Ingester.WAL.Dir)
assert.EqualValues(t, "/opt/loki/compactor", config.CompactorConfig.WorkingDirectory)
+ assert.EqualValues(t, "/opt/loki/blooms", config.StorageConfig.BloomShipperConfig.WorkingDirectory)
})
t.Run("accepts paths both with and without trailing slash", func(t *testing.T) {
@@ -111,6 +112,7 @@ common:
assert.EqualValues(t, "/opt/loki/rules-temp", config.Ruler.RulePath)
assert.EqualValues(t, "/opt/loki/wal", config.Ingester.WAL.Dir)
assert.EqualValues(t, "/opt/loki/compactor", config.CompactorConfig.WorkingDirectory)
+ assert.EqualValues(t, "/opt/loki/blooms", config.StorageConfig.BloomShipperConfig.WorkingDirectory)
})
t.Run("does not rewrite custom (non-default) paths passed via config file", func(t *testing.T) {
diff --git a/pkg/loki/modules_test.go b/pkg/loki/modules_test.go
index 0d07242b7537..047ba5f838a5 100644
--- a/pkg/loki/modules_test.go
+++ b/pkg/loki/modules_test.go
@@ -2,7 +2,6 @@ package loki
import (
"fmt"
- "path"
"path/filepath"
"testing"
"time"
@@ -17,6 +16,7 @@ import (
"github.com/grafana/loki/pkg/storage"
"github.com/grafana/loki/pkg/storage/chunk/client/local"
"github.com/grafana/loki/pkg/storage/config"
+ bloomshipperconfig "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/boltdb"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/indexgateway"
@@ -366,10 +366,13 @@ func minimalWorkingConfig(t *testing.T, dir, target string, cfgTransformers ...f
// This would be overwritten by the default values setting.
cfg.StorageConfig = storage.Config{
FSConfig: local.FSConfig{Directory: dir},
+ BloomShipperConfig: bloomshipperconfig.Config{
+ WorkingDirectory: filepath.Join(dir, "blooms"),
+ },
BoltDBShipperConfig: boltdb.IndexCfg{
Config: indexshipper.Config{
- ActiveIndexDirectory: path.Join(dir, "index"),
- CacheLocation: path.Join(dir, "cache"),
+ ActiveIndexDirectory: filepath.Join(dir, "index"),
+ CacheLocation: filepath.Join(dir, "cache"),
Mode: indexshipper.ModeWriteOnly,
ResyncInterval: 24 * time.Hour,
},
@@ -402,7 +405,7 @@ func minimalWorkingConfig(t *testing.T, dir, target string, cfgTransformers ...f
cfg.BloomCompactor.Ring.InstanceAddr = localhost
cfg.BloomGateway.Ring.InstanceAddr = localhost
cfg.CompactorConfig.CompactorRing.InstanceAddr = localhost
- cfg.CompactorConfig.WorkingDirectory = path.Join(dir, "compactor")
+ cfg.CompactorConfig.WorkingDirectory = filepath.Join(dir, "compactor")
cfg.Ruler.Config.Ring.InstanceAddr = localhost
cfg.Ruler.Config.StoreConfig.Type = config.StorageTypeLocal
diff --git a/pkg/storage/chunk/client/util/util.go b/pkg/storage/chunk/client/util/util.go
index 10237cc456da..e49fad20136f 100644
--- a/pkg/storage/chunk/client/util/util.go
+++ b/pkg/storage/chunk/client/util/util.go
@@ -72,6 +72,8 @@ func EnsureDirectory(dir string) error {
return os.MkdirAll(dir, 0o777)
} else if err == nil && !info.IsDir() {
return fmt.Errorf("not a directory: %s", dir)
+ } else if err == nil && info.Mode()&0700 != 0700 {
+ return fmt.Errorf("insufficient permissions: %s %s", dir, info.Mode())
}
return err
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
index e03d72c26ba3..86e8ed90a174 100644
--- a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
@@ -142,7 +142,7 @@ func TestBloomShipper_IsOutsideRange(t *testing.T) {
func TestBloomShipper_ForEach(t *testing.T) {
blockRefs := make([]BlockRef, 0, 3)
- store, _ := newMockBloomStore(t)
+ store, _, _ := newMockBloomStore(t)
for i := 0; i < len(blockRefs); i++ {
block, err := createBlockInStorage(t, store, "tenant", model.Time(i*24*int(time.Hour)), 0x0000, 0x00ff)
require.NoError(t, err)
diff --git a/pkg/storage/stores/shipper/bloomshipper/store.go b/pkg/storage/stores/shipper/bloomshipper/store.go
index d5cfa24b11ed..56bfb3ebe97a 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store.go
@@ -15,6 +15,7 @@ import (
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/client"
+ "github.com/grafana/loki/pkg/storage/chunk/client/util"
"github.com/grafana/loki/pkg/storage/config"
)
@@ -172,6 +173,10 @@ func NewBloomStore(
numWorkers: storageConfig.BloomShipperConfig.BlocksDownloadingQueue.WorkersCount,
}
+ if err := util.EnsureDirectory(cfg.workingDir); err != nil {
+ return nil, errors.Wrapf(err, "failed to create working directory for bloom store: '%s'", cfg.workingDir)
+ }
+
for _, periodicConfig := range periodicConfigs {
objectClient, err := storage.NewObjectClient(periodicConfig.ObjectType, storageConfig, clientMetrics)
if err != nil {
@@ -323,10 +328,10 @@ func (b *BloomStore) FetchBlocks(ctx context.Context, blocks []BlockRef) ([]*Clo
results := make([]*CloseableBlockQuerier, 0, len(blocks))
for i := range fetchers {
res, err := fetchers[i].FetchBlocks(ctx, refs[i])
- results = append(results, res...)
if err != nil {
return results, err
}
+ results = append(results, res...)
}
// sort responses (results []*CloseableBlockQuerier) based on requests (blocks []BlockRef)
diff --git a/pkg/storage/stores/shipper/bloomshipper/store_test.go b/pkg/storage/stores/shipper/bloomshipper/store_test.go
index 59d8eee46405..48ab81cc4502 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store_test.go
@@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"os"
+ "path/filepath"
"testing"
"time"
@@ -20,9 +21,12 @@ import (
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config"
)
-func newMockBloomStore(t *testing.T) (*BloomStore, string) {
+func newMockBloomStore(t *testing.T) (*BloomStore, string, error) {
workDir := t.TempDir()
+ return newMockBloomStoreWithWorkDir(t, workDir)
+}
+func newMockBloomStoreWithWorkDir(t *testing.T, workDir string) (*BloomStore, string, error) {
periodicConfigs := []storageconfig.PeriodConfig{
{
ObjectType: storageconfig.StorageTypeInMemory,
@@ -63,11 +67,13 @@ func newMockBloomStore(t *testing.T) (*BloomStore, string) {
metasCache := cache.NewMockCache()
blocksCache := NewBlocksCache(storageConfig.BloomShipperConfig.BlocksCache, prometheus.NewPedanticRegistry(), logger)
+
store, err := NewBloomStore(periodicConfigs, storageConfig, metrics, metasCache, blocksCache, logger)
- require.NoError(t, err)
- t.Cleanup(store.Stop)
+ if err == nil {
+ t.Cleanup(store.Stop)
+ }
- return store, workDir
+ return store, workDir, err
}
func createMetaInStorage(store *BloomStore, tenant string, start model.Time, minFp, maxFp model.Fingerprint) (Meta, error) {
@@ -123,7 +129,8 @@ func createBlockInStorage(t *testing.T, store *BloomStore, tenant string, start
}
func TestBloomStore_ResolveMetas(t *testing.T) {
- store, _ := newMockBloomStore(t)
+ store, _, err := newMockBloomStore(t)
+ require.NoError(t, err)
// schema 1
// outside of interval, outside of bounds
@@ -178,7 +185,8 @@ func TestBloomStore_ResolveMetas(t *testing.T) {
}
func TestBloomStore_FetchMetas(t *testing.T) {
- store, _ := newMockBloomStore(t)
+ store, _, err := newMockBloomStore(t)
+ require.NoError(t, err)
// schema 1
// outside of interval, outside of bounds
@@ -231,7 +239,8 @@ func TestBloomStore_FetchMetas(t *testing.T) {
}
func TestBloomStore_FetchBlocks(t *testing.T) {
- store, _ := newMockBloomStore(t)
+ store, _, err := newMockBloomStore(t)
+ require.NoError(t, err)
// schema 1
b1, _ := createBlockInStorage(t, store, "tenant", parseTime("2024-01-20 00:00"), 0x00000000, 0x0000ffff)
@@ -259,3 +268,33 @@ func TestBloomStore_FetchBlocks(t *testing.T) {
[]BlockRef{bqs[0].BlockRef, bqs[1].BlockRef, bqs[2].BlockRef, bqs[3].BlockRef},
)
}
+
+func TestBloomShipper_WorkingDir(t *testing.T) {
+ t.Run("insufficient permissions on directory yields error", func(t *testing.T) {
+ base := t.TempDir()
+ wd := filepath.Join(base, "notpermitted")
+ err := os.MkdirAll(wd, 0500)
+ require.NoError(t, err)
+ fi, _ := os.Stat(wd)
+ t.Log("working directory", wd, fi.Mode())
+
+ _, _, err = newMockBloomStoreWithWorkDir(t, wd)
+ require.ErrorContains(t, err, "insufficient permissions")
+ })
+
+ t.Run("not existing directory will be created", func(t *testing.T) {
+ base := t.TempDir()
+ // if the base directory does not exist, it will be created
+ wd := filepath.Join(base, "doesnotexist")
+ t.Log("working directory", wd)
+
+ store, _, err := newMockBloomStoreWithWorkDir(t, wd)
+ require.NoError(t, err)
+ b, err := createBlockInStorage(t, store, "tenant", parseTime("2024-01-20 00:00"), 0x00000000, 0x0000ffff)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+ _, err = store.FetchBlocks(ctx, []BlockRef{b.BlockRef})
+ require.NoError(t, err)
+ })
+}
From dbea8ba85b355e1ece0a4bf88c64c90251523a97 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Thu, 22 Feb 2024 13:41:30 +0100
Subject: [PATCH 112/130] chore(blooms): Various fixes in blooms read path
(#12036)
* Wire up bloom querier metrics to fix nil pointer panic
* Use `bloomutils.TokenRangesForInstance` instead of `subRing.GetTokenRangesForInstance` to avoid `zone not set` error
Signed-off-by: Christian Haudum
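As a usage sketch of the second point (the instance descriptors are invented for illustration; error handling is reduced to a panic):

```go
package main

import (
	"fmt"

	"github.com/grafana/dskit/ring"

	"github.com/grafana/loki/pkg/bloomutils"
)

func main() {
	instances := []ring.InstanceDesc{
		{Id: "instance-1", Addr: "10.0.0.1", Tokens: []uint32{1 << 30}},
		{Id: "instance-2", Addr: "10.0.0.2", Tokens: []uint32{1 << 31}},
	}
	for _, inst := range instances {
		// Previously: subRing.GetTokenRangesForInstance(inst.Id), which
		// returns a "zone not set" error on zone-unaware rings. Deriving
		// the ranges from the descriptors themselves avoids that.
		tr, err := bloomutils.TokenRangesForInstance(inst.Id, instances)
		if err != nil {
			panic(err)
		}
		fmt.Println(inst.Id, tr)
	}
}
```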
---
pkg/bloomgateway/bloomgateway.go | 5 +++--
pkg/bloomgateway/client.go | 2 +-
pkg/bloomgateway/querier.go | 9 +++++++--
pkg/bloomgateway/querier_test.go | 6 +++---
pkg/loki/modules.go | 2 +-
5 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index 4e36e5ce3018..0e18a06c9327 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -72,8 +72,9 @@ import (
var errGatewayUnhealthy = errors.New("bloom-gateway is unhealthy in the ring")
const (
- pendingTasksInitialCap = 1024
- metricsSubsystem = "bloom_gateway"
+ pendingTasksInitialCap = 1024
+ metricsSubsystem = "bloom_gateway"
+ querierMetricsSubsystem = "bloom_gateway_querier"
)
var (
diff --git a/pkg/bloomgateway/client.go b/pkg/bloomgateway/client.go
index cf148f018bab..fe9261082465 100644
--- a/pkg/bloomgateway/client.go
+++ b/pkg/bloomgateway/client.go
@@ -311,7 +311,7 @@ func replicationSetsWithBounds(subRing ring.ReadRing, instances []ring.InstanceD
servers := make([]rsWithRanges, 0, len(instances))
for _, inst := range instances {
- tr, err := subRing.GetTokenRangesForInstance(inst.Id)
+ tr, err := bloomutils.TokenRangesForInstance(inst.Id, instances)
if err != nil {
return nil, errors.Wrap(err, "bloom gateway get ring")
}
diff --git a/pkg/bloomgateway/querier.go b/pkg/bloomgateway/querier.go
index 02608bfdf71c..799fb691c0e4 100644
--- a/pkg/bloomgateway/querier.go
+++ b/pkg/bloomgateway/querier.go
@@ -11,6 +11,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/util/constants"
)
type querierMetrics struct {
@@ -57,8 +58,12 @@ type BloomQuerier struct {
metrics *querierMetrics
}
-func NewQuerier(c Client, logger log.Logger) *BloomQuerier {
- return &BloomQuerier{c: c, logger: logger}
+func NewQuerier(c Client, r prometheus.Registerer, logger log.Logger) *BloomQuerier {
+ return &BloomQuerier{
+ c: c,
+ metrics: newQuerierMetrics(r, constants.Loki, querierMetricsSubsystem),
+ logger: logger,
+ }
}
func convertToShortRef(ref *logproto.ChunkRef) *logproto.ShortRef {
diff --git a/pkg/bloomgateway/querier_test.go b/pkg/bloomgateway/querier_test.go
index 1e7cfc30a53b..57e4d501bb44 100644
--- a/pkg/bloomgateway/querier_test.go
+++ b/pkg/bloomgateway/querier_test.go
@@ -32,7 +32,7 @@ func TestBloomQuerier(t *testing.T) {
t.Run("client not called when filters are empty", func(t *testing.T) {
c := &noopClient{}
- bq := NewQuerier(c, logger)
+ bq := NewQuerier(c, nil, logger)
ctx := context.Background()
through := model.Now()
@@ -51,7 +51,7 @@ func TestBloomQuerier(t *testing.T) {
t.Run("client not called when chunkRefs are empty", func(t *testing.T) {
c := &noopClient{}
- bq := NewQuerier(c, logger)
+ bq := NewQuerier(c, nil, logger)
ctx := context.Background()
through := model.Now()
@@ -68,7 +68,7 @@ func TestBloomQuerier(t *testing.T) {
t.Run("querier propagates error from client", func(t *testing.T) {
c := &noopClient{err: errors.New("something went wrong")}
- bq := NewQuerier(c, logger)
+ bq := NewQuerier(c, nil, logger)
ctx := context.Background()
through := model.Now()
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 57c6e96a2b3d..9d5a614dc579 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -1354,7 +1354,7 @@ func (t *Loki) initIndexGateway() (services.Service, error) {
if err != nil {
return nil, err
}
- bloomQuerier = bloomgateway.NewQuerier(bloomGatewayClient, logger)
+ bloomQuerier = bloomgateway.NewQuerier(bloomGatewayClient, prometheus.DefaultRegisterer, logger)
}
gateway, err := indexgateway.NewIndexGateway(t.Cfg.IndexGateway, logger, prometheus.DefaultRegisterer, t.Store, indexClients, bloomQuerier)
From 782b93833449149b834506ea99924c4af5010a79 Mon Sep 17 00:00:00 2001
From: Trevor Whitney
Date: Thu, 22 Feb 2024 14:34:00 -0700
Subject: [PATCH 113/130] test: split unit and integration suites to
parallelize (#12039)
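The split works by gating integration test files behind a Go build tag and adding a dedicated make target, so unit and integration suites can run in parallel CI steps. A minimal sketch of such a gated test file (the file and test names are invented):

```go
//go:build integration

package integration

import "testing"

// Compiled only when the "integration" tag is set, e.g. via
// `go test -tags=integration ./integration` or the new
// `make test-integration` target; a plain `make test` skips it.
func TestExampleIntegration(t *testing.T) {
	t.Log("runs only with -tags=integration")
}
```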
---
.golangci.yml | 1 +
Makefile | 8 ++++++--
integration/loki_micro_services_delete_test.go | 2 ++
integration/loki_micro_services_test.go | 1 +
integration/loki_rule_eval_test.go | 1 +
integration/loki_simple_scalable_test.go | 1 +
integration/loki_single_binary_test.go | 1 +
integration/multi_tenant_queries_test.go | 1 +
integration/parse_metrics.go | 1 +
integration/parse_metrics_test.go | 1 +
integration/per_request_limits_test.go | 1 +
integration/shared_test.go | 2 ++
12 files changed, 19 insertions(+), 2 deletions(-)
diff --git a/.golangci.yml b/.golangci.yml
index fb3c1ab689d0..e6475895ad94 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -23,6 +23,7 @@ run:
- linux
- cgo
- promtail_journal_enabled
+ - integration
# which dirs to skip: they won't be analyzed;
# can use regexp here: generated.*, regexp is applied on full path;
diff --git a/Makefile b/Makefile
index aaa64d755e03..865e16ec3ba7 100644
--- a/Makefile
+++ b/Makefile
@@ -325,8 +325,12 @@ lint: ## run linters
########
test: all ## run the unit tests
- $(GOTEST) -covermode=atomic -coverprofile=coverage.txt -p=4 ./... | sed "s:$$: ${DRONE_STEP_NAME} ${DRONE_SOURCE_BRANCH}:" | tee test_results.txt
- cd tools/lambda-promtail/ && $(GOTEST) -covermode=atomic -coverprofile=lambda-promtail-coverage.txt -p=4 ./... | sed "s:$$: ${DRONE_STEP_NAME} ${DRONE_SOURCE_BRANCH}:" | tee lambda_promtail_test_results.txt
+ $(GOTEST) -covermode=atomic -coverprofile=coverage.txt -p=4 ./... | tee test_results.txt
+ cd tools/lambda-promtail/ && $(GOTEST) -covermode=atomic -coverprofile=lambda-promtail-coverage.txt -p=4 ./... | tee lambda_promtail_test_results.txt
+
+test-integration:
+ $(GOTEST) -count=1 -v -tags=integration -timeout 10m ./integration
+
compare-coverage:
./tools/diff_coverage.sh $(old) $(new) $(packages)
diff --git a/integration/loki_micro_services_delete_test.go b/integration/loki_micro_services_delete_test.go
index 07195d919ee1..d77d7ab11508 100644
--- a/integration/loki_micro_services_delete_test.go
+++ b/integration/loki_micro_services_delete_test.go
@@ -1,3 +1,5 @@
+//go:build integration
+
package integration
import (
diff --git a/integration/loki_micro_services_test.go b/integration/loki_micro_services_test.go
index 3b888314cd68..67f888d41acf 100644
--- a/integration/loki_micro_services_test.go
+++ b/integration/loki_micro_services_test.go
@@ -1,3 +1,4 @@
+//go:build integration
package integration
import (
diff --git a/integration/loki_rule_eval_test.go b/integration/loki_rule_eval_test.go
index 41414d4aef67..025b74df5ad8 100644
--- a/integration/loki_rule_eval_test.go
+++ b/integration/loki_rule_eval_test.go
@@ -1,3 +1,4 @@
+//go:build integration
package integration
import (
diff --git a/integration/loki_simple_scalable_test.go b/integration/loki_simple_scalable_test.go
index 2de17e3420b8..ccbf839b6a6a 100644
--- a/integration/loki_simple_scalable_test.go
+++ b/integration/loki_simple_scalable_test.go
@@ -1,3 +1,4 @@
+//go:build integration
package integration
import (
diff --git a/integration/loki_single_binary_test.go b/integration/loki_single_binary_test.go
index 16bb5b36944d..31b9990d3ad0 100644
--- a/integration/loki_single_binary_test.go
+++ b/integration/loki_single_binary_test.go
@@ -1,3 +1,4 @@
+//go:build integration
package integration
import (
diff --git a/integration/multi_tenant_queries_test.go b/integration/multi_tenant_queries_test.go
index 76fbd63f13bd..cec967fd1318 100644
--- a/integration/multi_tenant_queries_test.go
+++ b/integration/multi_tenant_queries_test.go
@@ -1,3 +1,4 @@
+//go:build integration
package integration
import (
diff --git a/integration/parse_metrics.go b/integration/parse_metrics.go
index 9f2bf5fc8fc2..d2896de6e29c 100644
--- a/integration/parse_metrics.go
+++ b/integration/parse_metrics.go
@@ -1,3 +1,4 @@
+//go:build integration
package integration
import (
diff --git a/integration/parse_metrics_test.go b/integration/parse_metrics_test.go
index 94c19b7584ad..7af3289e36db 100644
--- a/integration/parse_metrics_test.go
+++ b/integration/parse_metrics_test.go
@@ -1,3 +1,4 @@
+//go:build integration
package integration
import (
diff --git a/integration/per_request_limits_test.go b/integration/per_request_limits_test.go
index 85642e0439e6..93e2c440861e 100644
--- a/integration/per_request_limits_test.go
+++ b/integration/per_request_limits_test.go
@@ -1,3 +1,4 @@
+//go:build integration
package integration
import (
diff --git a/integration/shared_test.go b/integration/shared_test.go
index 61b469ecd004..12338ec99ab2 100644
--- a/integration/shared_test.go
+++ b/integration/shared_test.go
@@ -1,3 +1,5 @@
+//go:build integration
+
package integration
import (
From 4fa5148eb505291a28277edfb7d31e118a62809d Mon Sep 17 00:00:00 2001
From: Salva Corts
Date: Fri, 23 Feb 2024 10:41:52 +0100
Subject: [PATCH 114/130] refactor: Pass query plan down to bloom gateway
(#12037)
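For orientation, a sketch of what the refactor changes for callers: the request now carries the parsed query plan, and the gateway extracts line filters from the AST itself. Field and helper names follow the diff below; `Refs` is omitted for brevity:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql/syntax"
	"github.com/grafana/loki/pkg/querier/plan"
)

func main() {
	// Parse the full query once; no pre-extracted []syntax.LineFilter
	// travels over the wire anymore.
	expr, err := syntax.ParseExpr(`{foo="bar"} |= "does not match"`)
	if err != nil {
		panic(err)
	}

	now := model.Now()
	req := &logproto.FilterChunkRefRequest{
		From:    now.Add(-24 * time.Hour),
		Through: now,
		Plan:    plan.QueryPlan{AST: expr},
	}

	// Mirrors the gateway's short-circuit: an empty filter set means the
	// chunk refs are returned unfiltered.
	fmt.Println("line filters:", len(syntax.ExtractLineFilters(req.Plan.AST)))
}
```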
---
pkg/bloomgateway/bloomgateway.go | 6 +-
pkg/bloomgateway/bloomgateway_test.go | 35 +-
pkg/bloomgateway/cache_test.go | 8 +-
pkg/bloomgateway/client.go | 8 +-
pkg/bloomgateway/client_test.go | 6 +-
pkg/bloomgateway/multiplexing.go | 6 +-
pkg/bloomgateway/multiplexing_test.go | 2 +-
pkg/bloomgateway/processor_test.go | 18 +-
pkg/bloomgateway/querier.go | 7 +-
pkg/bloomgateway/querier_test.go | 23 +-
pkg/bloomgateway/util.go | 8 +-
pkg/logproto/bloomgateway.pb.go | 126 +++++--
pkg/logproto/bloomgateway.proto | 7 +-
pkg/logproto/compat.go | 20 +-
pkg/logproto/compat_test.go | 30 +-
pkg/logproto/logproto.pb.go | 341 ++++++++++--------
pkg/logproto/logproto.proto | 5 +
pkg/logql/syntax/ast.go | 16 +
pkg/logql/syntax/serialize_test.go | 3 +
pkg/querier/plan/plan.go | 15 +
pkg/storage/chunk/predicate.go | 16 +-
pkg/storage/store.go | 27 +-
.../series/series_index_gateway_store.go | 2 +-
.../indexshipper/indexgateway/gateway.go | 29 +-
24 files changed, 451 insertions(+), 313 deletions(-)
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index 0e18a06c9327..d0ac92db59a3 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -58,6 +58,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/queue"
"github.com/grafana/loki/pkg/storage"
@@ -311,7 +312,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
}
// Shortcut if request does not contain filters
- if len(req.Filters) == 0 {
+ if len(syntax.ExtractLineFilters(req.Plan.AST)) == 0 {
return &logproto.FilterChunkRefResponse{
ChunkRefs: req.Refs,
}, nil
@@ -332,9 +333,10 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
}, nil
}
+ filters := syntax.ExtractLineFilters(req.Plan.AST)
tasks := make([]Task, 0, len(seriesByDay))
for _, seriesWithBounds := range seriesByDay {
- task, err := NewTask(ctx, tenantID, seriesWithBounds, req.Filters)
+ task, err := NewTask(ctx, tenantID, seriesWithBounds, filters)
if err != nil {
return nil, err
}
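
Note: rather than reading the soon-to-be-removed `Filters` field, the gateway now derives line filters from the query plan's AST and short-circuits when none are present. A minimal standalone sketch of that extraction step, assuming the Loki module is on the import path (`syntax.ExtractLineFilters` is the helper used in this hunk):

package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/syntax"
)

func main() {
	// A query with a line filter yields a non-empty result ...
	withFilter, err := syntax.ParseExpr(`{foo="bar"} |= "uuid"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(syntax.ExtractLineFilters(withFilter)) > 0) // true

	// ... while a bare stream selector yields none, so FilterChunkRefs
	// returns the chunk refs unfiltered.
	noFilter, err := syntax.ParseExpr(`{foo="bar"}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(syntax.ExtractLineFilters(noFilter))) // 0
}
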
diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go
index 9a4dea08dba2..f853398894e0 100644
--- a/pkg/bloomgateway/bloomgateway_test.go
+++ b/pkg/bloomgateway/bloomgateway_test.go
@@ -17,11 +17,11 @@ import (
"github.com/grafana/dskit/user"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/storage"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/chunk/client/local"
@@ -196,13 +196,14 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
// saturate workers
// then send additional request
for i := 0; i < gw.cfg.WorkerConcurrency+1; i++ {
+ expr, err := syntax.ParseExpr(`{foo="bar"} |= "does not match"`)
+ require.NoError(t, err)
+
req := &logproto.FilterChunkRefRequest{
From: now.Add(-24 * time.Hour),
Through: now,
Refs: groupRefs(t, chunkRefs),
- Filters: []syntax.LineFilter{
- {Ty: labels.MatchEqual, Match: "does not match"},
- },
+ Plan: plan.QueryPlan{AST: expr},
}
ctx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second)
@@ -243,13 +244,14 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
// saturate workers
// then send additional request
for i := 0; i < gw.cfg.WorkerConcurrency+1; i++ {
+ expr, err := syntax.ParseExpr(`{foo="bar"} |= "does not match"`)
+ require.NoError(t, err)
+
req := &logproto.FilterChunkRefRequest{
From: now.Add(-24 * time.Hour),
Through: now,
Refs: groupRefs(t, chunkRefs),
- Filters: []syntax.LineFilter{
- {Ty: labels.MatchEqual, Match: "does not match"},
- },
+ Plan: plan.QueryPlan{AST: expr},
}
ctx, cancelFn := context.WithTimeout(context.Background(), 500*time.Millisecond)
@@ -331,13 +333,13 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
Checksum: uint32(idx),
},
}
+ expr, err := syntax.ParseExpr(`{foo="bar"} |= "foo"`)
+ require.NoError(t, err)
req := &logproto.FilterChunkRefRequest{
From: now.Add(-24 * time.Hour),
Through: now,
Refs: groupRefs(t, chunkRefs),
- Filters: []syntax.LineFilter{
- {Ty: labels.MatchEqual, Match: "foo"},
- },
+ Plan: plan.QueryPlan{AST: expr},
}
ctx := user.InjectOrgID(context.Background(), tenantID)
_, err = gw.FilterChunkRefs(ctx, req)
@@ -371,13 +373,13 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
t.Run("no match - return empty response", func(t *testing.T) {
inputChunkRefs := groupRefs(t, chunkRefs)
+ expr, err := syntax.ParseExpr(`{foo="bar"} |= "does not match"`)
+ require.NoError(t, err)
req := &logproto.FilterChunkRefRequest{
From: now.Add(-8 * time.Hour),
Through: now,
Refs: inputChunkRefs,
- Filters: []syntax.LineFilter{
- {Ty: labels.MatchEqual, Match: "does not match"},
- },
+ Plan: plan.QueryPlan{AST: expr},
}
ctx := user.InjectOrgID(context.Background(), tenantID)
res, err := gw.FilterChunkRefs(ctx, req)
@@ -402,13 +404,14 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
t.Log("x=", x, "fp=", fp, "line=", line)
+ expr, err := syntax.ParseExpr(fmt.Sprintf(`{foo="bar"} |= "%s"`, line))
+ require.NoError(t, err)
+
req := &logproto.FilterChunkRefRequest{
From: now.Add(-8 * time.Hour),
Through: now,
Refs: inputChunkRefs,
- Filters: []syntax.LineFilter{
- {Ty: labels.MatchEqual, Match: line},
- },
+ Plan: plan.QueryPlan{AST: expr},
}
ctx := user.InjectOrgID(context.Background(), tenantID)
res, err := gw.FilterChunkRefs(ctx, req)
diff --git a/pkg/bloomgateway/cache_test.go b/pkg/bloomgateway/cache_test.go
index 3ae414cc43c6..bf1a8dbaa365 100644
--- a/pkg/bloomgateway/cache_test.go
+++ b/pkg/bloomgateway/cache_test.go
@@ -8,13 +8,13 @@ import (
"github.com/go-kit/log"
"github.com/grafana/dskit/user"
"github.com/prometheus/common/model"
- "github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/cache/resultscache"
"github.com/grafana/loki/pkg/util/constants"
@@ -382,13 +382,13 @@ func TestCache(t *testing.T) {
Through: 3500,
},
}
+ expr, err := syntax.ParseExpr(`{foo="bar"} |= "does not match"`)
+ require.NoError(t, err)
req := &logproto.FilterChunkRefRequest{
From: model.Time(2000),
Through: model.Time(3000),
Refs: groupRefs(t, chunkRefs),
- Filters: []syntax.LineFilter{
- {Ty: labels.MatchEqual, Match: "foo"},
- },
+ Plan: plan.QueryPlan{AST: expr},
}
expectedRes := &logproto.FilterChunkRefResponse{
ChunkRefs: groupRefs(t, chunkRefs),
diff --git a/pkg/bloomgateway/client.go b/pkg/bloomgateway/client.go
index fe9261082465..d7328c3c8c31 100644
--- a/pkg/bloomgateway/client.go
+++ b/pkg/bloomgateway/client.go
@@ -26,8 +26,8 @@ import (
"github.com/grafana/loki/pkg/bloomutils"
"github.com/grafana/loki/pkg/distributor/clientpool"
"github.com/grafana/loki/pkg/logproto"
- "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/queue"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/chunk/cache"
@@ -142,7 +142,7 @@ func (i *ClientConfig) Validate() error {
}
type Client interface {
- FilterChunks(ctx context.Context, tenant string, from, through model.Time, groups []*logproto.GroupedChunkRefs, filters ...syntax.LineFilter) ([]*logproto.GroupedChunkRefs, error)
+ FilterChunks(ctx context.Context, tenant string, from, through model.Time, groups []*logproto.GroupedChunkRefs, plan plan.QueryPlan) ([]*logproto.GroupedChunkRefs, error)
}
type GatewayClient struct {
@@ -224,7 +224,7 @@ func shuffleAddrs(addrs []string) []string {
}
// FilterChunkRefs implements Client
-func (c *GatewayClient) FilterChunks(ctx context.Context, tenant string, from, through model.Time, groups []*logproto.GroupedChunkRefs, filters ...syntax.LineFilter) ([]*logproto.GroupedChunkRefs, error) {
+func (c *GatewayClient) FilterChunks(ctx context.Context, tenant string, from, through model.Time, groups []*logproto.GroupedChunkRefs, plan plan.QueryPlan) ([]*logproto.GroupedChunkRefs, error) {
if !c.limits.BloomGatewayEnabled(tenant) {
return groups, nil
}
@@ -252,7 +252,7 @@ func (c *GatewayClient) FilterChunks(ctx context.Context, tenant string, from, t
From: from,
Through: through,
Refs: rs.groups,
- Filters: filters,
+ Plan: plan,
}
resp, err := client.FilterChunkRefs(ctx, req)
if err != nil {
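
Note: the `Client` interface trades the variadic `...syntax.LineFilter` parameter for a single `plan.QueryPlan`, so callers parse the query once and hand over the whole AST. A hypothetical wrapper showing the new call shape (the wrapper itself is not part of the patch):

package sketch

import (
	"context"

	"github.com/prometheus/common/model"

	"github.com/grafana/loki/pkg/bloomgateway"
	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql/syntax"
	"github.com/grafana/loki/pkg/querier/plan"
)

// filterWithQuery parses a LogQL query and forwards the resulting plan
// to the bloom gateway client. Hypothetical helper, for illustration.
func filterWithQuery(ctx context.Context, c bloomgateway.Client, tenant string, from, through model.Time,
	groups []*logproto.GroupedChunkRefs, query string) ([]*logproto.GroupedChunkRefs, error) {
	expr, err := syntax.ParseExpr(query)
	if err != nil {
		return nil, err
	}
	return c.FilterChunks(ctx, tenant, from, through, groups, plan.QueryPlan{AST: expr})
}
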
diff --git a/pkg/bloomgateway/client_test.go b/pkg/bloomgateway/client_test.go
index 0280007443d8..e4b905c37b12 100644
--- a/pkg/bloomgateway/client_test.go
+++ b/pkg/bloomgateway/client_test.go
@@ -16,6 +16,8 @@ import (
"github.com/grafana/loki/pkg/bloomutils"
"github.com/grafana/loki/pkg/logproto"
+ "github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/validation"
)
@@ -42,7 +44,9 @@ func TestBloomGatewayClient(t *testing.T) {
t.Run("FilterChunks returns response", func(t *testing.T) {
c, err := NewClient(cfg, &mockRing{}, l, reg, logger, "loki", nil, false)
require.NoError(t, err)
- res, err := c.FilterChunks(context.Background(), "tenant", model.Now(), model.Now(), nil)
+ expr, err := syntax.ParseExpr(`{foo="bar"}`)
+ require.NoError(t, err)
+ res, err := c.FilterChunks(context.Background(), "tenant", model.Now(), model.Now(), nil, plan.QueryPlan{AST: expr})
require.NoError(t, err)
require.Equal(t, []*logproto.GroupedChunkRefs{}, res)
})
diff --git a/pkg/bloomgateway/multiplexing.go b/pkg/bloomgateway/multiplexing.go
index c952c9f6b87f..907f8f111eb1 100644
--- a/pkg/bloomgateway/multiplexing.go
+++ b/pkg/bloomgateway/multiplexing.go
@@ -63,7 +63,7 @@ type Task struct {
// series of the original request
series []*logproto.GroupedChunkRefs
// filters of the original request
- filters []syntax.LineFilter
+ filters []syntax.LineFilterExpr
// from..through date of the task's chunks
bounds model.Interval
// the context from the request
@@ -76,7 +76,7 @@ type Task struct {
// NewTask returns a new Task that can be enqueued to the task queue.
// In addition, it returns a result and an error channel, as well
// as an error if the instantiation fails.
-func NewTask(ctx context.Context, tenantID string, refs seriesWithBounds, filters []syntax.LineFilter) (Task, error) {
+func NewTask(ctx context.Context, tenantID string, refs seriesWithBounds, filters []syntax.LineFilterExpr) (Task, error) {
key, err := ulid.New(ulid.Now(), entropy)
if err != nil {
return Task{}, err
@@ -140,7 +140,7 @@ func (t Task) Copy(series []*logproto.GroupedChunkRefs) Task {
func (t Task) RequestIter(tokenizer *v1.NGramTokenizer) v1.Iterator[v1.Request] {
return &requestIterator{
series: v1.NewSliceIter(t.series),
- searches: convertToSearches(t.filters, tokenizer),
+ searches: convertToSearches(tokenizer, t.filters...),
channel: t.resCh,
curr: v1.Request{},
}
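
Note the type change here: `Task` now holds `[]syntax.LineFilterExpr` (AST nodes) rather than flat `[]syntax.LineFilter` values. Judging from the literals in processor_test.go below and the recursion added in util.go, the node looks roughly like this (an inferred shape for orientation only, not a verbatim copy of the real type, which may carry more fields):

package sketch

// Inferred shape of the leaf filter, per the removed compat.go code.
type LineFilter struct {
	Ty    int    // match type (labels.MatchEqual, etc.)
	Match string // the pattern to search for
	Op    string // the operation
}

// Inferred shape of the AST node the task now carries.
type LineFilterExpr struct {
	LineFilter                 // the node's own leaf filter
	Left       *LineFilterExpr // preceding filter in a chain like |= "a" != "b"
	Or         *LineFilterExpr // alternative branch for "or"-ed filters
}
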
diff --git a/pkg/bloomgateway/multiplexing_test.go b/pkg/bloomgateway/multiplexing_test.go
index 009c825a7e84..a6ad0270d96e 100644
--- a/pkg/bloomgateway/multiplexing_test.go
+++ b/pkg/bloomgateway/multiplexing_test.go
@@ -62,7 +62,7 @@ func TestTask_RequestIterator(t *testing.T) {
bounds: model.Interval{Start: 0, End: math.MaxInt64},
series: []*logproto.GroupedChunkRefs{},
}
- task, _ := NewTask(context.Background(), tenant, swb, []syntax.LineFilter{})
+ task, _ := NewTask(context.Background(), tenant, swb, []syntax.LineFilterExpr{})
it := task.RequestIter(tokenizer)
// nothing to iterate over
require.False(t, it.Next())
diff --git a/pkg/bloomgateway/processor_test.go b/pkg/bloomgateway/processor_test.go
index 27d0068753d5..84687995833b 100644
--- a/pkg/bloomgateway/processor_test.go
+++ b/pkg/bloomgateway/processor_test.go
@@ -112,8 +112,13 @@ func TestProcessor(t *testing.T) {
},
table: config.NewDayTime(truncateDay(now)),
}
- filters := []syntax.LineFilter{
- {Ty: 0, Match: "no match"},
+ filters := []syntax.LineFilterExpr{
+ {
+ LineFilter: syntax.LineFilter{
+ Ty: 0,
+ Match: "no match",
+ },
+ },
}
t.Log("series", len(swb.series))
@@ -156,8 +161,13 @@ func TestProcessor(t *testing.T) {
},
table: config.NewDayTime(truncateDay(now)),
}
- filters := []syntax.LineFilter{
- {Ty: 0, Match: "no match"},
+ filters := []syntax.LineFilterExpr{
+ {
+ LineFilter: syntax.LineFilter{
+ Ty: 0,
+ Match: "no match",
+ },
+ },
}
t.Log("series", len(swb.series))
diff --git a/pkg/bloomgateway/querier.go b/pkg/bloomgateway/querier.go
index 799fb691c0e4..171936d9e39c 100644
--- a/pkg/bloomgateway/querier.go
+++ b/pkg/bloomgateway/querier.go
@@ -11,6 +11,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/util/constants"
)
@@ -70,9 +71,9 @@ func convertToShortRef(ref *logproto.ChunkRef) *logproto.ShortRef {
return &logproto.ShortRef{From: ref.From, Through: ref.Through, Checksum: ref.Checksum}
}
-func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from, through model.Time, chunkRefs []*logproto.ChunkRef, filters ...syntax.LineFilter) ([]*logproto.ChunkRef, error) {
+func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from, through model.Time, chunkRefs []*logproto.ChunkRef, queryPlan plan.QueryPlan) ([]*logproto.ChunkRef, error) {
// Shortcut that does not require any filtering
- if len(chunkRefs) == 0 || len(filters) == 0 {
+ if len(chunkRefs) == 0 || len(syntax.ExtractLineFilters(queryPlan.AST)) == 0 {
return chunkRefs, nil
}
@@ -84,7 +85,7 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
preFilterChunks := len(chunkRefs)
preFilterSeries := len(grouped)
- refs, err := bq.c.FilterChunks(ctx, tenant, from, through, grouped, filters...)
+ refs, err := bq.c.FilterChunks(ctx, tenant, from, through, grouped, queryPlan)
if err != nil {
return nil, err
}
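
Note: the querier-side shortcut mirrors the gateway's: skip the round trip when there is nothing to filter or nothing to filter with. The updated early-return condition as a standalone predicate (a sketch, not code from the patch):

package sketch

import (
	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql/syntax"
	"github.com/grafana/loki/pkg/querier/plan"
)

// needsBloomFiltering reproduces the early-return condition in the
// updated BloomQuerier.FilterChunkRefs: only call the gateway when
// there are chunk refs and the plan actually contains line filters.
func needsBloomFiltering(chunkRefs []*logproto.ChunkRef, p plan.QueryPlan) bool {
	return len(chunkRefs) > 0 && len(syntax.ExtractLineFilters(p.AST)) > 0
}
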
diff --git a/pkg/bloomgateway/querier_test.go b/pkg/bloomgateway/querier_test.go
index 57e4d501bb44..0d7872927cc4 100644
--- a/pkg/bloomgateway/querier_test.go
+++ b/pkg/bloomgateway/querier_test.go
@@ -8,11 +8,11 @@ import (
"github.com/go-kit/log"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
- "github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
)
type noopClient struct {
@@ -21,7 +21,7 @@ type noopClient struct {
}
// FilterChunks implements Client.
-func (c *noopClient) FilterChunks(ctx context.Context, tenant string, from, through model.Time, groups []*logproto.GroupedChunkRefs, filters ...syntax.LineFilter) ([]*logproto.GroupedChunkRefs, error) { // nolint:revive
+func (c *noopClient) FilterChunks(ctx context.Context, tenant string, from, through model.Time, groups []*logproto.GroupedChunkRefs, plan plan.QueryPlan) ([]*logproto.GroupedChunkRefs, error) { // nolint:revive
c.callCount++
return groups, c.err
}
@@ -42,8 +42,9 @@ func TestBloomQuerier(t *testing.T) {
{Fingerprint: 1000, UserID: tenant, Checksum: 2},
{Fingerprint: 2000, UserID: tenant, Checksum: 3},
}
- filters := []syntax.LineFilter{}
- res, err := bq.FilterChunkRefs(ctx, tenant, from, through, chunkRefs, filters...)
+ expr, err := syntax.ParseExpr(`{foo="bar"}`)
+ require.NoError(t, err)
+ res, err := bq.FilterChunkRefs(ctx, tenant, from, through, chunkRefs, plan.QueryPlan{AST: expr})
require.NoError(t, err)
require.Equal(t, chunkRefs, res)
require.Equal(t, 0, c.callCount)
@@ -57,10 +58,9 @@ func TestBloomQuerier(t *testing.T) {
through := model.Now()
from := through.Add(-12 * time.Hour)
chunkRefs := []*logproto.ChunkRef{}
- filters := []syntax.LineFilter{
- {Ty: labels.MatchEqual, Match: "uuid"},
- }
- res, err := bq.FilterChunkRefs(ctx, tenant, from, through, chunkRefs, filters...)
+ expr, err := syntax.ParseExpr(`{foo="bar"} |= "uuid"`)
+ require.NoError(t, err)
+ res, err := bq.FilterChunkRefs(ctx, tenant, from, through, chunkRefs, plan.QueryPlan{AST: expr})
require.NoError(t, err)
require.Equal(t, chunkRefs, res)
require.Equal(t, 0, c.callCount)
@@ -78,10 +78,9 @@ func TestBloomQuerier(t *testing.T) {
{Fingerprint: 1000, UserID: tenant, Checksum: 2},
{Fingerprint: 2000, UserID: tenant, Checksum: 3},
}
- filters := []syntax.LineFilter{
- {Ty: labels.MatchEqual, Match: "uuid"},
- }
- res, err := bq.FilterChunkRefs(ctx, tenant, from, through, chunkRefs, filters...)
+ expr, err := syntax.ParseExpr(`{foo="bar"} |= "uuid"`)
+ require.NoError(t, err)
+ res, err := bq.FilterChunkRefs(ctx, tenant, from, through, chunkRefs, plan.QueryPlan{AST: expr})
require.Error(t, err)
require.Nil(t, res)
})
diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go
index 3ab234aaa8ae..c3ea06a3df53 100644
--- a/pkg/bloomgateway/util.go
+++ b/pkg/bloomgateway/util.go
@@ -48,9 +48,15 @@ func getFromThrough(refs []*logproto.ShortRef) (model.Time, model.Time) {
// convertToSearches converts a list of line filter expressions to a list of
// byte slices that can be used with the bloom filters.
-func convertToSearches(filters []syntax.LineFilter, t *v1.NGramTokenizer) [][]byte {
+func convertToSearches(t *v1.NGramTokenizer, filters ...syntax.LineFilterExpr) [][]byte {
searches := make([][]byte, 0, (13-t.N)*len(filters))
for _, f := range filters {
+ if f.Left != nil {
+ searches = append(searches, convertToSearches(t, *f.Left)...)
+ }
+ if f.Or != nil {
+ searches = append(searches, convertToSearches(t, *f.Or)...)
+ }
if f.Ty == labels.MatchEqual {
it := t.Tokens(f.Match)
for it.Next() {
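
Note: `convertToSearches` now receives tree nodes, so it recurses into the `Left` chain and any `Or` branch before tokenizing the node's own match string into n-grams. The same traversal pattern as a tiny standalone helper (helper name hypothetical):

package sketch

import "github.com/grafana/loki/pkg/logql/syntax"

// countLeafFilters visits a LineFilterExpr the same way the updated
// convertToSearches does: recurse into Left and Or, then count the
// node's own LineFilter.
func countLeafFilters(f syntax.LineFilterExpr) int {
	n := 1
	if f.Left != nil {
		n += countLeafFilters(*f.Left)
	}
	if f.Or != nil {
		n += countLeafFilters(*f.Or)
	}
	return n
}
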
diff --git a/pkg/logproto/bloomgateway.pb.go b/pkg/logproto/bloomgateway.pb.go
index e5c57e058bd2..98a22fd13168 100644
--- a/pkg/logproto/bloomgateway.pb.go
+++ b/pkg/logproto/bloomgateway.pb.go
@@ -9,6 +9,7 @@ import (
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
github_com_grafana_loki_pkg_logql_syntax "github.com/grafana/loki/pkg/logql/syntax"
+ github_com_grafana_loki_pkg_querier_plan "github.com/grafana/loki/pkg/querier/plan"
github_com_prometheus_common_model "github.com/prometheus/common/model"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
@@ -32,10 +33,12 @@ var _ = math.Inf
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type FilterChunkRefRequest struct {
- From github_com_prometheus_common_model.Time `protobuf:"varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time" json:"from"`
- Through github_com_prometheus_common_model.Time `protobuf:"varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time" json:"through"`
- Refs []*GroupedChunkRefs `protobuf:"bytes,3,rep,name=refs,proto3" json:"refs,omitempty"`
+ From github_com_prometheus_common_model.Time `protobuf:"varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time" json:"from"`
+ Through github_com_prometheus_common_model.Time `protobuf:"varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time" json:"through"`
+ Refs []*GroupedChunkRefs `protobuf:"bytes,3,rep,name=refs,proto3" json:"refs,omitempty"`
+ // TODO(salvacorts): Delete this field once the weekly release is done.
Filters []github_com_grafana_loki_pkg_logql_syntax.LineFilter `protobuf:"bytes,4,rep,name=filters,proto3,customtype=github.com/grafana/loki/pkg/logql/syntax.LineFilter" json:"filters"`
+ Plan github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,5,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan"`
}
func (m *FilterChunkRefRequest) Reset() { *m = FilterChunkRefRequest{} }
@@ -234,37 +237,40 @@ func init() {
func init() { proto.RegisterFile("pkg/logproto/bloomgateway.proto", fileDescriptor_a50b5dd1dbcd1415) }
var fileDescriptor_a50b5dd1dbcd1415 = []byte{
- // 480 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x53, 0xbd, 0x6e, 0xd4, 0x30,
- 0x1c, 0x8f, 0x7b, 0xa7, 0xf6, 0xea, 0x82, 0x40, 0x56, 0xa9, 0xa2, 0x20, 0xf9, 0xa2, 0x08, 0xc1,
- 0x4d, 0x89, 0xd4, 0x2e, 0x48, 0x6c, 0x57, 0x89, 0x0a, 0x89, 0xc9, 0x20, 0x86, 0x6e, 0xb9, 0xd4,
- 0xf9, 0x50, 0x12, 0xff, 0x53, 0xdb, 0x11, 0x74, 0xe3, 0x11, 0x78, 0x0c, 0x9e, 0x80, 0x27, 0x60,
- 0xe8, 0x78, 0x63, 0xc5, 0x50, 0x71, 0xb9, 0x85, 0xb1, 0x8f, 0x80, 0xea, 0x5c, 0x7a, 0x77, 0x15,
- 0xe8, 0x24, 0x26, 0x26, 0x7f, 0xfc, 0xff, 0x3f, 0xfb, 0xf7, 0x61, 0xe3, 0x61, 0x95, 0x27, 0x41,
- 0x01, 0x49, 0x25, 0x41, 0x43, 0x30, 0x29, 0x00, 0xca, 0x24, 0xd4, 0xfc, 0x63, 0x78, 0xe1, 0x9b,
- 0x2d, 0x32, 0xe8, 0x8a, 0xce, 0x7e, 0x02, 0x09, 0xb4, 0x7d, 0xb7, 0xb3, 0xb6, 0xee, 0x3c, 0x5d,
- 0x3b, 0xa0, 0x9b, 0xb4, 0x45, 0xef, 0xfb, 0x16, 0x7e, 0xf2, 0x3a, 0x2b, 0x34, 0x97, 0xc7, 0x69,
- 0x2d, 0x72, 0xc6, 0x63, 0xc6, 0xcf, 0x6b, 0xae, 0x34, 0x39, 0xc6, 0xfd, 0x58, 0x42, 0x69, 0x23,
- 0x17, 0x8d, 0x7a, 0xe3, 0xe0, 0xf2, 0x7a, 0x68, 0xfd, 0xb8, 0x1e, 0xbe, 0x48, 0x32, 0x9d, 0xd6,
- 0x13, 0x3f, 0x82, 0x32, 0xa8, 0x24, 0x94, 0x5c, 0xa7, 0xbc, 0x56, 0x41, 0x04, 0x65, 0x09, 0x22,
- 0x28, 0xe1, 0x8c, 0x17, 0xfe, 0xfb, 0xac, 0xe4, 0xcc, 0x80, 0xc9, 0x1b, 0xbc, 0xa3, 0x53, 0x09,
- 0x75, 0x92, 0xda, 0x5b, 0xff, 0x76, 0x4e, 0x87, 0x27, 0x3e, 0xee, 0x4b, 0x1e, 0x2b, 0xbb, 0xe7,
- 0xf6, 0x46, 0x7b, 0x87, 0x8e, 0x7f, 0x27, 0xe4, 0x44, 0x42, 0x5d, 0xf1, 0xb3, 0x8e, 0xbf, 0x62,
- 0xa6, 0x8f, 0xe4, 0x78, 0x27, 0x36, 0xc2, 0x94, 0xdd, 0x37, 0x90, 0xfd, 0x25, 0xe4, 0x6d, 0x26,
- 0x78, 0xab, 0x7a, 0xfc, 0x6a, 0x41, 0xe8, 0x68, 0x85, 0x50, 0x22, 0xc3, 0x38, 0x14, 0x61, 0x50,
- 0x40, 0x9e, 0x05, 0x0b, 0xf7, 0xce, 0x8b, 0x40, 0x5d, 0x08, 0x1d, 0x7e, 0x5a, 0x01, 0xb3, 0xee,
- 0x06, 0x8f, 0xe1, 0x83, 0xfb, 0x2e, 0xaa, 0x0a, 0x84, 0xe2, 0xe4, 0x25, 0xde, 0x8d, 0x3a, 0x66,
- 0x36, 0xda, 0xc8, 0x7d, 0xd9, 0xec, 0x7d, 0x43, 0x78, 0xf0, 0x2e, 0x05, 0xa9, 0x19, 0x8f, 0xff,
- 0xbb, 0x34, 0x1c, 0x3c, 0x88, 0x52, 0x1e, 0xe5, 0xaa, 0x2e, 0xed, 0x9e, 0x8b, 0x46, 0x0f, 0xd9,
- 0xdd, 0xda, 0xd3, 0xf8, 0xf1, 0x7d, 0x5d, 0xc4, 0xc5, 0x7b, 0x71, 0x26, 0x12, 0x2e, 0x2b, 0x99,
- 0x09, 0x6d, 0x64, 0xf4, 0xd9, 0xea, 0x16, 0x39, 0xc0, 0xdb, 0x9a, 0x8b, 0x50, 0x68, 0xc3, 0x6d,
- 0x97, 0x2d, 0x56, 0xe4, 0xf9, 0x5a, 0xee, 0x64, 0xe9, 0x5d, 0xe7, 0x4d, 0x9b, 0xf7, 0x61, 0x8c,
- 0x1f, 0x8c, 0x6f, 0x3f, 0xc7, 0x49, 0xfb, 0x39, 0xc8, 0x07, 0xfc, 0x68, 0x3d, 0x12, 0x45, 0x86,
- 0x4b, 0xf0, 0x1f, 0xdf, 0xbc, 0xe3, 0xfe, 0xbd, 0xa1, 0x8d, 0xd3, 0xb3, 0xc6, 0xa7, 0xd3, 0x19,
- 0xb5, 0xae, 0x66, 0xd4, 0xba, 0x99, 0x51, 0xf4, 0xb9, 0xa1, 0xe8, 0x6b, 0x43, 0xd1, 0x65, 0x43,
- 0xd1, 0xb4, 0xa1, 0xe8, 0x67, 0x43, 0xd1, 0xaf, 0x86, 0x5a, 0x37, 0x0d, 0x45, 0x5f, 0xe6, 0xd4,
- 0x9a, 0xce, 0xa9, 0x75, 0x35, 0xa7, 0xd6, 0xe9, 0xb3, 0x0d, 0xcf, 0xcb, 0x5c, 0x3a, 0xd9, 0x36,
- 0xc3, 0xd1, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6d, 0x30, 0x9d, 0x8e, 0xf4, 0x03, 0x00, 0x00,
+ // 525 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x53, 0xbb, 0x6e, 0x13, 0x41,
+ 0x14, 0xdd, 0xc1, 0x26, 0x8f, 0x31, 0x2f, 0x8d, 0x42, 0xb4, 0x32, 0xd2, 0x78, 0x65, 0x21, 0x70,
+ 0xb5, 0x2b, 0x39, 0x0d, 0x82, 0xce, 0x91, 0x88, 0x90, 0x28, 0x60, 0x40, 0x14, 0x29, 0x90, 0xd6,
+ 0xce, 0xdd, 0x87, 0xbc, 0x3b, 0xb3, 0x9e, 0x99, 0x15, 0xb8, 0xe3, 0x13, 0xf8, 0x08, 0x0a, 0xbe,
+ 0x80, 0x6f, 0x48, 0xe9, 0x32, 0xa2, 0x88, 0xf0, 0xba, 0xa1, 0xcc, 0x27, 0x20, 0xcf, 0x7a, 0xb3,
+ 0x76, 0x04, 0x44, 0xa2, 0xa2, 0x9a, 0xc7, 0xbd, 0xe7, 0x9e, 0x7b, 0xee, 0x03, 0x77, 0xb2, 0x71,
+ 0xe8, 0x25, 0x22, 0xcc, 0xa4, 0xd0, 0xc2, 0x1b, 0x26, 0x42, 0xa4, 0xa1, 0xaf, 0xe1, 0x83, 0x3f,
+ 0x75, 0xcd, 0x17, 0xd9, 0xa9, 0x8c, 0xed, 0xbd, 0x50, 0x84, 0xa2, 0xf4, 0x5b, 0xde, 0x4a, 0x7b,
+ 0xfb, 0xc1, 0x46, 0x80, 0xea, 0x52, 0x1a, 0xbb, 0x5f, 0x1a, 0xf8, 0xfe, 0xf3, 0x38, 0xd1, 0x20,
+ 0x0f, 0xa3, 0x9c, 0x8f, 0x19, 0x04, 0x0c, 0x26, 0x39, 0x28, 0x4d, 0x0e, 0x71, 0x33, 0x90, 0x22,
+ 0xb5, 0x91, 0x83, 0x7a, 0x8d, 0x81, 0x77, 0x7a, 0xde, 0xb1, 0xbe, 0x9f, 0x77, 0x1e, 0x87, 0xb1,
+ 0x8e, 0xf2, 0xa1, 0x3b, 0x12, 0xa9, 0x97, 0x49, 0x91, 0x82, 0x8e, 0x20, 0x57, 0xde, 0x48, 0xa4,
+ 0xa9, 0xe0, 0x5e, 0x2a, 0x4e, 0x20, 0x71, 0xdf, 0xc6, 0x29, 0x30, 0x03, 0x26, 0x2f, 0xf0, 0xb6,
+ 0x8e, 0xa4, 0xc8, 0xc3, 0xc8, 0xbe, 0xf1, 0x6f, 0x71, 0x2a, 0x3c, 0x71, 0x71, 0x53, 0x42, 0xa0,
+ 0xec, 0x86, 0xd3, 0xe8, 0xb5, 0xfa, 0x6d, 0xf7, 0x52, 0xc8, 0x91, 0x14, 0x79, 0x06, 0x27, 0x55,
+ 0xfe, 0x8a, 0x19, 0x3f, 0x32, 0xc6, 0xdb, 0x81, 0x11, 0xa6, 0xec, 0xa6, 0x81, 0xec, 0xd5, 0x90,
+ 0x97, 0x31, 0x87, 0x52, 0xf5, 0xe0, 0xd9, 0x2a, 0xa1, 0x83, 0xb5, 0x84, 0x42, 0xe9, 0x07, 0x3e,
+ 0xf7, 0xbd, 0x44, 0x8c, 0x63, 0x6f, 0x55, 0xbd, 0x49, 0xe2, 0xa9, 0x29, 0xd7, 0xfe, 0xc7, 0x35,
+ 0x30, 0xab, 0x18, 0xc8, 0x7b, 0xdc, 0xcc, 0x12, 0x9f, 0xdb, 0x37, 0x1d, 0xd4, 0x6b, 0xf5, 0xef,
+ 0xd4, 0x4c, 0xaf, 0x12, 0x9f, 0x0f, 0x9e, 0xae, 0x38, 0xfa, 0x7f, 0xe3, 0x98, 0xe4, 0x20, 0x63,
+ 0x90, 0xde, 0x32, 0x8e, 0xfb, 0x3a, 0x07, 0x39, 0x5d, 0x62, 0x99, 0x89, 0xdb, 0x65, 0x78, 0xff,
+ 0x6a, 0x97, 0x54, 0x26, 0xb8, 0x02, 0xf2, 0x04, 0xef, 0x8e, 0x2a, 0xe5, 0x36, 0xba, 0xb6, 0x36,
+ 0xb5, 0x73, 0xf7, 0x1b, 0xc2, 0x3b, 0x6f, 0x22, 0x21, 0x35, 0x83, 0xe0, 0xbf, 0xeb, 0x76, 0x1b,
+ 0xef, 0x8c, 0x22, 0x18, 0x8d, 0x55, 0x9e, 0xda, 0x0d, 0x07, 0xf5, 0x6e, 0xb3, 0xcb, 0x77, 0x57,
+ 0xe3, 0x7b, 0x57, 0x75, 0x11, 0x07, 0xb7, 0x82, 0x98, 0x87, 0x20, 0x33, 0x19, 0x73, 0x6d, 0x64,
+ 0x34, 0xd9, 0xfa, 0x17, 0xd9, 0xc7, 0x5b, 0x1a, 0xb8, 0xcf, 0xb5, 0xc9, 0x6d, 0x97, 0xad, 0x5e,
+ 0xe4, 0xd1, 0xc6, 0x5c, 0x91, 0xba, 0x76, 0x55, 0x6d, 0xca, 0x79, 0xea, 0x07, 0xf8, 0xd6, 0x60,
+ 0xb9, 0x7c, 0x47, 0xe5, 0xf2, 0x91, 0x77, 0xf8, 0xee, 0x66, 0x4b, 0x14, 0xe9, 0xd4, 0xe0, 0xdf,
+ 0xee, 0x54, 0xdb, 0xf9, 0xb3, 0x43, 0xd9, 0xce, 0xae, 0x35, 0x38, 0x9e, 0xcd, 0xa9, 0x75, 0x36,
+ 0xa7, 0xd6, 0xc5, 0x9c, 0xa2, 0x4f, 0x05, 0x45, 0x5f, 0x0b, 0x8a, 0x4e, 0x0b, 0x8a, 0x66, 0x05,
+ 0x45, 0x3f, 0x0a, 0x8a, 0x7e, 0x16, 0xd4, 0xba, 0x28, 0x28, 0xfa, 0xbc, 0xa0, 0xd6, 0x6c, 0x41,
+ 0xad, 0xb3, 0x05, 0xb5, 0x8e, 0x1f, 0x5e, 0x33, 0xbe, 0x86, 0x74, 0xb8, 0x65, 0x8e, 0x83, 0x5f,
+ 0x01, 0x00, 0x00, 0xff, 0xff, 0xbe, 0xe2, 0x64, 0x8a, 0x54, 0x04, 0x00, 0x00,
}
func (this *FilterChunkRefRequest) Equal(that interface{}) bool {
@@ -308,6 +314,9 @@ func (this *FilterChunkRefRequest) Equal(that interface{}) bool {
return false
}
}
+ if !this.Plan.Equal(that1.Plan) {
+ return false
+ }
return true
}
func (this *FilterChunkRefResponse) Equal(that interface{}) bool {
@@ -408,7 +417,7 @@ func (this *FilterChunkRefRequest) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 8)
+ s := make([]string, 0, 9)
s = append(s, "&logproto.FilterChunkRefRequest{")
s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n")
s = append(s, "Through: "+fmt.Sprintf("%#v", this.Through)+",\n")
@@ -416,6 +425,7 @@ func (this *FilterChunkRefRequest) GoString() string {
s = append(s, "Refs: "+fmt.Sprintf("%#v", this.Refs)+",\n")
}
s = append(s, "Filters: "+fmt.Sprintf("%#v", this.Filters)+",\n")
+ s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -566,6 +576,16 @@ func (m *FilterChunkRefRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ {
+ size := m.Plan.Size()
+ i -= size
+ if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintBloomgateway(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
if len(m.Filters) > 0 {
for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -766,6 +786,8 @@ func (m *FilterChunkRefRequest) Size() (n int) {
n += 1 + l + sovBloomgateway(uint64(l))
}
}
+ l = m.Plan.Size()
+ n += 1 + l + sovBloomgateway(uint64(l))
return n
}
@@ -844,6 +866,7 @@ func (this *FilterChunkRefRequest) String() string {
`Through:` + fmt.Sprintf("%v", this.Through) + `,`,
`Refs:` + repeatedStringForRefs + `,`,
`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+ `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`,
`}`,
}, "")
return s
@@ -1035,6 +1058,39 @@ func (m *FilterChunkRefRequest) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBloomgateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthBloomgateway
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthBloomgateway
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipBloomgateway(dAtA[iNdEx:])
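
Note on the generated marshalling above: it encodes the new field with the raw protobuf wire format. The key byte 0x2a is (field_number << 3) | wire_type, i.e. field 5 with wire type 2 (length-delimited), followed by a varint length and the serialized QueryPlan bytes. A one-liner to confirm the constant:

package main

import "fmt"

func main() {
	const fieldNumber, lengthDelimited = 5, 2
	fmt.Printf("0x%x\n", fieldNumber<<3|lengthDelimited) // 0x2a
}
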
diff --git a/pkg/logproto/bloomgateway.proto b/pkg/logproto/bloomgateway.proto
index 473ecf3f8153..13d5c25e763f 100644
--- a/pkg/logproto/bloomgateway.proto
+++ b/pkg/logproto/bloomgateway.proto
@@ -17,10 +17,15 @@ message FilterChunkRefRequest {
(gogoproto.nullable) = false
];
repeated GroupedChunkRefs refs = 3;
- repeated logproto.LineFilter filters = 4 [
+ // TODO(salvacorts): Delete this field once the weekly release is done.
+ repeated LineFilter filters = 4 [
(gogoproto.customtype) = "github.com/grafana/loki/pkg/logql/syntax.LineFilter",
(gogoproto.nullable) = false
];
+ Plan plan = 5 [
+ (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan",
+ (gogoproto.nullable) = false
+ ];
}
message FilterChunkRefResponse {
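
Note: `(gogoproto.customtype)` combined with `(gogoproto.nullable) = false` is what makes the generated struct hold `plan.QueryPlan` by value and delegate (de)serialization to it, which is why the generated code above calls `m.Plan.Size()`, `m.Plan.MarshalTo(...)`, `m.Plan.Unmarshal(...)` and `this.Plan.Equal(...)`. Sketched as an interface (gogo/protobuf declares no interface by this name, and `Equal` takes the concrete type in practice):

package sketch

// planSerde lists the methods the generated code expects from a
// customtype such as plan.QueryPlan, inferred from the calls above.
type planSerde interface {
	Size() int
	MarshalTo(dAtA []byte) (int, error)
	Unmarshal(dAtA []byte) error
}
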
diff --git a/pkg/logproto/compat.go b/pkg/logproto/compat.go
index ee3d9ce1e003..0e65a90da02f 100644
--- a/pkg/logproto/compat.go
+++ b/pkg/logproto/compat.go
@@ -367,24 +367,8 @@ func (m *FilterChunkRefRequest) GetQuery() string {
chunksHash = h.Sum64()
}
- // Short circuit if there are no filters.
- if len(m.Filters) == 0 {
- return fmt.Sprintf("%d", chunksHash)
- }
-
- var sb strings.Builder
- for i, filter := range m.Filters {
- if i > 0 {
- sb.WriteString(",")
- }
- sb.Write(fmt.Appendf(encodeBuf[:0], "%d", filter.Ty))
- sb.WriteString("-")
- sb.WriteString(filter.Match)
- sb.WriteString("-")
- sb.WriteString(filter.Op)
- }
-
- return fmt.Sprintf("%d/%s", chunksHash, sb.String())
+ // TODO(salvacorts): plan.String() will return the whole query. This is not optimal since we are only interested in the filter expressions.
+ return fmt.Sprintf("%d/%d", chunksHash, m.Plan.Hash())
}
// GetCachingOptions returns the caching options.
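
Note: since `GetQuery` feeds the results-cache key, the key shape changes from `<chunksHash>/<ty>-<match>-<op>,...` to `<chunksHash>/<planHash>`; the updated expectations in compat_test.go below follow directly from that. A minimal reproduction of the new key (hash values depend on the plan, so none are asserted here):

package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/syntax"
	"github.com/grafana/loki/pkg/querier/plan"
)

func main() {
	p := plan.QueryPlan{AST: syntax.MustParseExpr(`{foo="bar"} |= "uuid"`)}
	// chunksHash is 0 when the request carries no chunk refs, matching
	// the "request with filters but no chunks" test case below.
	fmt.Printf("%d/%d\n", uint64(0), p.Hash())
}
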
diff --git a/pkg/logproto/compat_test.go b/pkg/logproto/compat_test.go
index 4cfad825e183..a066fe65fed1 100644
--- a/pkg/logproto/compat_test.go
+++ b/pkg/logproto/compat_test.go
@@ -7,12 +7,12 @@ import (
"testing"
"unsafe"
+ "github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
jsoniter "github.com/json-iterator/go"
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-
- "github.com/grafana/loki/pkg/logql/syntax"
)
// This test verifies that jsoninter uses our custom method for marshalling.
@@ -287,7 +287,7 @@ func TestFilterChunkRefRequestGetQuery(t *testing.T) {
}{
{
desc: "empty request",
- expected: `0`,
+ expected: `0/0`,
},
{
desc: "request no filters",
@@ -299,19 +299,16 @@ func TestFilterChunkRefRequestGetQuery(t *testing.T) {
},
},
},
- expected: `9962287286179718960`,
+ expected: `9962287286179718960/0`,
},
{
desc: "request with filters but no chunks",
request: FilterChunkRefRequest{
- Filters: []syntax.LineFilter{
- {
- Ty: 0,
- Match: "uuid",
- },
+ Plan: plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{foo="bar"} |= "uuid"`),
},
},
- expected: `0/0-uuid-`,
+ expected: `0/938557591`,
},
{
desc: "request with filters and chunks",
@@ -326,18 +323,11 @@ func TestFilterChunkRefRequestGetQuery(t *testing.T) {
Tenant: "test",
},
},
- Filters: []syntax.LineFilter{
- {
- Ty: 0,
- Match: "uuid",
- },
- {
- Ty: 1,
- Match: "trace",
- },
+ Plan: plan.QueryPlan{
+ AST: syntax.MustParseExpr(`{foo="bar"} |= "uuid" != "trace"`),
},
},
- expected: `8827404902424034886/0-uuid-,1-trace-`,
+ expected: `8827404902424034886/2710035654`,
},
} {
t.Run(tc.desc, func(t *testing.T) {
diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go
index f0d826c5df6d..d50ae7d1e5db 100644
--- a/pkg/logproto/logproto.pb.go
+++ b/pkg/logproto/logproto.pb.go
@@ -1784,10 +1784,12 @@ func (m *LineFilter) GetRaw() []byte {
}
type GetChunkRefRequest struct {
- From github_com_prometheus_common_model.Time `protobuf:"varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time" json:"from"`
- Through github_com_prometheus_common_model.Time `protobuf:"varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time" json:"through"`
- Matchers string `protobuf:"bytes,3,opt,name=matchers,proto3" json:"matchers,omitempty"`
- Filters []github_com_grafana_loki_pkg_logql_syntax.LineFilter `protobuf:"bytes,4,rep,name=filters,proto3,customtype=github.com/grafana/loki/pkg/logql/syntax.LineFilter" json:"filters"`
+ From github_com_prometheus_common_model.Time `protobuf:"varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time" json:"from"`
+ Through github_com_prometheus_common_model.Time `protobuf:"varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time" json:"through"`
+ Matchers string `protobuf:"bytes,3,opt,name=matchers,proto3" json:"matchers,omitempty"`
+ // TODO(salvacorts): Delete this field once the weekly release is done.
+ Filters []github_com_grafana_loki_pkg_logql_syntax.LineFilter `protobuf:"bytes,4,rep,name=filters,proto3,customtype=github.com/grafana/loki/pkg/logql/syntax.LineFilter" json:"filters"`
+ Plan github_com_grafana_loki_pkg_querier_plan.QueryPlan `protobuf:"bytes,5,opt,name=plan,proto3,customtype=github.com/grafana/loki/pkg/querier/plan.QueryPlan" json:"plan"`
}
func (m *GetChunkRefRequest) Reset() { *m = GetChunkRefRequest{} }
@@ -2561,149 +2563,150 @@ func init() {
func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) }
var fileDescriptor_c28a5f14f1f4c79a = []byte{
- // 2265 bytes of a gzipped FileDescriptorProto
+ // 2278 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4d, 0x6f, 0x1b, 0xc7,
0x95, 0x4b, 0x2e, 0xbf, 0x1e, 0x29, 0x59, 0x1e, 0x31, 0x36, 0x41, 0xdb, 0xa4, 0x3c, 0x48, 0x1d,
0xc1, 0x71, 0xc8, 0x58, 0x6e, 0xdc, 0xd4, 0x6e, 0xd0, 0x9a, 0x52, 0xec, 0xc8, 0x96, 0x3f, 0x32,
- 0x72, 0xdd, 0xc2, 0x68, 0x61, 0xac, 0xc4, 0x11, 0x45, 0x88, 0xbb, 0x4b, 0xef, 0x0e, 0x63, 0x0b,
- 0xe8, 0xa1, 0x7f, 0xa0, 0x68, 0x6e, 0x45, 0x2f, 0x45, 0x0f, 0x05, 0x52, 0xa0, 0xc8, 0xa5, 0x3f,
+ 0x72, 0xdd, 0xc2, 0x68, 0x6b, 0xac, 0xc4, 0x11, 0x45, 0x88, 0xbb, 0x4b, 0xef, 0x0e, 0x63, 0x0b,
+ 0xe8, 0xa1, 0x7f, 0x20, 0x68, 0x6e, 0x45, 0x2f, 0x45, 0x0f, 0x05, 0x52, 0xa0, 0xe8, 0xa5, 0x3f,
0xa0, 0xbd, 0xf4, 0xe0, 0xde, 0xdc, 0x5b, 0x90, 0x03, 0x5b, 0xcb, 0x97, 0x42, 0xa7, 0xdc, 0x72,
- 0x2d, 0xe6, 0x6b, 0x77, 0x96, 0xa2, 0xdc, 0xd0, 0x75, 0x11, 0xf8, 0xc2, 0x9d, 0xf7, 0xe6, 0xcd,
- 0x9b, 0xf7, 0x35, 0xef, 0xcd, 0x1b, 0xc2, 0x89, 0xc1, 0x4e, 0xb7, 0xd5, 0xf7, 0xbb, 0x83, 0xc0,
- 0x67, 0x7e, 0x34, 0x68, 0x8a, 0x5f, 0x54, 0xd0, 0x70, 0xad, 0xd2, 0xf5, 0xbb, 0xbe, 0xa4, 0xe1,
- 0x23, 0x39, 0x5f, 0x6b, 0x74, 0x7d, 0xbf, 0xdb, 0xa7, 0x2d, 0x01, 0x6d, 0x0c, 0xb7, 0x5a, 0xac,
- 0xe7, 0xd2, 0x90, 0x39, 0xee, 0x40, 0x11, 0x2c, 0x28, 0xee, 0x0f, 0xfb, 0xae, 0xdf, 0xa1, 0xfd,
- 0x56, 0xc8, 0x1c, 0x16, 0xca, 0x5f, 0x45, 0x31, 0xcf, 0x29, 0x06, 0xc3, 0x70, 0x5b, 0xfc, 0x48,
- 0x24, 0xae, 0x00, 0x5a, 0x67, 0x01, 0x75, 0x5c, 0xe2, 0x30, 0x1a, 0x12, 0xfa, 0x70, 0x48, 0x43,
- 0x86, 0x6f, 0xc2, 0x7c, 0x02, 0x1b, 0x0e, 0x7c, 0x2f, 0xa4, 0xe8, 0x22, 0x94, 0xc2, 0x18, 0x5d,
- 0xb5, 0x16, 0x32, 0x8b, 0xa5, 0xa5, 0x4a, 0x33, 0x52, 0x25, 0x5e, 0x43, 0x4c, 0x42, 0xfc, 0x3b,
- 0x0b, 0x20, 0x9e, 0x43, 0x75, 0x00, 0x39, 0xfb, 0x91, 0x13, 0x6e, 0x57, 0xad, 0x05, 0x6b, 0xd1,
- 0x26, 0x06, 0x06, 0x9d, 0x83, 0xa3, 0x31, 0x74, 0xcb, 0x5f, 0xdf, 0x76, 0x82, 0x4e, 0x35, 0x2d,
- 0xc8, 0x0e, 0x4e, 0x20, 0x04, 0x76, 0xe0, 0x30, 0x5a, 0xcd, 0x2c, 0x58, 0x8b, 0x19, 0x22, 0xc6,
- 0xe8, 0x18, 0xe4, 0x18, 0xf5, 0x1c, 0x8f, 0x55, 0xed, 0x05, 0x6b, 0xb1, 0x48, 0x14, 0xc4, 0xf1,
- 0x5c, 0x77, 0x1a, 0x56, 0xb3, 0x0b, 0xd6, 0xe2, 0x0c, 0x51, 0x10, 0xfe, 0x2c, 0x03, 0xe5, 0x8f,
- 0x87, 0x34, 0xd8, 0x55, 0x06, 0x40, 0x75, 0x28, 0x84, 0xb4, 0x4f, 0x37, 0x99, 0x1f, 0x08, 0x01,
- 0x8b, 0xed, 0x74, 0xd5, 0x22, 0x11, 0x0e, 0x55, 0x20, 0xdb, 0xef, 0xb9, 0x3d, 0x26, 0xc4, 0x9a,
- 0x21, 0x12, 0x40, 0x97, 0x20, 0x1b, 0x32, 0x27, 0x60, 0x42, 0x96, 0xd2, 0x52, 0xad, 0x29, 0x9d,
- 0xd6, 0xd4, 0x4e, 0x6b, 0xde, 0xd5, 0x4e, 0x6b, 0x17, 0x9e, 0x8c, 0x1a, 0xa9, 0x4f, 0xff, 0xd9,
- 0xb0, 0x88, 0x5c, 0x82, 0x2e, 0x42, 0x86, 0x7a, 0x1d, 0x21, 0xef, 0x37, 0x5d, 0xc9, 0x17, 0xa0,
- 0xf3, 0x50, 0xec, 0xf4, 0x02, 0xba, 0xc9, 0x7a, 0xbe, 0x27, 0xb4, 0x9a, 0x5d, 0x9a, 0x8f, 0x3d,
- 0xb2, 0xa2, 0xa7, 0x48, 0x4c, 0x85, 0xce, 0x41, 0x2e, 0xe4, 0xa6, 0x0b, 0xab, 0xf9, 0x85, 0xcc,
- 0x62, 0xb1, 0x5d, 0xd9, 0x1f, 0x35, 0xe6, 0x24, 0xe6, 0x9c, 0xef, 0xf6, 0x18, 0x75, 0x07, 0x6c,
- 0x97, 0x28, 0x1a, 0x74, 0x16, 0xf2, 0x1d, 0xda, 0xa7, 0xdc, 0xe1, 0x05, 0xe1, 0xf0, 0x39, 0x83,
- 0xbd, 0x98, 0x20, 0x9a, 0x00, 0xdd, 0x07, 0x7b, 0xd0, 0x77, 0xbc, 0x6a, 0x51, 0x68, 0x31, 0x1b,
- 0x13, 0xde, 0xe9, 0x3b, 0x5e, 0xfb, 0xe2, 0x97, 0xa3, 0xc6, 0x52, 0xb7, 0xc7, 0xb6, 0x87, 0x1b,
- 0xcd, 0x4d, 0xdf, 0x6d, 0x75, 0x03, 0x67, 0xcb, 0xf1, 0x9c, 0x56, 0xdf, 0xdf, 0xe9, 0xb5, 0x78,
- 0x70, 0x3e, 0x1c, 0xd2, 0xa0, 0x47, 0x83, 0x16, 0xe7, 0xd1, 0x14, 0xfe, 0xe0, 0xeb, 0x88, 0xe0,
- 0x79, 0xdd, 0x2e, 0xe4, 0xe6, 0xf2, 0x78, 0x94, 0x06, 0xb4, 0xee, 0xb8, 0x83, 0x3e, 0x9d, 0xca,
- 0x5f, 0x91, 0x67, 0xd2, 0x2f, 0xed, 0x99, 0xcc, 0xb4, 0x9e, 0x89, 0xcd, 0x6c, 0x4f, 0x67, 0xe6,
- 0xec, 0x37, 0x35, 0x73, 0xee, 0xd5, 0x9b, 0x19, 0x57, 0xc1, 0xe6, 0x10, 0x9a, 0x83, 0x4c, 0xe0,
- 0x3c, 0x12, 0xc6, 0x2c, 0x13, 0x3e, 0xc4, 0x6b, 0x90, 0x93, 0x82, 0xa0, 0xda, 0xb8, 0xb5, 0x93,
- 0x27, 0x23, 0xb6, 0x74, 0x46, 0xdb, 0x70, 0x2e, 0xb6, 0x61, 0x46, 0x58, 0x07, 0xff, 0xde, 0x82,
- 0x19, 0xe5, 0x42, 0x95, 0x5d, 0x36, 0x20, 0x2f, 0x4f, 0xb7, 0xce, 0x2c, 0xc7, 0xc7, 0x33, 0xcb,
- 0x95, 0x8e, 0x33, 0x60, 0x34, 0x68, 0xb7, 0x9e, 0x8c, 0x1a, 0xd6, 0x97, 0xa3, 0xc6, 0x5b, 0x2f,
- 0xd2, 0x52, 0x24, 0x39, 0x95, 0x75, 0x34, 0x63, 0xf4, 0xb6, 0x90, 0x8e, 0x85, 0x2a, 0x0e, 0x8e,
- 0x34, 0x65, 0x82, 0x5c, 0xf5, 0xba, 0x34, 0xe4, 0x9c, 0x6d, 0xee, 0x42, 0x22, 0x69, 0xf0, 0x2f,
- 0x60, 0x3e, 0x11, 0x6a, 0x4a, 0xce, 0xf7, 0x21, 0x17, 0x72, 0x03, 0x6a, 0x31, 0x0d, 0x47, 0xad,
- 0x0b, 0x7c, 0x7b, 0x56, 0xc9, 0x97, 0x93, 0x30, 0x51, 0xf4, 0xd3, 0xed, 0xfe, 0x37, 0x0b, 0xca,
- 0x6b, 0xce, 0x06, 0xed, 0xeb, 0x18, 0x47, 0x60, 0x7b, 0x8e, 0x4b, 0x95, 0xc5, 0xc5, 0x98, 0x27,
- 0xb4, 0x4f, 0x9c, 0xfe, 0x90, 0x4a, 0x96, 0x05, 0xa2, 0xa0, 0x69, 0x33, 0x91, 0xf5, 0xd2, 0x99,
- 0xc8, 0x8a, 0xe3, 0xbd, 0x02, 0x59, 0x1e, 0x59, 0xbb, 0x22, 0x0b, 0x15, 0x89, 0x04, 0xf0, 0x5b,
- 0x30, 0xa3, 0xb4, 0x50, 0xe6, 0x8b, 0x45, 0xe6, 0xe6, 0x2b, 0x6a, 0x91, 0xb1, 0x0b, 0x39, 0x69,
- 0x6d, 0xf4, 0x26, 0x14, 0xa3, 0xea, 0x26, 0xb4, 0xcd, 0xb4, 0x73, 0xfb, 0xa3, 0x46, 0x9a, 0x85,
- 0x24, 0x9e, 0x40, 0x0d, 0xc8, 0x8a, 0x95, 0x42, 0x73, 0xab, 0x5d, 0xdc, 0x1f, 0x35, 0x24, 0x82,
- 0xc8, 0x0f, 0x3a, 0x09, 0xf6, 0x36, 0x2f, 0x30, 0xdc, 0x04, 0x76, 0xbb, 0xb0, 0x3f, 0x6a, 0x08,
- 0x98, 0x88, 0x5f, 0x7c, 0x0d, 0xca, 0x6b, 0xb4, 0xeb, 0x6c, 0xee, 0xaa, 0x4d, 0x2b, 0x9a, 0x1d,
- 0xdf, 0xd0, 0xd2, 0x3c, 0x4e, 0x43, 0x39, 0xda, 0xf1, 0x81, 0x1b, 0xaa, 0xa0, 0x2e, 0x45, 0xb8,
- 0x9b, 0x21, 0xfe, 0xad, 0x05, 0xca, 0xcf, 0x08, 0x43, 0xae, 0xcf, 0x75, 0x0d, 0x55, 0x0e, 0x82,
- 0xfd, 0x51, 0x43, 0x61, 0x88, 0xfa, 0xa2, 0xcb, 0x90, 0x0f, 0xc5, 0x8e, 0x9c, 0xd9, 0x78, 0xf8,
- 0x88, 0x89, 0xf6, 0x11, 0x1e, 0x06, 0xfb, 0xa3, 0x86, 0x26, 0x24, 0x7a, 0x80, 0x9a, 0x89, 0xca,
- 0x29, 0x15, 0x9b, 0xdd, 0x1f, 0x35, 0x0c, 0xac, 0x59, 0x49, 0xf1, 0xd7, 0x16, 0x94, 0xee, 0x3a,
- 0xbd, 0x28, 0x84, 0xaa, 0xda, 0x45, 0x71, 0x8e, 0x94, 0x08, 0x7e, 0xa4, 0x3b, 0xb4, 0xef, 0xec,
- 0x5e, 0xf5, 0x03, 0xc1, 0x77, 0x86, 0x44, 0x70, 0x5c, 0xec, 0xec, 0x89, 0xc5, 0x2e, 0x3b, 0x7d,
- 0x4a, 0xfd, 0x3f, 0x26, 0xb0, 0xeb, 0x76, 0x21, 0x3d, 0x97, 0xc1, 0x9f, 0x5b, 0x50, 0x96, 0x9a,
- 0xab, 0xb0, 0xfb, 0x19, 0xe4, 0xa4, 0x61, 0x84, 0xee, 0x2f, 0x48, 0x2e, 0x6f, 0x4f, 0x93, 0x58,
- 0x14, 0x4f, 0xf4, 0x43, 0x98, 0xed, 0x04, 0xfe, 0x60, 0x40, 0x3b, 0xeb, 0x2a, 0x85, 0xa5, 0xc7,
- 0x53, 0xd8, 0x8a, 0x39, 0x4f, 0xc6, 0xc8, 0xf1, 0xdf, 0x2d, 0x98, 0x51, 0xd9, 0x42, 0xf9, 0x2a,
- 0xb2, 0xaf, 0xf5, 0xd2, 0x25, 0x2b, 0x3d, 0x6d, 0xc9, 0x3a, 0x06, 0xb9, 0x6e, 0xe0, 0x0f, 0x07,
- 0x61, 0x35, 0x23, 0xcf, 0xa6, 0x84, 0xa6, 0x2b, 0x65, 0xf8, 0x3a, 0xcc, 0x6a, 0x55, 0x0e, 0x49,
- 0x99, 0xb5, 0xf1, 0x94, 0xb9, 0xda, 0xa1, 0x1e, 0xeb, 0x6d, 0xf5, 0xa2, 0x24, 0xa8, 0xe8, 0xf1,
- 0xaf, 0x2d, 0x98, 0x1b, 0x27, 0x41, 0x2b, 0xc6, 0x39, 0xe3, 0xec, 0xce, 0x1c, 0xce, 0xae, 0x29,
- 0x92, 0x4f, 0xf8, 0xa1, 0xc7, 0x82, 0x5d, 0xcd, 0x5a, 0xae, 0xad, 0xbd, 0x07, 0x25, 0x63, 0x92,
- 0x97, 0xa8, 0x1d, 0xaa, 0x4e, 0x06, 0xe1, 0xc3, 0x38, 0x25, 0xa4, 0x65, 0x42, 0x13, 0x00, 0xfe,
- 0x8d, 0x05, 0x33, 0x09, 0x5f, 0xa2, 0xf7, 0xc1, 0xde, 0x0a, 0x7c, 0x77, 0x2a, 0x47, 0x89, 0x15,
- 0xe8, 0xbb, 0x90, 0x66, 0xfe, 0x54, 0x6e, 0x4a, 0x33, 0x9f, 0x7b, 0x49, 0xa9, 0x9f, 0x91, 0xb7,
- 0x5b, 0x09, 0xe1, 0xf7, 0xa0, 0x28, 0x14, 0xba, 0xe3, 0xf4, 0x82, 0x89, 0xd5, 0x62, 0xb2, 0x42,
- 0x97, 0xe1, 0x88, 0xcc, 0x84, 0x93, 0x17, 0x97, 0x27, 0x2d, 0x2e, 0xeb, 0xc5, 0x27, 0x20, 0xbb,
- 0xbc, 0x3d, 0xf4, 0x76, 0xf8, 0x92, 0x8e, 0xc3, 0x1c, 0xbd, 0x84, 0x8f, 0xf1, 0x1b, 0x30, 0xcf,
- 0xcf, 0x20, 0x0d, 0xc2, 0x65, 0x7f, 0xe8, 0x31, 0xdd, 0x5d, 0x9c, 0x83, 0x4a, 0x12, 0xad, 0xa2,
- 0xa4, 0x02, 0xd9, 0x4d, 0x8e, 0x10, 0x3c, 0x66, 0x88, 0x04, 0xf0, 0x1f, 0x2c, 0x40, 0xd7, 0x28,
- 0x13, 0xbb, 0xac, 0xae, 0x44, 0xc7, 0xa3, 0x06, 0x05, 0xd7, 0x61, 0x9b, 0xdb, 0x34, 0x08, 0xf5,
- 0x1d, 0x44, 0xc3, 0xdf, 0xc6, 0x6d, 0x0f, 0x9f, 0x87, 0xf9, 0x84, 0x94, 0x4a, 0xa7, 0x1a, 0x14,
- 0x36, 0x15, 0x4e, 0xd5, 0xbb, 0x08, 0xc6, 0x7f, 0x4e, 0x43, 0x41, 0x2c, 0x20, 0x74, 0x0b, 0x9d,
- 0x87, 0xd2, 0x56, 0xcf, 0xeb, 0xd2, 0x60, 0x10, 0xf4, 0x94, 0x09, 0xec, 0xf6, 0x91, 0xfd, 0x51,
- 0xc3, 0x44, 0x13, 0x13, 0x40, 0xef, 0x40, 0x7e, 0x18, 0xd2, 0xe0, 0x41, 0x4f, 0x9e, 0xf4, 0x62,
- 0xbb, 0xb2, 0x37, 0x6a, 0xe4, 0x7e, 0x1c, 0xd2, 0x60, 0x75, 0x85, 0x57, 0x9e, 0xa1, 0x18, 0x11,
- 0xf9, 0xed, 0xa0, 0x1b, 0x2a, 0x4c, 0xc5, 0x25, 0xac, 0xfd, 0x3d, 0x2e, 0xfe, 0x58, 0xaa, 0x1b,
- 0x04, 0xbe, 0x4b, 0xd9, 0x36, 0x1d, 0x86, 0xad, 0x4d, 0xdf, 0x75, 0x7d, 0xaf, 0x25, 0x7a, 0x49,
- 0xa1, 0x34, 0x2f, 0x9f, 0x7c, 0xb9, 0x8a, 0xdc, 0xbb, 0x90, 0x67, 0xdb, 0x81, 0x3f, 0xec, 0x6e,
- 0x8b, 0xaa, 0x90, 0x69, 0x5f, 0x9a, 0x9e, 0x9f, 0xe6, 0x40, 0xf4, 0x00, 0x9d, 0xe6, 0xd6, 0xa2,
- 0x9b, 0x3b, 0xe1, 0xd0, 0x95, 0x1d, 0x5a, 0x3b, 0xbb, 0x3f, 0x6a, 0x58, 0xef, 0x90, 0x08, 0x8d,
- 0x7f, 0x95, 0x86, 0x86, 0x08, 0xd4, 0x7b, 0xe2, 0xda, 0x70, 0xd5, 0x0f, 0x6e, 0x52, 0x16, 0xf4,
- 0x36, 0x6f, 0x39, 0x2e, 0xd5, 0xb1, 0xd1, 0x80, 0x92, 0x2b, 0x90, 0x0f, 0x8c, 0x23, 0x00, 0x6e,
- 0x44, 0x87, 0x4e, 0x01, 0x88, 0x33, 0x23, 0xe7, 0xe5, 0x69, 0x28, 0x0a, 0x8c, 0x98, 0x5e, 0x4e,
- 0x58, 0xaa, 0x35, 0xa5, 0x66, 0xca, 0x42, 0xab, 0xe3, 0x16, 0x9a, 0x9a, 0x4f, 0x64, 0x16, 0x33,
- 0xd6, 0xb3, 0xc9, 0x58, 0xc7, 0xff, 0xb0, 0xa0, 0xbe, 0xa6, 0x25, 0x7f, 0x49, 0x73, 0x68, 0x7d,
- 0xd3, 0xaf, 0x48, 0xdf, 0xcc, 0xff, 0xa6, 0x2f, 0xae, 0x03, 0xac, 0xf5, 0x3c, 0x7a, 0xb5, 0xd7,
- 0x67, 0x34, 0x98, 0xd0, 0x89, 0x7c, 0x9e, 0x8e, 0x53, 0x02, 0xa1, 0x5b, 0x5a, 0xcf, 0x65, 0x23,
- 0x0f, 0xbf, 0x0a, 0x35, 0xd2, 0xaf, 0xd0, 0x6d, 0x99, 0xb1, 0x14, 0xb5, 0x03, 0xf9, 0x2d, 0xa1,
- 0x9e, 0x2c, 0xa9, 0x89, 0x67, 0x94, 0x58, 0xf7, 0xf6, 0x65, 0xb5, 0xf9, 0x85, 0x17, 0x5d, 0x48,
- 0xc4, 0xab, 0x4f, 0x2b, 0xdc, 0xf5, 0x98, 0xf3, 0xd8, 0x58, 0x4c, 0xf4, 0x0e, 0xf8, 0x83, 0x38,
- 0x37, 0x09, 0x73, 0xa9, 0xdc, 0x74, 0x06, 0xec, 0x80, 0x6e, 0xe9, 0x22, 0x8a, 0x62, 0x01, 0x22,
- 0x4a, 0x31, 0x8f, 0xff, 0x62, 0xc1, 0xdc, 0x35, 0xca, 0x92, 0xd7, 0x93, 0xd7, 0xc8, 0xd8, 0xf8,
- 0x23, 0x38, 0x6a, 0xc8, 0xaf, 0xb4, 0xbf, 0x30, 0x76, 0x27, 0x79, 0x23, 0xd6, 0x7f, 0xd5, 0xeb,
- 0xd0, 0xc7, 0xaa, 0x97, 0x4b, 0x5e, 0x47, 0xee, 0x40, 0xc9, 0x98, 0x44, 0x57, 0xc6, 0x2e, 0x22,
- 0xc6, 0xcb, 0x4b, 0x54, 0x4c, 0xdb, 0x15, 0xa5, 0x93, 0xec, 0xe6, 0xd4, 0x35, 0x33, 0x2a, 0xda,
- 0xeb, 0x80, 0xc4, 0x0d, 0x56, 0xb0, 0x35, 0xcb, 0x86, 0xc0, 0xde, 0x88, 0x6e, 0x24, 0x11, 0x8c,
- 0x4e, 0x83, 0x1d, 0xf8, 0x8f, 0xf4, 0x0d, 0x73, 0x26, 0xde, 0x92, 0xf8, 0x8f, 0x88, 0x98, 0xc2,
- 0x97, 0x21, 0x43, 0xfc, 0x47, 0xa8, 0x0e, 0x10, 0x38, 0x5e, 0x97, 0xde, 0x8b, 0x1a, 0x9b, 0x32,
- 0x31, 0x30, 0x87, 0x94, 0xf4, 0x65, 0x38, 0x6a, 0x4a, 0x24, 0xdd, 0xdd, 0x84, 0xfc, 0xc7, 0x43,
- 0xd3, 0x5c, 0x95, 0x31, 0x73, 0xc9, 0x1e, 0x59, 0x13, 0xf1, 0x98, 0x81, 0x18, 0x8f, 0x4e, 0x42,
- 0x91, 0x39, 0x1b, 0x7d, 0x7a, 0x2b, 0x4e, 0x40, 0x31, 0x82, 0xcf, 0xf2, 0x9e, 0xec, 0x9e, 0x71,
- 0x37, 0x89, 0x11, 0xe8, 0x2c, 0xcc, 0xc5, 0x32, 0xdf, 0x09, 0xe8, 0x56, 0xef, 0xb1, 0xf0, 0x70,
- 0x99, 0x1c, 0xc0, 0xa3, 0x45, 0x38, 0x12, 0xe3, 0xd6, 0xc5, 0x1d, 0xc0, 0x16, 0xa4, 0xe3, 0x68,
- 0x6e, 0x1b, 0xa1, 0xee, 0x87, 0x0f, 0x87, 0x4e, 0x5f, 0x64, 0xd5, 0x32, 0x31, 0x30, 0xf8, 0xaf,
- 0x16, 0x1c, 0x95, 0xae, 0xe6, 0xdd, 0xf8, 0xeb, 0x18, 0xf5, 0x9f, 0x59, 0x80, 0x4c, 0x0d, 0x54,
- 0x68, 0x7d, 0xc7, 0x7c, 0x66, 0xe1, 0x97, 0x8c, 0x92, 0x68, 0x35, 0x25, 0x2a, 0x7e, 0x29, 0xc1,
- 0x90, 0x13, 0x17, 0x15, 0xd9, 0xf3, 0xda, 0xb2, 0x97, 0x95, 0x18, 0xa2, 0xbe, 0xbc, 0x05, 0xdf,
- 0xd8, 0x65, 0x34, 0x54, 0x9d, 0xa8, 0x68, 0xc1, 0x05, 0x82, 0xc8, 0x0f, 0xdf, 0x8b, 0x7a, 0x4c,
- 0x44, 0x8d, 0x1d, 0xef, 0xa5, 0x50, 0x44, 0x0f, 0xf0, 0x9f, 0xd2, 0x30, 0x73, 0xcf, 0xef, 0x0f,
- 0xe3, 0x92, 0xf5, 0x3a, 0xa5, 0xf2, 0x44, 0x7b, 0x9c, 0xd5, 0xed, 0x31, 0x02, 0x3b, 0x64, 0x74,
- 0x20, 0x22, 0x2b, 0x43, 0xc4, 0x18, 0x61, 0x28, 0x33, 0x27, 0xe8, 0x52, 0x26, 0xfb, 0x8e, 0x6a,
- 0x4e, 0x5c, 0x08, 0x13, 0x38, 0xb4, 0x00, 0x25, 0xa7, 0xdb, 0x0d, 0x68, 0xd7, 0x61, 0xb4, 0xbd,
- 0x5b, 0xcd, 0x8b, 0xcd, 0x4c, 0x14, 0xfe, 0x29, 0xcc, 0x6a, 0x63, 0x29, 0x97, 0xbe, 0x0b, 0xf9,
- 0x4f, 0x04, 0x66, 0xc2, 0x93, 0x94, 0x24, 0x55, 0x69, 0x4c, 0x93, 0x25, 0xdf, 0xaf, 0xb5, 0xcc,
- 0xf8, 0x3a, 0xe4, 0x24, 0x39, 0x3a, 0x69, 0x76, 0x0f, 0xf2, 0xed, 0x84, 0xc3, 0xaa, 0x15, 0xc0,
- 0x90, 0x93, 0x8c, 0x94, 0xe3, 0x45, 0x6c, 0x48, 0x0c, 0x51, 0xdf, 0xb3, 0x67, 0xa0, 0x18, 0x3d,
- 0x3e, 0xa3, 0x12, 0xe4, 0xaf, 0xde, 0x26, 0x3f, 0xb9, 0x42, 0x56, 0xe6, 0x52, 0xa8, 0x0c, 0x85,
- 0xf6, 0x95, 0xe5, 0x1b, 0x02, 0xb2, 0x96, 0xbe, 0xb6, 0x75, 0x66, 0x09, 0xd0, 0x0f, 0x20, 0x2b,
- 0xd3, 0xc5, 0xb1, 0x58, 0x7e, 0xf3, 0x99, 0xb7, 0x76, 0xfc, 0x00, 0x5e, 0x5a, 0x00, 0xa7, 0xde,
- 0xb5, 0xd0, 0x2d, 0x28, 0x09, 0xa4, 0x7a, 0xd0, 0x39, 0x39, 0xfe, 0xae, 0x92, 0xe0, 0x74, 0xea,
- 0x90, 0x59, 0x83, 0xdf, 0x25, 0xc8, 0x0a, 0x9f, 0x98, 0xd2, 0x98, 0x0f, 0x72, 0xa6, 0x34, 0x89,
- 0x27, 0x2e, 0x9c, 0x42, 0xdf, 0x07, 0x9b, 0xb7, 0x38, 0xc8, 0x28, 0x2a, 0xc6, 0x3b, 0x4c, 0xed,
- 0xd8, 0x38, 0xda, 0xd8, 0xf6, 0x83, 0xe8, 0x39, 0xe9, 0xf8, 0x78, 0x5b, 0xab, 0x97, 0x57, 0x0f,
- 0x4e, 0x44, 0x3b, 0xdf, 0x96, 0xef, 0x1e, 0xba, 0xb9, 0x42, 0xa7, 0x92, 0x5b, 0x8d, 0xf5, 0x62,
- 0xb5, 0xfa, 0x61, 0xd3, 0x11, 0xc3, 0x35, 0x28, 0x19, 0x8d, 0x8d, 0x69, 0xd6, 0x83, 0x5d, 0x99,
- 0x69, 0xd6, 0x09, 0xdd, 0x10, 0x4e, 0xa1, 0x6b, 0x50, 0xe0, 0xa5, 0x98, 0x67, 0x24, 0x74, 0x62,
- 0xbc, 0xe2, 0x1a, 0x99, 0xb6, 0x76, 0x72, 0xf2, 0x64, 0xc4, 0xe8, 0x47, 0x50, 0xbc, 0x46, 0x99,
- 0x0a, 0xd7, 0xe3, 0xe3, 0xf1, 0x3e, 0xc1, 0x52, 0xc9, 0x33, 0x83, 0x53, 0x4b, 0x3f, 0xd7, 0x7f,
- 0x4a, 0xad, 0x38, 0xcc, 0x41, 0xb7, 0x61, 0x56, 0x08, 0x16, 0xfd, 0x6b, 0x95, 0x08, 0xa0, 0x03,
- 0x7f, 0x91, 0x25, 0x02, 0xe8, 0xe0, 0x5f, 0x65, 0x38, 0xd5, 0xbe, 0xff, 0xf4, 0x59, 0x3d, 0xf5,
- 0xc5, 0xb3, 0x7a, 0xea, 0xab, 0x67, 0x75, 0xeb, 0x97, 0x7b, 0x75, 0xeb, 0x8f, 0x7b, 0x75, 0xeb,
- 0xc9, 0x5e, 0xdd, 0x7a, 0xba, 0x57, 0xb7, 0xfe, 0xb5, 0x57, 0xb7, 0xfe, 0xbd, 0x57, 0x4f, 0x7d,
- 0xb5, 0x57, 0xb7, 0x3e, 0x7d, 0x5e, 0x4f, 0x3d, 0x7d, 0x5e, 0x4f, 0x7d, 0xf1, 0xbc, 0x9e, 0xba,
- 0xff, 0xe6, 0x7f, 0xb9, 0xe8, 0xc9, 0x46, 0x34, 0x27, 0x3e, 0x17, 0xfe, 0x13, 0x00, 0x00, 0xff,
- 0xff, 0xb0, 0x19, 0x00, 0xf7, 0x53, 0x1c, 0x00, 0x00,
+ 0x2d, 0xe6, 0x6b, 0x77, 0x96, 0xa2, 0xdd, 0x50, 0x75, 0x51, 0xf8, 0xc2, 0x9d, 0x79, 0xf3, 0xe6,
+ 0xcd, 0xfb, 0x9a, 0xf7, 0x31, 0x84, 0x13, 0x83, 0x9d, 0x6e, 0xab, 0xef, 0x77, 0x07, 0x81, 0xcf,
+ 0xfc, 0x68, 0xd0, 0x14, 0xbf, 0xa8, 0xa0, 0xe7, 0xb5, 0x4a, 0xd7, 0xef, 0xfa, 0x12, 0x87, 0x8f,
+ 0xe4, 0x7a, 0xad, 0xd1, 0xf5, 0xfd, 0x6e, 0x9f, 0xb6, 0xc4, 0x6c, 0x63, 0xb8, 0xd5, 0x62, 0x3d,
+ 0x97, 0x86, 0xcc, 0x71, 0x07, 0x0a, 0x61, 0x41, 0x51, 0x7f, 0xd8, 0x77, 0xfd, 0x0e, 0xed, 0xb7,
+ 0x42, 0xe6, 0xb0, 0x50, 0xfe, 0x2a, 0x8c, 0x79, 0x8e, 0x31, 0x18, 0x86, 0xdb, 0xe2, 0x47, 0x02,
+ 0x71, 0x05, 0xd0, 0x3a, 0x0b, 0xa8, 0xe3, 0x12, 0x87, 0xd1, 0x90, 0xd0, 0x87, 0x43, 0x1a, 0x32,
+ 0x7c, 0x13, 0xe6, 0x13, 0xd0, 0x70, 0xe0, 0x7b, 0x21, 0x45, 0x17, 0xa1, 0x14, 0xc6, 0xe0, 0xaa,
+ 0xb5, 0x90, 0x59, 0x2c, 0x2d, 0x55, 0x9a, 0x91, 0x28, 0xf1, 0x1e, 0x62, 0x22, 0xe2, 0xdf, 0x58,
+ 0x00, 0xf1, 0x1a, 0xaa, 0x03, 0xc8, 0xd5, 0x8f, 0x9c, 0x70, 0xbb, 0x6a, 0x2d, 0x58, 0x8b, 0x36,
+ 0x31, 0x20, 0xe8, 0x1c, 0x1c, 0x8d, 0x67, 0xb7, 0xfc, 0xf5, 0x6d, 0x27, 0xe8, 0x54, 0xd3, 0x02,
+ 0xed, 0xe0, 0x02, 0x42, 0x60, 0x07, 0x0e, 0xa3, 0xd5, 0xcc, 0x82, 0xb5, 0x98, 0x21, 0x62, 0x8c,
+ 0x8e, 0x41, 0x8e, 0x51, 0xcf, 0xf1, 0x58, 0xd5, 0x5e, 0xb0, 0x16, 0x8b, 0x44, 0xcd, 0x38, 0x9c,
+ 0xcb, 0x4e, 0xc3, 0x6a, 0x76, 0xc1, 0x5a, 0x9c, 0x21, 0x6a, 0x86, 0x3f, 0xcf, 0x40, 0xf9, 0xe3,
+ 0x21, 0x0d, 0x76, 0x95, 0x02, 0x50, 0x1d, 0x0a, 0x21, 0xed, 0xd3, 0x4d, 0xe6, 0x07, 0x82, 0xc1,
+ 0x62, 0x3b, 0x5d, 0xb5, 0x48, 0x04, 0x43, 0x15, 0xc8, 0xf6, 0x7b, 0x6e, 0x8f, 0x09, 0xb6, 0x66,
+ 0x88, 0x9c, 0xa0, 0x4b, 0x90, 0x0d, 0x99, 0x13, 0x30, 0xc1, 0x4b, 0x69, 0xa9, 0xd6, 0x94, 0x46,
+ 0x6b, 0x6a, 0xa3, 0x35, 0xef, 0x6a, 0xa3, 0xb5, 0x0b, 0x4f, 0x46, 0x8d, 0xd4, 0x67, 0xff, 0x68,
+ 0x58, 0x44, 0x6e, 0x41, 0x17, 0x21, 0x43, 0xbd, 0x8e, 0xe0, 0xf7, 0x9b, 0xee, 0xe4, 0x1b, 0xd0,
+ 0x79, 0x28, 0x76, 0x7a, 0x01, 0xdd, 0x64, 0x3d, 0xdf, 0x13, 0x52, 0xcd, 0x2e, 0xcd, 0xc7, 0x16,
+ 0x59, 0xd1, 0x4b, 0x24, 0xc6, 0x42, 0xe7, 0x20, 0x17, 0x72, 0xd5, 0x85, 0xd5, 0xfc, 0x42, 0x66,
+ 0xb1, 0xd8, 0xae, 0xec, 0x8f, 0x1a, 0x73, 0x12, 0x72, 0xce, 0x77, 0x7b, 0x8c, 0xba, 0x03, 0xb6,
+ 0x4b, 0x14, 0x0e, 0x3a, 0x0b, 0xf9, 0x0e, 0xed, 0x53, 0x6e, 0xf0, 0x82, 0x30, 0xf8, 0x9c, 0x41,
+ 0x5e, 0x2c, 0x10, 0x8d, 0x80, 0xee, 0x83, 0x3d, 0xe8, 0x3b, 0x5e, 0xb5, 0x28, 0xa4, 0x98, 0x8d,
+ 0x11, 0xef, 0xf4, 0x1d, 0xaf, 0x7d, 0xf1, 0xcb, 0x51, 0x63, 0xa9, 0xdb, 0x63, 0xdb, 0xc3, 0x8d,
+ 0xe6, 0xa6, 0xef, 0xb6, 0xba, 0x81, 0xb3, 0xe5, 0x78, 0x4e, 0xab, 0xef, 0xef, 0xf4, 0x5a, 0xdc,
+ 0x39, 0x1f, 0x0e, 0x69, 0xd0, 0xa3, 0x41, 0x8b, 0xd3, 0x68, 0x0a, 0x7b, 0xf0, 0x7d, 0x44, 0xd0,
+ 0xbc, 0x6e, 0x17, 0x72, 0x73, 0x79, 0x3c, 0x4a, 0x03, 0x5a, 0x77, 0xdc, 0x41, 0x9f, 0x4e, 0x65,
+ 0xaf, 0xc8, 0x32, 0xe9, 0x43, 0x5b, 0x26, 0x33, 0xad, 0x65, 0x62, 0x35, 0xdb, 0xd3, 0xa9, 0x39,
+ 0xfb, 0x4d, 0xd5, 0x9c, 0x7b, 0xf5, 0x6a, 0xc6, 0x55, 0xb0, 0xf9, 0x0c, 0xcd, 0x41, 0x26, 0x70,
+ 0x1e, 0x09, 0x65, 0x96, 0x09, 0x1f, 0xe2, 0x35, 0xc8, 0x49, 0x46, 0x50, 0x6d, 0x5c, 0xdb, 0xc9,
+ 0x9b, 0x11, 0x6b, 0x3a, 0xa3, 0x75, 0x38, 0x17, 0xeb, 0x30, 0x23, 0xb4, 0x83, 0x7f, 0x6b, 0xc1,
+ 0x8c, 0x32, 0xa1, 0x8a, 0x2e, 0x1b, 0x90, 0x97, 0xb7, 0x5b, 0x47, 0x96, 0xe3, 0xe3, 0x91, 0xe5,
+ 0x4a, 0xc7, 0x19, 0x30, 0x1a, 0xb4, 0x5b, 0x4f, 0x46, 0x0d, 0xeb, 0xcb, 0x51, 0xe3, 0xad, 0x97,
+ 0x49, 0x29, 0x82, 0x9c, 0x8a, 0x3a, 0x9a, 0x30, 0x7a, 0x5b, 0x70, 0xc7, 0x42, 0xe5, 0x07, 0x47,
+ 0x9a, 0x32, 0x40, 0xae, 0x7a, 0x5d, 0x1a, 0x72, 0xca, 0x36, 0x37, 0x21, 0x91, 0x38, 0xf8, 0xe7,
+ 0x30, 0x9f, 0x70, 0x35, 0xc5, 0xe7, 0xfb, 0x90, 0x0b, 0xb9, 0x02, 0x35, 0x9b, 0x86, 0xa1, 0xd6,
+ 0x05, 0xbc, 0x3d, 0xab, 0xf8, 0xcb, 0xc9, 0x39, 0x51, 0xf8, 0xd3, 0x9d, 0xfe, 0x57, 0x0b, 0xca,
+ 0x6b, 0xce, 0x06, 0xed, 0x6b, 0x1f, 0x47, 0x60, 0x7b, 0x8e, 0x4b, 0x95, 0xc6, 0xc5, 0x98, 0x07,
+ 0xb4, 0x4f, 0x9c, 0xfe, 0x90, 0x4a, 0x92, 0x05, 0xa2, 0x66, 0xd3, 0x46, 0x22, 0xeb, 0xd0, 0x91,
+ 0xc8, 0x8a, 0xfd, 0xbd, 0x02, 0x59, 0xee, 0x59, 0xbb, 0x22, 0x0a, 0x15, 0x89, 0x9c, 0xe0, 0xb7,
+ 0x60, 0x46, 0x49, 0xa1, 0xd4, 0x17, 0xb3, 0xcc, 0xd5, 0x57, 0xd4, 0x2c, 0x63, 0x17, 0x72, 0x52,
+ 0xdb, 0xe8, 0x4d, 0x28, 0x46, 0xd9, 0x4d, 0x48, 0x9b, 0x69, 0xe7, 0xf6, 0x47, 0x8d, 0x34, 0x0b,
+ 0x49, 0xbc, 0x80, 0x1a, 0x90, 0x15, 0x3b, 0x85, 0xe4, 0x56, 0xbb, 0xb8, 0x3f, 0x6a, 0x48, 0x00,
+ 0x91, 0x1f, 0x74, 0x12, 0xec, 0x6d, 0x9e, 0x60, 0xb8, 0x0a, 0xec, 0x76, 0x61, 0x7f, 0xd4, 0x10,
+ 0x73, 0x22, 0x7e, 0xf1, 0x35, 0x28, 0xaf, 0xd1, 0xae, 0xb3, 0xb9, 0xab, 0x0e, 0xad, 0x68, 0x72,
+ 0xfc, 0x40, 0x4b, 0xd3, 0x38, 0x0d, 0xe5, 0xe8, 0xc4, 0x07, 0x6e, 0xa8, 0x9c, 0xba, 0x14, 0xc1,
+ 0x6e, 0x86, 0xf8, 0xd7, 0x16, 0x28, 0x3b, 0x23, 0x0c, 0xb9, 0x3e, 0x97, 0x35, 0x54, 0x31, 0x08,
+ 0xf6, 0x47, 0x0d, 0x05, 0x21, 0xea, 0x8b, 0x2e, 0x43, 0x3e, 0x14, 0x27, 0x72, 0x62, 0xe3, 0xee,
+ 0x23, 0x16, 0xda, 0x47, 0xb8, 0x1b, 0xec, 0x8f, 0x1a, 0x1a, 0x91, 0xe8, 0x01, 0x6a, 0x26, 0x32,
+ 0xa7, 0x14, 0x6c, 0x76, 0x7f, 0xd4, 0x30, 0xa0, 0x66, 0x26, 0xc5, 0x5f, 0x5b, 0x50, 0xba, 0xeb,
+ 0xf4, 0x22, 0x17, 0xaa, 0x6a, 0x13, 0xc5, 0x31, 0x52, 0x02, 0xf8, 0x95, 0xee, 0xd0, 0xbe, 0xb3,
+ 0x7b, 0xd5, 0x0f, 0x04, 0xdd, 0x19, 0x12, 0xcd, 0xe3, 0x64, 0x67, 0x4f, 0x4c, 0x76, 0xd9, 0xe9,
+ 0x43, 0xea, 0xff, 0x30, 0x80, 0x5d, 0xb7, 0x0b, 0xe9, 0xb9, 0x0c, 0xfe, 0xa3, 0x05, 0x65, 0x29,
+ 0xb9, 0x72, 0xbb, 0x9f, 0x40, 0x4e, 0x2a, 0x46, 0xc8, 0xfe, 0x92, 0xe0, 0xf2, 0xf6, 0x34, 0x81,
+ 0x45, 0xd1, 0x44, 0xdf, 0x87, 0xd9, 0x4e, 0xe0, 0x0f, 0x06, 0xb4, 0xb3, 0xae, 0x42, 0x58, 0x7a,
+ 0x3c, 0x84, 0xad, 0x98, 0xeb, 0x64, 0x0c, 0x1d, 0xff, 0xcd, 0x82, 0x19, 0x15, 0x2d, 0x94, 0xad,
+ 0x22, 0xfd, 0x5a, 0x87, 0x4e, 0x59, 0xe9, 0x69, 0x53, 0xd6, 0x31, 0xc8, 0x75, 0x03, 0x7f, 0x38,
+ 0x08, 0xab, 0x19, 0x79, 0x37, 0xe5, 0x6c, 0xba, 0x54, 0x86, 0xaf, 0xc3, 0xac, 0x16, 0xe5, 0x05,
+ 0x21, 0xb3, 0x36, 0x1e, 0x32, 0x57, 0x3b, 0xd4, 0x63, 0xbd, 0xad, 0x5e, 0x14, 0x04, 0x15, 0x3e,
+ 0xfe, 0xa5, 0x05, 0x73, 0xe3, 0x28, 0x68, 0xc5, 0xb8, 0x67, 0x9c, 0xdc, 0x99, 0x17, 0x93, 0x6b,
+ 0x8a, 0xe0, 0x13, 0x7e, 0xe8, 0xb1, 0x60, 0x57, 0x93, 0x96, 0x7b, 0x6b, 0xef, 0x41, 0xc9, 0x58,
+ 0xe4, 0x29, 0x6a, 0x87, 0xaa, 0x9b, 0x41, 0xf8, 0x30, 0x0e, 0x09, 0x69, 0x19, 0xd0, 0xc4, 0x04,
+ 0xff, 0xca, 0x82, 0x99, 0x84, 0x2d, 0xd1, 0xfb, 0x60, 0x6f, 0x05, 0xbe, 0x3b, 0x95, 0xa1, 0xc4,
+ 0x0e, 0xf4, 0x6d, 0x48, 0x33, 0x7f, 0x2a, 0x33, 0xa5, 0x99, 0xcf, 0xad, 0xa4, 0xc4, 0xcf, 0xc8,
+ 0xea, 0x56, 0xce, 0xf0, 0x7b, 0x50, 0x14, 0x02, 0xdd, 0x71, 0x7a, 0xc1, 0xc4, 0x6c, 0x31, 0x59,
+ 0xa0, 0xcb, 0x70, 0x44, 0x46, 0xc2, 0xc9, 0x9b, 0xcb, 0x93, 0x36, 0x97, 0xf5, 0xe6, 0x13, 0x90,
+ 0x5d, 0xde, 0x1e, 0x7a, 0x3b, 0x7c, 0x4b, 0xc7, 0x61, 0x8e, 0xde, 0xc2, 0xc7, 0xf8, 0x0d, 0x98,
+ 0xe7, 0x77, 0x90, 0x06, 0xe1, 0xb2, 0x3f, 0xf4, 0x98, 0xee, 0x2e, 0xce, 0x41, 0x25, 0x09, 0x56,
+ 0x5e, 0x52, 0x81, 0xec, 0x26, 0x07, 0x08, 0x1a, 0x33, 0x44, 0x4e, 0xf0, 0xef, 0x2c, 0x40, 0xd7,
+ 0x28, 0x13, 0xa7, 0xac, 0xae, 0x44, 0xd7, 0xa3, 0x06, 0x05, 0xd7, 0x61, 0x9b, 0xdb, 0x34, 0x08,
+ 0x75, 0x0d, 0xa2, 0xe7, 0xff, 0x8f, 0x6a, 0x0f, 0x9f, 0x87, 0xf9, 0x04, 0x97, 0x4a, 0xa6, 0x1a,
+ 0x14, 0x36, 0x15, 0x4c, 0xe5, 0xbb, 0x68, 0x8e, 0xff, 0x94, 0x86, 0x82, 0xd8, 0x40, 0xe8, 0x16,
+ 0x3a, 0x0f, 0xa5, 0xad, 0x9e, 0xd7, 0xa5, 0xc1, 0x20, 0xe8, 0x29, 0x15, 0xd8, 0xed, 0x23, 0xfb,
+ 0xa3, 0x86, 0x09, 0x26, 0xe6, 0x04, 0xbd, 0x03, 0xf9, 0x61, 0x48, 0x83, 0x07, 0x3d, 0x79, 0xd3,
+ 0x8b, 0xed, 0xca, 0xde, 0xa8, 0x91, 0xfb, 0x61, 0x48, 0x83, 0xd5, 0x15, 0x9e, 0x79, 0x86, 0x62,
+ 0x44, 0xe4, 0xb7, 0x83, 0x6e, 0x28, 0x37, 0x15, 0x45, 0x58, 0xfb, 0x3b, 0x9c, 0xfd, 0xb1, 0x50,
+ 0x37, 0x08, 0x7c, 0x97, 0xb2, 0x6d, 0x3a, 0x0c, 0x5b, 0x9b, 0xbe, 0xeb, 0xfa, 0x5e, 0x4b, 0xf4,
+ 0x92, 0x42, 0x68, 0x9e, 0x3e, 0xf9, 0x76, 0xe5, 0xb9, 0x77, 0x21, 0xcf, 0xb6, 0x03, 0x7f, 0xd8,
+ 0xdd, 0x16, 0x59, 0x21, 0xd3, 0xbe, 0x34, 0x3d, 0x3d, 0x4d, 0x81, 0xe8, 0x01, 0x3a, 0xcd, 0xb5,
+ 0x45, 0x37, 0x77, 0xc2, 0xa1, 0x2b, 0x3b, 0xb4, 0x76, 0x76, 0x7f, 0xd4, 0xb0, 0xde, 0x21, 0x11,
+ 0x18, 0x7f, 0x9a, 0x86, 0x86, 0x70, 0xd4, 0x7b, 0xa2, 0x6c, 0xb8, 0xea, 0x07, 0x37, 0x29, 0x0b,
+ 0x7a, 0x9b, 0xb7, 0x1c, 0x97, 0x6a, 0xdf, 0x68, 0x40, 0xc9, 0x15, 0xc0, 0x07, 0xc6, 0x15, 0x00,
+ 0x37, 0xc2, 0x43, 0xa7, 0x00, 0xc4, 0x9d, 0x91, 0xeb, 0xf2, 0x36, 0x14, 0x05, 0x44, 0x2c, 0x2f,
+ 0x27, 0x34, 0xd5, 0x9a, 0x52, 0x32, 0xa5, 0xa1, 0xd5, 0x71, 0x0d, 0x4d, 0x4d, 0x27, 0x52, 0x8b,
+ 0xe9, 0xeb, 0xd9, 0xa4, 0xaf, 0xe3, 0xbf, 0x5b, 0x50, 0x5f, 0xd3, 0x9c, 0x1f, 0x52, 0x1d, 0x5a,
+ 0xde, 0xf4, 0x2b, 0x92, 0x37, 0xf3, 0xdf, 0xc9, 0x8b, 0xeb, 0x00, 0x6b, 0x3d, 0x8f, 0x5e, 0xed,
+ 0xf5, 0x19, 0x0d, 0x26, 0x74, 0x22, 0x9f, 0x66, 0xe2, 0x90, 0x40, 0xe8, 0x96, 0x96, 0x73, 0xd9,
+ 0x88, 0xc3, 0xaf, 0x42, 0x8c, 0xf4, 0x2b, 0x34, 0x5b, 0x66, 0x2c, 0x44, 0xed, 0x40, 0x7e, 0x4b,
+ 0x88, 0x27, 0x53, 0x6a, 0xe2, 0x19, 0x25, 0x96, 0xbd, 0x7d, 0x59, 0x1d, 0x7e, 0xe1, 0x65, 0x05,
+ 0x89, 0x78, 0xf5, 0x69, 0x85, 0xbb, 0x1e, 0x73, 0x1e, 0x1b, 0x9b, 0x89, 0x3e, 0x01, 0xfd, 0x4c,
+ 0x95, 0x5b, 0xd9, 0x89, 0xe5, 0x96, 0xbe, 0xb9, 0x87, 0xef, 0x19, 0x3f, 0x88, 0x63, 0x9f, 0x30,
+ 0x87, 0x8a, 0x7d, 0x67, 0xc0, 0x0e, 0xe8, 0x96, 0x4e, 0xd2, 0x28, 0x3e, 0x36, 0xc2, 0x14, 0xeb,
+ 0xf8, 0xcf, 0x16, 0xcc, 0x5d, 0xa3, 0x2c, 0x59, 0xfe, 0xbc, 0x46, 0xc6, 0xc4, 0x1f, 0xc1, 0x51,
+ 0x83, 0x7f, 0x25, 0xfd, 0x85, 0xb1, 0x9a, 0xe7, 0x8d, 0x58, 0xfe, 0x55, 0xaf, 0x43, 0x1f, 0xab,
+ 0x5e, 0x31, 0x59, 0xee, 0xdc, 0x81, 0x92, 0xb1, 0x88, 0xae, 0x8c, 0x15, 0x3a, 0xc6, 0xcb, 0x4e,
+ 0x94, 0xac, 0xdb, 0x15, 0x25, 0x93, 0xec, 0x16, 0x55, 0x19, 0x1b, 0x15, 0x05, 0xeb, 0x80, 0x84,
+ 0xb9, 0x04, 0x59, 0x33, 0x2d, 0x09, 0xe8, 0x8d, 0xa8, 0xe2, 0x89, 0xe6, 0xe8, 0x34, 0xd8, 0x81,
+ 0xff, 0x48, 0x57, 0xb0, 0x33, 0xf1, 0x91, 0xc4, 0x7f, 0x44, 0xc4, 0x12, 0xbe, 0x0c, 0x19, 0xe2,
+ 0x3f, 0x42, 0x75, 0x80, 0xc0, 0xf1, 0xba, 0xf4, 0x5e, 0xd4, 0x38, 0x95, 0x89, 0x01, 0x79, 0x41,
+ 0xc9, 0xb0, 0x0c, 0x47, 0x4d, 0x8e, 0xa4, 0xb9, 0x9b, 0x90, 0xff, 0x78, 0x68, 0xaa, 0xab, 0x32,
+ 0xa6, 0x2e, 0xd9, 0x83, 0x6b, 0x24, 0xee, 0x33, 0x10, 0xc3, 0xd1, 0x49, 0x28, 0x32, 0x67, 0xa3,
+ 0x4f, 0x6f, 0xc5, 0x01, 0x2e, 0x06, 0xf0, 0x55, 0xde, 0xf3, 0xdd, 0x33, 0x6a, 0x9f, 0x18, 0x80,
+ 0xce, 0xc2, 0x5c, 0xcc, 0xf3, 0x9d, 0x80, 0x6e, 0xf5, 0x1e, 0x0b, 0x0b, 0x97, 0xc9, 0x01, 0x38,
+ 0x5a, 0x84, 0x23, 0x31, 0x6c, 0x5d, 0xd4, 0x18, 0xb6, 0x40, 0x1d, 0x07, 0x73, 0xdd, 0x08, 0x71,
+ 0x3f, 0x7c, 0x38, 0x74, 0xfa, 0xe2, 0xe6, 0x95, 0x89, 0x01, 0xc1, 0x7f, 0xb1, 0xe0, 0xa8, 0x34,
+ 0x35, 0xef, 0xf6, 0x5f, 0x47, 0xaf, 0xff, 0xdc, 0x02, 0x64, 0x4a, 0xa0, 0x5c, 0xeb, 0x5b, 0xe6,
+ 0x33, 0x0e, 0x2f, 0x62, 0x4a, 0xa2, 0x95, 0x95, 0xa0, 0xf8, 0x25, 0x06, 0x43, 0x4e, 0x14, 0x42,
+ 0xb2, 0xa7, 0xb6, 0x65, 0xaf, 0x2c, 0x21, 0x44, 0x7d, 0x79, 0x8b, 0xbf, 0xb1, 0xcb, 0x68, 0xa8,
+ 0x3a, 0x5d, 0xd1, 0xe2, 0x0b, 0x00, 0x91, 0x1f, 0x7e, 0x16, 0xf5, 0x98, 0xf0, 0x1a, 0x3b, 0x3e,
+ 0x4b, 0x81, 0x88, 0x1e, 0xe0, 0x3f, 0xa4, 0x61, 0xe6, 0x9e, 0xdf, 0x1f, 0xc6, 0x29, 0xf1, 0x75,
+ 0x4a, 0x15, 0x89, 0xf6, 0x3b, 0xab, 0xdb, 0x6f, 0x04, 0x76, 0xc8, 0xe8, 0x40, 0x78, 0x56, 0x86,
+ 0x88, 0x31, 0xc2, 0x50, 0x66, 0x4e, 0xd0, 0xa5, 0x4c, 0xf6, 0x35, 0xd5, 0x9c, 0x28, 0x38, 0x13,
+ 0x30, 0xb4, 0x00, 0x25, 0xa7, 0xdb, 0x0d, 0x68, 0xd7, 0x61, 0xb4, 0xbd, 0x5b, 0xcd, 0x8b, 0xc3,
+ 0x4c, 0x10, 0xfe, 0x31, 0xcc, 0x6a, 0x65, 0x29, 0x93, 0xbe, 0x0b, 0xf9, 0x4f, 0x04, 0x64, 0xc2,
+ 0x93, 0x97, 0x44, 0x55, 0x61, 0x4c, 0xa3, 0x25, 0xdf, 0xc7, 0x35, 0xcf, 0xf8, 0x3a, 0xe4, 0x24,
+ 0x3a, 0x3a, 0x69, 0x76, 0x27, 0xf2, 0x6d, 0x86, 0xcf, 0x55, 0xab, 0x81, 0x21, 0x27, 0x09, 0x29,
+ 0xc3, 0x0b, 0xdf, 0x90, 0x10, 0xa2, 0xbe, 0x67, 0xcf, 0x40, 0x31, 0x7a, 0xdc, 0x46, 0x25, 0xc8,
+ 0x5f, 0xbd, 0x4d, 0x7e, 0x74, 0x85, 0xac, 0xcc, 0xa5, 0x50, 0x19, 0x0a, 0xed, 0x2b, 0xcb, 0x37,
+ 0xc4, 0xcc, 0x5a, 0xfa, 0xda, 0xd6, 0x91, 0x25, 0x40, 0xdf, 0x83, 0xac, 0x0c, 0x17, 0xc7, 0x62,
+ 0xfe, 0xcd, 0x67, 0xe4, 0xda, 0xf1, 0x03, 0x70, 0xa9, 0x01, 0x9c, 0x7a, 0xd7, 0x42, 0xb7, 0xa0,
+ 0x24, 0x80, 0xea, 0xc1, 0xe8, 0xe4, 0xf8, 0xbb, 0x4d, 0x82, 0xd2, 0xa9, 0x17, 0xac, 0x1a, 0xf4,
+ 0x2e, 0x41, 0x56, 0xd8, 0xc4, 0xe4, 0xc6, 0x7c, 0xf0, 0x33, 0xb9, 0x49, 0x3c, 0xa1, 0xe1, 0x14,
+ 0xfa, 0x2e, 0xd8, 0xbc, 0x85, 0x42, 0x46, 0x52, 0x31, 0xde, 0x79, 0x6a, 0xc7, 0xc6, 0xc1, 0xc6,
+ 0xb1, 0x1f, 0x44, 0xcf, 0x55, 0xc7, 0xc7, 0xdb, 0x66, 0xbd, 0xbd, 0x7a, 0x70, 0x21, 0x3a, 0xf9,
+ 0xb6, 0x7c, 0x57, 0xd1, 0xcd, 0x1b, 0x3a, 0x95, 0x3c, 0x6a, 0xac, 0xd7, 0xab, 0xd5, 0x5f, 0xb4,
+ 0x1c, 0x11, 0x5c, 0x83, 0x92, 0xd1, 0x38, 0x99, 0x6a, 0x3d, 0xd8, 0xf5, 0x99, 0x6a, 0x9d, 0xd0,
+ 0x6d, 0xe1, 0x14, 0xba, 0x06, 0x05, 0x9e, 0x8a, 0x79, 0x44, 0x42, 0x27, 0xc6, 0x33, 0xae, 0x11,
+ 0x69, 0x6b, 0x27, 0x27, 0x2f, 0x46, 0x84, 0x7e, 0x00, 0xc5, 0x6b, 0x94, 0x29, 0x77, 0x3d, 0x3e,
+ 0xee, 0xef, 0x13, 0x34, 0x95, 0xbc, 0x33, 0x38, 0xb5, 0xf4, 0x53, 0xfd, 0xa7, 0xd7, 0x8a, 0xc3,
+ 0x1c, 0x74, 0x1b, 0x66, 0x05, 0x63, 0xd1, 0xbf, 0x62, 0x09, 0x07, 0x3a, 0xf0, 0x17, 0x5c, 0xc2,
+ 0x81, 0x0e, 0xfe, 0x15, 0x87, 0x53, 0xed, 0xfb, 0x4f, 0x9f, 0xd5, 0x53, 0x5f, 0x3c, 0xab, 0xa7,
+ 0xbe, 0x7a, 0x56, 0xb7, 0x7e, 0xb1, 0x57, 0xb7, 0x7e, 0xbf, 0x57, 0xb7, 0x9e, 0xec, 0xd5, 0xad,
+ 0xa7, 0x7b, 0x75, 0xeb, 0x9f, 0x7b, 0x75, 0xeb, 0x5f, 0x7b, 0xf5, 0xd4, 0x57, 0x7b, 0x75, 0xeb,
+ 0xb3, 0xe7, 0xf5, 0xd4, 0xd3, 0xe7, 0xf5, 0xd4, 0x17, 0xcf, 0xeb, 0xa9, 0xfb, 0x6f, 0xfe, 0x87,
+ 0x42, 0x52, 0x36, 0xba, 0x39, 0xf1, 0xb9, 0xf0, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xbe,
+ 0x5b, 0x4c, 0xb3, 0x1c, 0x00, 0x00,
}
func (x Direction) String() string {
@@ -3772,6 +3775,9 @@ func (this *GetChunkRefRequest) Equal(that interface{}) bool {
return false
}
}
+ if !this.Plan.Equal(that1.Plan) {
+ return false
+ }
return true
}
func (this *GetChunkRefResponse) Equal(that interface{}) bool {
@@ -4586,12 +4592,13 @@ func (this *GetChunkRefRequest) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 8)
+ s := make([]string, 0, 9)
s = append(s, "&logproto.GetChunkRefRequest{")
s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n")
s = append(s, "Through: "+fmt.Sprintf("%#v", this.Through)+",\n")
s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n")
s = append(s, "Filters: "+fmt.Sprintf("%#v", this.Filters)+",\n")
+ s = append(s, "Plan: "+fmt.Sprintf("%#v", this.Plan)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -6720,6 +6727,16 @@ func (m *GetChunkRefRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ {
+ size := m.Plan.Size()
+ i -= size
+ if _, err := m.Plan.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintLogproto(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
if len(m.Filters) > 0 {
for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -7942,6 +7959,8 @@ func (m *GetChunkRefRequest) Size() (n int) {
n += 1 + l + sovLogproto(uint64(l))
}
}
+ l = m.Plan.Size()
+ n += 1 + l + sovLogproto(uint64(l))
return n
}
@@ -8620,6 +8639,7 @@ func (this *GetChunkRefRequest) String() string {
`Through:` + fmt.Sprintf("%v", this.Through) + `,`,
`Matchers:` + fmt.Sprintf("%v", this.Matchers) + `,`,
`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+ `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`,
`}`,
}, "")
return s
@@ -13037,6 +13057,39 @@ func (m *GetChunkRefRequest) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogproto(dAtA[iNdEx:])
diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto
index 0fde83d2715d..bf175168cfd9 100644
--- a/pkg/logproto/logproto.proto
+++ b/pkg/logproto/logproto.proto
@@ -311,10 +311,15 @@ message GetChunkRefRequest {
(gogoproto.nullable) = false
];
string matchers = 3;
+ // TODO(salvacorts): Delete this field once the weekly release is done.
repeated LineFilter filters = 4 [
(gogoproto.customtype) = "github.com/grafana/loki/pkg/logql/syntax.LineFilter",
(gogoproto.nullable) = false
];
+ Plan plan = 5 [
+ (gogoproto.customtype) = "github.com/grafana/loki/pkg/querier/plan.QueryPlan",
+ (gogoproto.nullable) = false
+ ];
}
message GetChunkRefResponse {
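For illustration, a minimal sketch of how a caller populates the new field 5 from the Go side (the query string and time bounds are hypothetical, and `syntax.MustParseExpr` is assumed to be the parser entry point used elsewhere in the codebase):

    req := &logproto.GetChunkRefRequest{
        From:     from,
        Through:  through,
        Matchers: `{app="fake"}`,
        // Plan (field 5) carries the parsed query AST; Filters (field 4)
        // remains populated only until the weekly release is done.
        Plan: plan.QueryPlan{AST: syntax.MustParseExpr(`{app="fake"} |= "foo"`)},
    }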
diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go
index cea41f4d95c5..26e77779c4b3 100644
--- a/pkg/logql/syntax/ast.go
+++ b/pkg/logql/syntax/ast.go
@@ -54,6 +54,22 @@ func MustClone[T Expr](e T) T {
return copied
}
+func ExtractLineFilters(e Expr) []LineFilterExpr {
+ if e == nil {
+ return nil
+ }
+ var filters []LineFilterExpr
+ visitor := &DepthFirstTraversal{
+ VisitLineFilterFn: func(v RootVisitor, e *LineFilterExpr) {
+ if e != nil {
+ filters = append(filters, *e)
+ }
+ },
+ }
+ e.Accept(visitor)
+ return filters
+}
+
// implicit holds default implementations
type implicit struct{}
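A short usage sketch for the new helper (the query is illustrative):

    expr, err := syntax.ParseExpr(`{env="prod"} |= "foo" or "bar" | logfmt |= "baz"`)
    if err != nil {
        return err
    }
    // Collects the line-filter stages encountered during the depth-first
    // walk, including those chained with `or`; a nil expression yields nil.
    filters := syntax.ExtractLineFilters(expr)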
diff --git a/pkg/logql/syntax/serialize_test.go b/pkg/logql/syntax/serialize_test.go
index f4051caaf7ea..2c6bb6f0ef66 100644
--- a/pkg/logql/syntax/serialize_test.go
+++ b/pkg/logql/syntax/serialize_test.go
@@ -30,6 +30,9 @@ func TestJSONSerializationRoundTrip(t *testing.T) {
"regexp": {
query: `{env="prod", app=~"loki.*"} |~ ".*foo.*"`,
},
+ "line filter": {
+ query: `{env="prod", app=~"loki.*"} |= "foo" |= "bar" or "baz" | line_format "blip{{ .foo }}blop" |= "blip"`,
+ },
"vector matching": {
query: `(sum by (cluster)(rate({foo="bar"}[5m])) / ignoring (cluster) count(rate({foo="bar"}[5m])))`,
},
diff --git a/pkg/querier/plan/plan.go b/pkg/querier/plan/plan.go
index 6822932d7b24..d6548537a394 100644
--- a/pkg/querier/plan/plan.go
+++ b/pkg/querier/plan/plan.go
@@ -4,6 +4,7 @@ import (
"bytes"
"github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/util"
)
type QueryPlan struct {
@@ -78,6 +79,20 @@ func (t QueryPlan) Equal(other QueryPlan) bool {
return bytes.Equal(left, right)
}
+func (t QueryPlan) String() string {
+ if t.AST == nil {
+ return ""
+ }
+ return t.AST.String()
+}
+
+func (t *QueryPlan) Hash() uint32 {
+ if t.AST == nil {
+ return 0
+ }
+ return util.HashedQuery(t.AST.String())
+}
+
// countWriter is not writing any bytes. It just counts the bytes that would be
// written.
type countWriter struct {
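Both helpers are nil-safe, so a plan can be used directly as a log or cache key without guarding the AST. A sketch, assuming `util.HashedQuery` hashes the string form:

    var empty plan.QueryPlan
    _ = empty.String() // "" instead of a nil-pointer panic
    _ = empty.Hash()   // 0 for an empty plan

    p := plan.QueryPlan{AST: syntax.MustParseExpr(`count_over_time({app="x"}[5m])`)}
    key := fmt.Sprintf("%d:%s", p.Hash(), p.String())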
diff --git a/pkg/storage/chunk/predicate.go b/pkg/storage/chunk/predicate.go
index 9dd769ccc4da..391b1e916323 100644
--- a/pkg/storage/chunk/predicate.go
+++ b/pkg/storage/chunk/predicate.go
@@ -1,16 +1,22 @@
package chunk
import (
+ "github.com/grafana/loki/pkg/querier/plan"
"github.com/prometheus/prometheus/model/labels"
-
- "github.com/grafana/loki/pkg/logql/syntax"
)
type Predicate struct {
Matchers []*labels.Matcher
- Filters []syntax.LineFilter
+ plan *plan.QueryPlan
+}
+
+func NewPredicate(m []*labels.Matcher, p *plan.QueryPlan) Predicate {
+ return Predicate{Matchers: m, plan: p}
}
-func NewPredicate(m []*labels.Matcher, f []syntax.LineFilter) Predicate {
- return Predicate{Matchers: m, Filters: f}
+func (p Predicate) Plan() plan.QueryPlan {
+ if p.plan != nil {
+ return *p.plan
+ }
+ return plan.QueryPlan{}
}
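Keeping `plan` unexported behind a value-returning accessor means call sites never branch on nil; a sketch:

    pred := chunk.NewPredicate(matchers, nil) // e.g. a caller that has no parsed plan
    qp := pred.Plan()                         // zero-value plan.QueryPlan; qp.AST == nil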
diff --git a/pkg/storage/store.go b/pkg/storage/store.go
index de5a60b7f038..57918066052a 100644
--- a/pkg/storage/store.go
+++ b/pkg/storage/store.go
@@ -21,10 +21,8 @@ import (
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
- "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/querier/astmapper"
- "github.com/grafana/loki/pkg/querier/plan"
"github.com/grafana/loki/pkg/storage/chunk"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/chunk/client"
@@ -477,34 +475,15 @@ func (s *LokiStore) SelectSeries(ctx context.Context, req logql.SelectLogParams)
return result, nil
}
-func extractLineFilters(p *plan.QueryPlan) []syntax.LineFilter {
- lineFilters := make([]syntax.LineFilter, 0)
- visitor := &syntax.DepthFirstTraversal{
- VisitLineFilterFn: func(v syntax.RootVisitor, e *syntax.LineFilterExpr) {
- if e.Left != nil {
- e.Left.Accept(v)
- }
- if e.Or != nil {
- e.Or.Accept(v)
- }
- lineFilters = append(lineFilters, e.LineFilter)
- },
- }
- p.AST.Accept(visitor)
- return lineFilters
-}
-
// SelectLogs returns an iterator that will query the store for more chunks while iterating instead of fetching all chunks upfront
// for that request.
func (s *LokiStore) SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) {
- lf := extractLineFilters(req.Plan)
-
matchers, from, through, err := decodeReq(req)
if err != nil {
return nil, err
}
- lazyChunks, err := s.lazyChunks(ctx, from, through, chunk.NewPredicate(matchers, lf))
+ lazyChunks, err := s.lazyChunks(ctx, from, through, chunk.NewPredicate(matchers, req.Plan))
if err != nil {
return nil, err
}
@@ -546,14 +525,12 @@ func (s *LokiStore) SelectLogs(ctx context.Context, req logql.SelectLogParams) (
}
func (s *LokiStore) SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) {
- lf := extractLineFilters(req.Plan)
-
matchers, from, through, err := decodeReq(req)
if err != nil {
return nil, err
}
- lazyChunks, err := s.lazyChunks(ctx, from, through, chunk.NewPredicate(matchers, lf))
+ lazyChunks, err := s.lazyChunks(ctx, from, through, chunk.NewPredicate(matchers, req.Plan))
if err != nil {
return nil, err
}
diff --git a/pkg/storage/stores/series/series_index_gateway_store.go b/pkg/storage/stores/series/series_index_gateway_store.go
index d937042275b8..00059fe16c1a 100644
--- a/pkg/storage/stores/series/series_index_gateway_store.go
+++ b/pkg/storage/stores/series/series_index_gateway_store.go
@@ -33,7 +33,7 @@ func (c *IndexGatewayClientStore) GetChunkRefs(ctx context.Context, _ string, fr
From: from,
Through: through,
Matchers: (&syntax.MatchersExpr{Mts: predicate.Matchers}).String(),
- Filters: predicate.Filters,
+ Plan: predicate.Plan(),
})
if err != nil {
return nil, err
diff --git a/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go b/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go
index 8b0f186386bd..25ce68a3bffd 100644
--- a/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go
+++ b/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go
@@ -6,9 +6,6 @@ import (
"sort"
"sync"
- "github.com/grafana/loki/pkg/storage/chunk"
- "github.com/grafana/loki/pkg/storage/stores/index/seriesvolume"
-
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/services"
@@ -20,9 +17,12 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
+ "github.com/grafana/loki/pkg/storage/chunk"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores"
"github.com/grafana/loki/pkg/storage/stores/index"
+ "github.com/grafana/loki/pkg/storage/stores/index/seriesvolume"
seriesindex "github.com/grafana/loki/pkg/storage/stores/series/index"
"github.com/grafana/loki/pkg/util/spanlogger"
)
@@ -49,7 +49,7 @@ type IndexClientWithRange struct {
}
type BloomQuerier interface {
- FilterChunkRefs(ctx context.Context, tenant string, from, through model.Time, chunks []*logproto.ChunkRef, filters ...syntax.LineFilter) ([]*logproto.ChunkRef, error)
+ FilterChunkRefs(ctx context.Context, tenant string, from, through model.Time, chunks []*logproto.ChunkRef, plan plan.QueryPlan) ([]*logproto.ChunkRef, error)
}
type Gateway struct {
@@ -204,7 +204,7 @@ func (g *Gateway) GetChunkRef(ctx context.Context, req *logproto.GetChunkRefRequ
return nil, err
}
- predicate := chunk.NewPredicate(matchers, req.Filters)
+ predicate := chunk.NewPredicate(matchers, &req.Plan)
chunks, _, err := g.indexQuerier.GetChunks(ctx, instanceID, req.From, req.Through, predicate)
if err != nil {
return nil, err
@@ -221,17 +221,20 @@ func (g *Gateway) GetChunkRef(ctx context.Context, req *logproto.GetChunkRefRequ
initialChunkCount := len(result.Refs)
- // Return unfiltered results if there is no bloom querier (Bloom Gateway disabled) or if there are not filters.
- if g.bloomQuerier == nil || len(req.Filters) == 0 {
- level.Info(g.log).Log("msg", "chunk filtering is not enabled or there is no line filter", "filters", len(req.Filters))
+ // Return unfiltered results if there is no bloom querier (Bloom Gateway disabled)
+ if g.bloomQuerier == nil {
+ level.Info(g.log).Log("msg", "chunk filtering is not enabled")
+ return result, nil
+ }
+
+ // Extract LineFilterExpr from the plan. If there is none, we can short-circuit and return before making a request
+ // to the bloom-gateway (through the g.bloomQuerier).
+ if len(syntax.ExtractLineFilters(req.Plan.AST)) == 0 {
+ level.Info(g.log).Log("msg", "there are no line filters")
return result, nil
}
- // TODO(chaudum): Take the chunks from the index querier's GetChunks()
- // response and send them to the bloom gateway along with the filter
- // expression that we got from the request object.
- // The bloom gateway returns the list of matching ChunkRefs.
- chunkRefs, err := g.bloomQuerier.FilterChunkRefs(ctx, instanceID, req.From, req.Through, result.Refs, req.Filters...)
+ chunkRefs, err := g.bloomQuerier.FilterChunkRefs(ctx, instanceID, req.From, req.Through, result.Refs, req.Plan)
if err != nil {
return nil, err
}
From 8193063a4a63867713d3b8ea11809477927e08ae Mon Sep 17 00:00:00 2001
From: Salva Corts
Date: Fri, 23 Feb 2024 17:38:45 +0100
Subject: [PATCH 115/130] feat: Convert line filter expressions to a set of
bloom tests (#12035)
---
pkg/bloomgateway/multiplexing.go | 18 +--
pkg/storage/bloom/v1/bloom_tester.go | 156 ++++++++++++++++++++++
pkg/storage/bloom/v1/bloom_tester_test.go | 145 ++++++++++++++++++++
pkg/storage/bloom/v1/fuse.go | 32 ++---
pkg/storage/bloom/v1/fuse_test.go | 12 +-
pkg/storage/bloom/v1/tokenizer.go | 17 +--
6 files changed, 342 insertions(+), 38 deletions(-)
create mode 100644 pkg/storage/bloom/v1/bloom_tester.go
create mode 100644 pkg/storage/bloom/v1/bloom_tester_test.go
diff --git a/pkg/bloomgateway/multiplexing.go b/pkg/bloomgateway/multiplexing.go
index 907f8f111eb1..c9e3e9cf6f4e 100644
--- a/pkg/bloomgateway/multiplexing.go
+++ b/pkg/bloomgateway/multiplexing.go
@@ -139,20 +139,20 @@ func (t Task) Copy(series []*logproto.GroupedChunkRefs) Task {
func (t Task) RequestIter(tokenizer *v1.NGramTokenizer) v1.Iterator[v1.Request] {
return &requestIterator{
- series: v1.NewSliceIter(t.series),
- searches: convertToSearches(tokenizer, t.filters...),
- channel: t.resCh,
- curr: v1.Request{},
+ series: v1.NewSliceIter(t.series),
+ search: v1.FiltersToBloomTest(tokenizer, t.filters...),
+ channel: t.resCh,
+ curr: v1.Request{},
}
}
var _ v1.Iterator[v1.Request] = &requestIterator{}
type requestIterator struct {
- series v1.Iterator[*logproto.GroupedChunkRefs]
- searches [][]byte
- channel chan<- v1.Output
- curr v1.Request
+ series v1.Iterator[*logproto.GroupedChunkRefs]
+ search v1.BloomTest
+ channel chan<- v1.Output
+ curr v1.Request
}
// At implements v1.Iterator.
@@ -175,7 +175,7 @@ func (it *requestIterator) Next() bool {
it.curr = v1.Request{
Fp: model.Fingerprint(group.Fingerprint),
Chks: convertToChunkRefs(group.Refs),
- Searches: it.searches,
+ Search: it.search,
Response: it.channel,
}
return true
diff --git a/pkg/storage/bloom/v1/bloom_tester.go b/pkg/storage/bloom/v1/bloom_tester.go
new file mode 100644
index 000000000000..19f9f8d557f0
--- /dev/null
+++ b/pkg/storage/bloom/v1/bloom_tester.go
@@ -0,0 +1,156 @@
+package v1
+
+import (
+ "github.com/prometheus/prometheus/model/labels"
+
+ "github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/storage/bloom/v1/filter"
+)
+
+type BloomTest interface {
+ Matches(bloom filter.Checker) bool
+ MatchesWithPrefixBuf(bloom filter.Checker, buf []byte, prefixLen int) bool
+}
+
+type BloomTests []BloomTest
+
+func (b BloomTests) Matches(bloom filter.Checker) bool {
+ for _, test := range b {
+ if !test.Matches(bloom) {
+ return false
+ }
+ }
+ return true
+}
+
+func (b BloomTests) MatchesWithPrefixBuf(bloom filter.Checker, buf []byte, prefixLen int) bool {
+ for _, test := range b {
+ if !test.MatchesWithPrefixBuf(bloom, buf, prefixLen) {
+ return false
+ }
+ }
+ return true
+}
+
+func FiltersToBloomTest(b NGramBuilder, filters ...syntax.LineFilterExpr) BloomTest {
+ tests := make(BloomTests, 0, len(filters))
+ for _, f := range filters {
+ if f.Left != nil {
+ tests = append(tests, FiltersToBloomTest(b, *f.Left))
+ }
+ if f.Or != nil {
+ left := FiltersToBloomTest(b, *f.Or)
+ right := simpleFilterToBloomTest(b, f.LineFilter)
+ tests = append(tests, newOrTest(left, right))
+ continue
+ }
+
+ tests = append(tests, simpleFilterToBloomTest(b, f.LineFilter))
+ }
+ return tests
+}
+
+func simpleFilterToBloomTest(b NGramBuilder, filter syntax.LineFilter) BloomTest {
+ switch filter.Ty {
+ case labels.MatchEqual, labels.MatchNotEqual:
+ var test BloomTest = newStringTest(b, filter.Match)
+ if filter.Ty == labels.MatchNotEqual {
+ test = newNotTest(test)
+ }
+ return test
+ case labels.MatchRegexp, labels.MatchNotRegexp:
+ // TODO(salvacorts): Simplify regex similarly to how it's done at pkg/logql/log/filter.go (`simplify` function)
+ // Ideally we want to extract the simplify logic into pkg/util/regex.go
+ return MatchAll
+ default:
+ return MatchAll
+ }
+}
+
+type matchAllTest struct{}
+
+var MatchAll = matchAllTest{}
+
+func (n matchAllTest) Matches(_ filter.Checker) bool {
+ return true
+}
+
+func (n matchAllTest) MatchesWithPrefixBuf(_ filter.Checker, _ []byte, _ int) bool {
+ return true
+}
+
+// NGramBuilder is an interface for tokenizing strings into ngrams
+// Extracting this interface allows us to test the bloom filter without having to use the actual tokenizer
+// TODO: This should be moved to tokenizer.go
+type NGramBuilder interface {
+ Tokens(line string) Iterator[[]byte]
+}
+
+type stringTest struct {
+ ngrams [][]byte
+}
+
+func newStringTest(b NGramBuilder, search string) stringTest {
+ var test stringTest
+ it := b.Tokens(search)
+ for it.Next() {
+ ngram := make([]byte, len(it.At()))
+ copy(ngram, it.At())
+ test.ngrams = append(test.ngrams, ngram)
+ }
+ return test
+}
+
+func (b stringTest) Matches(bloom filter.Checker) bool {
+ for _, ngram := range b.ngrams {
+ if !bloom.Test(ngram) {
+ return false
+ }
+ }
+ return true
+}
+
+func (b stringTest) MatchesWithPrefixBuf(bloom filter.Checker, buf []byte, prefixLen int) bool {
+ for _, ngram := range b.ngrams {
+ buf = append(buf[:prefixLen], ngram...)
+ if !bloom.Test(buf) {
+ return false
+ }
+ }
+ return true
+}
+
+type notTest struct {
+ BloomTest
+}
+
+func newNotTest(test BloomTest) BloomTest {
+ return notTest{BloomTest: test}
+}
+
+func (b notTest) Matches(bloom filter.Checker) bool {
+ return !b.BloomTest.Matches(bloom)
+}
+
+func (b notTest) MatchesWithPrefixBuf(bloom filter.Checker, buf []byte, prefixLen int) bool {
+ return !b.BloomTest.MatchesWithPrefixBuf(bloom, buf, prefixLen)
+}
+
+type orTest struct {
+ left, right BloomTest
+}
+
+func newOrTest(left, right BloomTest) orTest {
+ return orTest{
+ left: left,
+ right: right,
+ }
+}
+
+func (o orTest) Matches(bloom filter.Checker) bool {
+ return o.left.Matches(bloom) || o.right.Matches(bloom)
+}
+
+func (o orTest) MatchesWithPrefixBuf(bloom filter.Checker, buf []byte, prefixLen int) bool {
+ return o.left.MatchesWithPrefixBuf(bloom, buf, prefixLen) || o.right.MatchesWithPrefixBuf(bloom, buf, prefixLen)
+}
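A sketch of how the pieces compose end to end; `tokenizer` and `bloom` stand in for real implementations, much like the test doubles in the file below:

    expr, _ := syntax.ParseExpr(`{app="fake"} |= "foo" or "bar" != "nope"`)
    test := FiltersToBloomTest(tokenizer, syntax.ExtractLineFilters(expr)...)
    // BloomTests AND their members together; `or` chains become orTest,
    // and the negated match types are wrapped in notTest.
    _ = test.Matches(bloom)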
diff --git a/pkg/storage/bloom/v1/bloom_tester_test.go b/pkg/storage/bloom/v1/bloom_tester_test.go
new file mode 100644
index 000000000000..d99984f7b835
--- /dev/null
+++ b/pkg/storage/bloom/v1/bloom_tester_test.go
@@ -0,0 +1,145 @@
+package v1
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/storage/bloom/v1/filter"
+)
+
+func TestFiltersToBloomTests(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ query string
+ bloom filter.Checker
+ expectMatch bool
+ }{
+ {
+ name: "No filters",
+ query: `{app="fake"}`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: true,
+ },
+ {
+ name: "Single filter",
+ query: `{app="fake"} |= "foo"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: true,
+ },
+ {
+ name: "Single filter no match",
+ query: `{app="fake"} |= "nope"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: false,
+ },
+ {
+ name: "two filters",
+ query: `{app="fake"} |= "foo" |= "bar"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: true,
+ },
+ {
+ name: "two filters no match",
+ query: `{app="fake"} |= "foo" |= "nope"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: false,
+ },
+ {
+ name: "notEq match",
+ query: `{app="fake"} != "nope"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: true,
+ },
+ {
+ name: "notEq no match",
+ query: `{app="fake"} != "foo"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: false,
+ },
+ {
+ name: "or filter both match",
+ query: `{app="fake"} |= "foo" or "bar"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: true,
+ },
+ {
+ name: "or filter one right match",
+ query: `{app="fake"} |= "nope" or "foo"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: true,
+ },
+ {
+ name: "or filter one left match",
+ query: `{app="fake"} |= "foo" or "nope"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: true,
+ },
+ {
+ name: "or filter no match",
+ query: `{app="fake"} |= "no" or "nope"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: false,
+ },
+ {
+ name: "Not or filter match",
+ query: `{app="fake"} != "nope" or "no"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: true,
+ },
+ {
+ name: "Not or filter right no match",
+ query: `{app="fake"} != "nope" or "bar"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: false,
+ },
+ {
+ name: "Not or filter left no match",
+ query: `{app="fake"} != "foo" or "nope"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: false,
+ },
+ {
+ name: "Not or filter no match",
+ query: `{app="fake"} != "foo" or "bar"`,
+ bloom: fakeBloom{"foo", "bar"},
+ expectMatch: false,
+ },
+ {
+ name: "complex filter match",
+ query: `{app="fake"} |= "foo" |= "bar" or "baz" |= "fuzz" or "not" != "nope" != "no" or "none"`,
+ bloom: fakeBloom{"foo", "bar", "baz", "fuzz"},
+ expectMatch: true,
+ },
+ // TODO: test regexes
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ expr, err := syntax.ParseExpr(tc.query)
+ assert.NoError(t, err)
+ filters := syntax.ExtractLineFilters(expr)
+
+ bloomTests := FiltersToBloomTest(fakeNgramBuilder{}, filters...)
+
+ assert.Equal(t, tc.expectMatch, bloomTests.Matches(tc.bloom))
+ })
+ }
+}
+
+type fakeNgramBuilder struct{}
+
+func (f fakeNgramBuilder) Tokens(line string) Iterator[[]byte] {
+ return NewSliceIter[[]byte]([][]byte{[]byte(line)})
+}
+
+type fakeBloom []string
+
+func (f fakeBloom) Test(data []byte) bool {
+ str := string(data)
+ for _, match := range f {
+ if str == match {
+ return true
+ }
+ }
+ return false
+}
diff --git a/pkg/storage/bloom/v1/fuse.go b/pkg/storage/bloom/v1/fuse.go
index 77937ecfc08e..14a836c7b066 100644
--- a/pkg/storage/bloom/v1/fuse.go
+++ b/pkg/storage/bloom/v1/fuse.go
@@ -8,7 +8,7 @@ import (
type Request struct {
Fp model.Fingerprint
Chks ChunkRefs
- Searches [][]byte
+ Search BloomTest
Response chan<- Output
}
@@ -103,22 +103,19 @@ func (fq *FusedQuerier) Run() error {
bloom := fq.bq.blooms.At()
// test every input against this chunk
- inputLoop:
for _, input := range nextBatch {
_, inBlooms := input.Chks.Compare(series.Chunks, true)
// First, see if the search passes the series level bloom before checking for chunks individually
- for _, search := range input.Searches {
- if !bloom.Test(search) {
- // We return all the chunks that were the intersection of the query
- // because they for sure do not match the search and don't
- // need to be downloaded
- input.Response <- Output{
- Fp: fp,
- Removals: inBlooms,
- }
- continue inputLoop
+ if !input.Search.Matches(bloom) {
+ // We return all the chunks that were the intersection of the query
+ // because they for sure do not match the search and don't
+ // need to be downloaded
+ input.Response <- Output{
+ Fp: fp,
+ Removals: inBlooms,
}
+ continue
}
// TODO(owen-d): pool
@@ -128,17 +125,12 @@ func (fq *FusedQuerier) Run() error {
var tokenBuf []byte
var prefixLen int
- chunkLoop:
for _, chk := range inBlooms {
// Get buf to concatenate the chunk and search token
tokenBuf, prefixLen = prefixedToken(schema.NGramLen(), chk, tokenBuf)
- for _, search := range input.Searches {
- tokenBuf = append(tokenBuf[:prefixLen], search...)
-
- if !bloom.Test(tokenBuf) {
- removals = append(removals, chk)
- continue chunkLoop
- }
+ if !input.Search.MatchesWithPrefixBuf(bloom, tokenBuf, prefixLen) {
+ removals = append(removals, chk)
+ continue
}
// Otherwise, the chunk passed all the searches
}
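The replacement preserves the old semantics: a BloomTests value only matches when every member does, mirroring the removed every-search-must-pass loops, while `or`/`not` now compose inside a single test. Schematically (tokenizer, bloom, tokenBuf and prefixLen assumed from the surrounding code):

    search := BloomTests{
        newStringTest(tokenizer, "foo"),
        newOrTest(newStringTest(tokenizer, "bar"), newStringTest(tokenizer, "baz")),
    }
    if !search.Matches(bloom) {
        // series-level miss: all intersecting chunks can be removed, as before
    }
    // per chunk, the prefixed variant reuses buf[:prefixLen] as the chunk prefix
    _ = search.MatchesWithPrefixBuf(bloom, tokenBuf, prefixLen)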
diff --git a/pkg/storage/bloom/v1/fuse_test.go b/pkg/storage/bloom/v1/fuse_test.go
index 1e35895794c2..ab5aa478aab3 100644
--- a/pkg/storage/bloom/v1/fuse_test.go
+++ b/pkg/storage/bloom/v1/fuse_test.go
@@ -12,6 +12,16 @@ import (
"github.com/grafana/loki/pkg/chunkenc"
)
+func keysToBloomTest(keys [][]byte) BloomTest {
+ var tokenizer fakeNgramBuilder
+ tests := make(BloomTests, 0, len(keys))
+ for _, key := range keys {
+ tests = append(tests, newStringTest(tokenizer, string(key)))
+ }
+
+ return tests
+}
+
func TestFusedQuerier(t *testing.T) {
// references for linking in memory reader+writer
indexBuf := bytes.NewBuffer(nil)
@@ -54,7 +64,7 @@ func TestFusedQuerier(t *testing.T) {
Fp: data[idx].Series.Fingerprint,
Chks: data[idx].Series.Chunks,
Response: ch,
- Searches: keys[idx],
+ Search: keysToBloomTest(keys[idx]),
})
}
inputs = append(inputs, reqs)
diff --git a/pkg/storage/bloom/v1/tokenizer.go b/pkg/storage/bloom/v1/tokenizer.go
index e3e1e065bf08..41f190ed5065 100644
--- a/pkg/storage/bloom/v1/tokenizer.go
+++ b/pkg/storage/bloom/v1/tokenizer.go
@@ -40,10 +40,11 @@ func NewNGramTokenizer(n, skip int) *NGramTokenizer {
return t
}
+// Tokens implements the NGramBuilder interface.
// The Token iterator uses shared buffers for performance. The []byte returned by At()
// is not safe for use after subsequent calls to Next()
-func (t *NGramTokenizer) Tokens(line string) NGramTokenIter {
- return NGramTokenIter{
+func (t *NGramTokenizer) Tokens(line string) Iterator[[]byte] {
+ return &NGramTokenIter{
n: t.N,
skip: t.Skip,
@@ -97,17 +98,17 @@ type PrefixedTokenIter struct {
buf []byte
prefixLen int
- NGramTokenIter
+ Iterator[[]byte]
}
func (t *PrefixedTokenIter) At() []byte {
- return append(t.buf[:t.prefixLen], t.NGramTokenIter.At()...)
+ return append(t.buf[:t.prefixLen], t.Iterator.At()...)
}
-func NewPrefixedTokenIter(buf []byte, prefixLn int, iter NGramTokenIter) *PrefixedTokenIter {
+func NewPrefixedTokenIter(buf []byte, prefixLn int, iter Iterator[[]byte]) *PrefixedTokenIter {
return &PrefixedTokenIter{
- buf: buf,
- prefixLen: prefixLn,
- NGramTokenIter: iter,
+ buf: buf,
+ prefixLen: prefixLn,
+ Iterator: iter,
}
}
From 7c8992f4e41f3e718575a06f346dc9006612c6eb Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Fri, 23 Feb 2024 10:43:30 -0800
Subject: [PATCH 116/130] fix(blooms): fix reference leak and resulting race
condition in BloomPageDecoder (#12050)
---
pkg/storage/bloom/v1/bloom.go | 41 ++++++++++++++-------------
pkg/storage/bloom/v1/bloom_querier.go | 6 ----
pkg/storage/bloom/v1/index.go | 7 +----
3 files changed, 22 insertions(+), 32 deletions(-)
diff --git a/pkg/storage/bloom/v1/bloom.go b/pkg/storage/bloom/v1/bloom.go
index f4339f004482..ef4f3623feb9 100644
--- a/pkg/storage/bloom/v1/bloom.go
+++ b/pkg/storage/bloom/v1/bloom.go
@@ -20,6 +20,8 @@ func (b *Bloom) Encode(enc *encoding.Encbuf) error {
// divide by 8 b/c bloom capacity is measured in bits, but we want bytes
buf := bytes.NewBuffer(BlockPool.Get(int(b.Capacity() / 8)))
+ // TODO(owen-d): have encoder implement writer directly so we don't need
+ // to indirect via a buffer
_, err := b.WriteTo(buf)
if err != nil {
return errors.Wrap(err, "encoding bloom filter")
@@ -56,7 +58,16 @@ func (b *Bloom) Decode(dec *encoding.Decbuf) error {
return nil
}
-func LazyDecodeBloomPage(dec *encoding.Decbuf, pool chunkenc.ReaderPool, decompressedSize int) (*BloomPageDecoder, error) {
+func LazyDecodeBloomPage(r io.Reader, pool chunkenc.ReaderPool, page BloomPageHeader) (*BloomPageDecoder, error) {
+ data := BlockPool.Get(page.Len)[:page.Len]
+ defer BlockPool.Put(data)
+
+ _, err := io.ReadFull(r, data)
+ if err != nil {
+ return nil, errors.Wrap(err, "reading bloom page")
+ }
+ dec := encoding.DecWith(data)
+
if err := dec.CheckCrc(castagnoliTable); err != nil {
return nil, errors.Wrap(err, "checksumming bloom page")
}
@@ -67,7 +78,7 @@ func LazyDecodeBloomPage(dec *encoding.Decbuf, pool chunkenc.ReaderPool, decompr
}
defer pool.PutReader(decompressor)
- b := BlockPool.Get(decompressedSize)[:decompressedSize]
+ b := make([]byte, page.DecompressedLen)
if _, err = io.ReadFull(decompressor, b); err != nil {
return nil, errors.Wrap(err, "decompressing bloom page")
@@ -98,6 +109,13 @@ func NewBloomPageDecoder(data []byte) *BloomPageDecoder {
}
// Decoder is a seekable, reset-able iterator
+// TODO(owen-d): use buffer pools. The reason we don't currently
+// do this is because the `data` slice currently escapes the decoder
+// via the returned bloom, so we can't know when it's safe to return it to the pool.
+// This happens via `data ([]byte) -> dec (*encoding.Decbuf) -> bloom (Bloom)` where
+// the final Bloom has a reference to the data slice.
+// We could optimize this by encoding the mode (read, write) into our structs
+// and doing copy-on-write shenanigans, but I'm avoiding this for now.
type BloomPageDecoder struct {
data []byte
dec *encoding.Decbuf
@@ -107,15 +125,6 @@ type BloomPageDecoder struct {
err error
}
-// Drop returns the underlying byte slice to the pool
-// for efficiency. It's intended to be used as a
-// perf optimization prior to garbage collection.
-func (d *BloomPageDecoder) Drop() {
- if cap(d.data) > 0 {
- BlockPool.Put(d.data)
- }
-}
-
func (d *BloomPageDecoder) Reset() {
d.err = nil
d.cur = nil
@@ -234,13 +243,5 @@ func (b *BloomBlock) BloomPageDecoder(r io.ReadSeeker, pageIdx int) (*BloomPageD
return nil, errors.Wrap(err, "seeking to bloom page")
}
- data := BlockPool.Get(page.Len)[:page.Len]
- _, err := io.ReadFull(r, data)
- if err != nil {
- return nil, errors.Wrap(err, "reading bloom page")
- }
-
- dec := encoding.DecWith(data)
-
- return LazyDecodeBloomPage(&dec, b.schema.DecompressorPool(), page.DecompressedLen)
+ return LazyDecodeBloomPage(r, b.schema.DecompressorPool(), page)
}
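The ownership rule behind the fix, condensed: a pooled buffer is only safe when it provably does not outlive the function, so the compressed page bytes stay pooled while the decompressed bytes, which escape through the returned decoder, are allocated plainly. A sketch:

    data := BlockPool.Get(page.Len)[:page.Len] // compressed bytes, fully consumed below
    defer BlockPool.Put(data)                  // safe: nothing retains data after return

    b := make([]byte, page.DecompressedLen) // escapes via the decoder; must not be pooled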
diff --git a/pkg/storage/bloom/v1/bloom_querier.go b/pkg/storage/bloom/v1/bloom_querier.go
index 1292addb7543..d0dbdc1b3b55 100644
--- a/pkg/storage/bloom/v1/bloom_querier.go
+++ b/pkg/storage/bloom/v1/bloom_querier.go
@@ -39,11 +39,6 @@ func (it *LazyBloomIter) Seek(offset BloomOffset) {
// load the desired page
if it.curPageIndex != offset.Page || it.curPage == nil {
- // drop the current page if it exists
- if it.curPage != nil {
- it.curPage.Drop()
- }
-
r, err := it.b.reader.Blooms()
if err != nil {
it.err = errors.Wrap(err, "getting blooms reader")
@@ -103,7 +98,6 @@ func (it *LazyBloomIter) next() bool {
}
// we've exhausted the current page, progress to next
it.curPageIndex++
- it.curPage.Drop()
it.curPage = nil
continue
}
diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go
index eac8276400b9..41854c60b4c9 100644
--- a/pkg/storage/bloom/v1/index.go
+++ b/pkg/storage/bloom/v1/index.go
@@ -173,19 +173,14 @@ func (b *BlockIndex) NewSeriesPageDecoder(r io.ReadSeeker, header SeriesPageHead
return nil, errors.Wrap(err, "getting decompressor")
}
- decompressed := BlockPool.Get(header.DecompressedLen)[:header.DecompressedLen]
+ decompressed := make([]byte, header.DecompressedLen)
if _, err = io.ReadFull(decompressor, decompressed); err != nil {
return nil, errors.Wrap(err, "decompressing series page")
}
- // replace decoder's input with the now-decompressed data
- dec.B = decompressed
-
res := &SeriesPageDecoder{
data: decompressed,
header: header.SeriesHeader,
-
- i: -1,
}
res.Reset()
From 2f54f8d5be3d1bc8c2b675e7689ea6e9b3340b7a Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Fri, 23 Feb 2024 20:56:21 +0100
Subject: [PATCH 117/130] chore(blooms): Implement BloomStore as a service
(#12044)
The bloomstore service is instantiated once at startup and used as a dependency for both the bloom compactor and the bloom gateway.
This prevents the store from being instantiated in different ways.
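Schematically, the wiring after this change (argument lists abbreviated; names are illustrative):

    store, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageCfg, clientMetrics, metasCache, blocksCache, logger)
    if err != nil {
        return err
    }
    // The same instance is handed to both consumers, and the bloom-store
    // module wraps it in an idle service so Stop() runs exactly once.
    gw, err := bloomgateway.New(gatewayCfg, store, logger, reg)
    // bloomcompactor.New(...) receives the identical store argument.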
Signed-off-by: Christian Haudum
---
pkg/bloomcompactor/bloomcompactor.go | 19 +++---
pkg/bloomgateway/bloomgateway.go | 33 +---------
pkg/bloomgateway/bloomgateway_test.go | 90 +++++++++------------------
pkg/logproto/compat_test.go | 5 +-
pkg/loki/loki.go | 7 ++-
pkg/loki/modules.go | 53 +++++++++++++---
6 files changed, 91 insertions(+), 116 deletions(-)
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index 85bca48f54f3..40eec568247f 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -67,15 +67,17 @@ func New(
fetcherProvider stores.ChunkFetcherProvider,
sharding util_ring.TenantSharding,
limits Limits,
+ store bloomshipper.Store,
logger log.Logger,
r prometheus.Registerer,
) (*Compactor, error) {
c := &Compactor{
- cfg: cfg,
- schemaCfg: schemaCfg,
- logger: logger,
- sharding: sharding,
- limits: limits,
+ cfg: cfg,
+ schemaCfg: schemaCfg,
+ logger: logger,
+ sharding: sharding,
+ limits: limits,
+ bloomStore: store,
}
tsdbStore, err := NewTSDBStores(schemaCfg, storeCfg, clientMetrics)
@@ -84,13 +86,6 @@ func New(
}
c.tsdbStore = tsdbStore
- // TODO(owen-d): export bloomstore as a dependency that can be reused by the compactor & gateway rather that
- bloomStore, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storeCfg, clientMetrics, nil, nil, logger)
- if err != nil {
- return nil, errors.Wrap(err, "failed to create bloom store")
- }
- c.bloomStore = bloomStore
-
// initialize metrics
c.btMetrics = v1.NewMetrics(prometheus.WrapRegistererWithPrefix("loki_bloom_tokenizer_", r))
c.metrics = NewMetrics(r, c.btMetrics)
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index d0ac92db59a3..ee358ebbbc66 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -59,12 +59,8 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
- "github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/queue"
- "github.com/grafana/loki/pkg/storage"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
- "github.com/grafana/loki/pkg/storage/chunk/cache"
- "github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
"github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/constants"
@@ -181,7 +177,7 @@ func (l *fixedQueueLimits) MaxConsumers(_ string, _ int) int {
}
// New returns a new instance of the Bloom Gateway.
-func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, overrides Limits, cm storage.ClientMetrics, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) {
+func New(cfg Config, store bloomshipper.Store, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) {
g := &Gateway{
cfg: cfg,
logger: logger,
@@ -192,35 +188,11 @@ func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, o
},
workerMetrics: newWorkerMetrics(reg, constants.Loki, metricsSubsystem),
queueMetrics: queue.NewMetrics(reg, constants.Loki, metricsSubsystem),
+ bloomStore: store,
}
- var err error
-
g.queue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, time.Minute, &fixedQueueLimits{0}, g.queueMetrics)
g.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(g.queueMetrics.Cleanup)
- var metasCache cache.Cache
- mcCfg := storageCfg.BloomShipperConfig.MetasCache
- if cache.IsCacheConfigured(mcCfg) {
- metasCache, err = cache.New(mcCfg, reg, logger, stats.BloomMetasCache, constants.Loki)
- if err != nil {
- return nil, err
- }
- }
-
- var blocksCache cache.TypedCache[string, bloomshipper.BlockDirectory]
- bcCfg := storageCfg.BloomShipperConfig.BlocksCache
- if bcCfg.IsEnabled() {
- blocksCache = bloomshipper.NewBlocksCache(bcCfg, reg, logger)
- }
-
- store, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageCfg, cm, metasCache, blocksCache, logger)
- if err != nil {
- return nil, err
- }
-
- // We need to keep a reference to be able to call Stop() on shutdown of the gateway.
- g.bloomStore = store
-
if err := g.initServices(); err != nil {
return nil, err
}
@@ -286,7 +258,6 @@ func (g *Gateway) running(ctx context.Context) error {
}
func (g *Gateway) stopping(_ error) error {
- g.bloomStore.Stop()
return services.StopManagerAndAwaitStopped(context.Background(), g.serviceMngr)
}
diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go
index f853398894e0..449c8b17a538 100644
--- a/pkg/bloomgateway/bloomgateway_test.go
+++ b/pkg/bloomgateway/bloomgateway_test.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"math/rand"
- "os"
"testing"
"time"
@@ -26,6 +25,7 @@ import (
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/chunk/client/local"
"github.com/grafana/loki/pkg/storage/config"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
bloomshipperconfig "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config"
lokiring "github.com/grafana/loki/pkg/util/ring"
"github.com/grafana/loki/pkg/validation"
@@ -46,10 +46,8 @@ func newLimits() *validation.Overrides {
return overrides
}
-func TestBloomGateway_StartStopService(t *testing.T) {
+func setupBloomStore(t *testing.T) *bloomshipper.BloomStore {
logger := log.NewNopLogger()
- reg := prometheus.NewRegistry()
- limits := newLimits()
cm := storage.NewClientMetrics()
t.Cleanup(cm.Unregister)
@@ -79,6 +77,17 @@ func TestBloomGateway_StartStopService(t *testing.T) {
},
}
+ store, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageCfg, cm, nil, nil, logger)
+ require.NoError(t, err)
+ t.Cleanup(store.Stop)
+
+ return store
+}
+
+func TestBloomGateway_StartStopService(t *testing.T) {
+ logger := log.NewNopLogger()
+ reg := prometheus.NewRegistry()
+
t.Run("start and stop bloom gateway", func(t *testing.T) {
kvStore, closer := consul.NewInMemoryClient(ring.GetCodec(), logger, reg)
t.Cleanup(func() {
@@ -99,7 +108,8 @@ func TestBloomGateway_StartStopService(t *testing.T) {
MaxOutstandingPerTenant: 1024,
}
- gw, err := New(cfg, schemaCfg, storageCfg, limits, cm, logger, reg)
+ store := setupBloomStore(t)
+ gw, err := New(cfg, store, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -116,37 +126,10 @@ func TestBloomGateway_StartStopService(t *testing.T) {
func TestBloomGateway_FilterChunkRefs(t *testing.T) {
tenantID := "test"
- logger := log.NewLogfmtLogger(os.Stderr)
- reg := prometheus.NewRegistry()
- limits := newLimits()
-
- cm := storage.NewClientMetrics()
- t.Cleanup(cm.Unregister)
- p := config.PeriodConfig{
- From: parseDayTime("2023-09-01"),
- IndexTables: config.IndexPeriodicTableConfig{
- PeriodicTableConfig: config.PeriodicTableConfig{
- Prefix: "index_",
- Period: 24 * time.Hour,
- },
- },
- IndexType: config.TSDBType,
- ObjectType: config.StorageTypeFileSystem,
- Schema: "v13",
- RowShards: 16,
- }
- schemaCfg := config.SchemaConfig{
- Configs: []config.PeriodConfig{p},
- }
- storageCfg := storage.Config{
- BloomShipperConfig: bloomshipperconfig.Config{
- WorkingDirectory: t.TempDir(),
- },
- FSConfig: local.FSConfig{
- Directory: t.TempDir(),
- },
- }
+ store := setupBloomStore(t)
+ logger := log.NewNopLogger()
+ reg := prometheus.NewRegistry()
kvStore, closer := consul.NewInMemoryClient(ring.GetCodec(), logger, reg)
t.Cleanup(func() {
@@ -168,20 +151,14 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
}
t.Run("shipper error is propagated", func(t *testing.T) {
- reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, limits, cm, logger, reg)
- require.NoError(t, err)
-
now := mktime("2023-10-03 10:00")
- // replace store implementation and re-initialize workers and sub-services
_, metas, queriers, data := createBlocks(t, tenantID, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
-
mockStore := newMockBloomStore(queriers, metas)
mockStore.err = errors.New("request failed")
- gw.bloomStore = mockStore
- err = gw.initServices()
+ reg := prometheus.NewRegistry()
+ gw, err := New(cfg, mockStore, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -216,20 +193,15 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
})
t.Run("request cancellation does not result in channel locking", func(t *testing.T) {
- reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, limits, cm, logger, reg)
- require.NoError(t, err)
-
now := mktime("2024-01-25 10:00")
// replace store implementation and re-initialize workers and sub-services
_, metas, queriers, data := createBlocks(t, tenantID, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
-
mockStore := newMockBloomStore(queriers, metas)
mockStore.delay = 2000 * time.Millisecond
- gw.bloomStore = mockStore
- err = gw.initServices()
+ reg := prometheus.NewRegistry()
+ gw, err := New(cfg, mockStore, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -264,8 +236,10 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
})
t.Run("returns unfiltered chunk refs if no filters provided", func(t *testing.T) {
+ now := mktime("2023-10-03 10:00")
+
reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, limits, cm, logger, reg)
+ gw, err := New(cfg, store, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -275,8 +249,6 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
require.NoError(t, err)
})
- now := mktime("2023-10-03 10:00")
-
chunkRefs := []*logproto.ChunkRef{
{Fingerprint: 3000, UserID: tenantID, From: now.Add(-24 * time.Hour), Through: now.Add(-23 * time.Hour), Checksum: 1},
{Fingerprint: 1000, UserID: tenantID, From: now.Add(-22 * time.Hour), Through: now.Add(-21 * time.Hour), Checksum: 2},
@@ -309,8 +281,10 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
})
t.Run("gateway tracks active users", func(t *testing.T) {
+ now := mktime("2023-10-03 10:00")
+
reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, limits, cm, logger, reg)
+ gw, err := New(cfg, store, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -320,8 +294,6 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
require.NoError(t, err)
})
- now := mktime("2023-10-03 10:00")
-
tenants := []string{"tenant-a", "tenant-b", "tenant-c"}
for idx, tenantID := range tenants {
chunkRefs := []*logproto.ChunkRef{
@@ -349,12 +321,12 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
})
t.Run("use fuse queriers to filter chunks", func(t *testing.T) {
+ now := mktime("2023-10-03 10:00")
+
reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, limits, cm, logger, reg)
+ gw, err := New(cfg, store, logger, reg)
require.NoError(t, err)
- now := mktime("2023-10-03 10:00")
-
// replace store implementation and re-initialize workers and sub-services
_, metas, queriers, data := createBlocks(t, tenantID, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
diff --git a/pkg/logproto/compat_test.go b/pkg/logproto/compat_test.go
index a066fe65fed1..d4de93638f82 100644
--- a/pkg/logproto/compat_test.go
+++ b/pkg/logproto/compat_test.go
@@ -7,12 +7,13 @@ import (
"testing"
"unsafe"
- "github.com/grafana/loki/pkg/logql/syntax"
- "github.com/grafana/loki/pkg/querier/plan"
jsoniter "github.com/json-iterator/go"
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
)
// This test verifies that jsoninter uses our custom method for marshalling.
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 75401decb8fc..d8ee613f6108 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -54,6 +54,7 @@ import (
"github.com/grafana/loki/pkg/storage"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/series/index"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/indexgateway"
"github.com/grafana/loki/pkg/tracing"
"github.com/grafana/loki/pkg/util"
@@ -304,6 +305,7 @@ type Loki struct {
querierAPI *querier.QuerierAPI
ingesterQuerier *querier.IngesterQuerier
Store storage.Store
+ BloomStore bloomshipper.Store
tableManager *index.TableManager
frontend Frontend
ruler *base_ruler.Ruler
@@ -602,6 +604,7 @@ func (t *Loki) setupModuleManager() error {
mm.RegisterModule(RuleEvaluator, t.initRuleEvaluator, modules.UserInvisibleModule)
mm.RegisterModule(TableManager, t.initTableManager)
mm.RegisterModule(Compactor, t.initCompactor)
+ mm.RegisterModule(BloomStore, t.initBloomStore)
mm.RegisterModule(BloomCompactor, t.initBloomCompactor)
mm.RegisterModule(BloomCompactorRing, t.initBloomCompactorRing, modules.UserInvisibleModule)
mm.RegisterModule(IndexGateway, t.initIndexGateway)
@@ -638,8 +641,8 @@ func (t *Loki) setupModuleManager() error {
TableManager: {Server, Analytics},
Compactor: {Server, Overrides, MemberlistKV, Analytics},
IndexGateway: {Server, Store, IndexGatewayRing, IndexGatewayInterceptors, Analytics},
- BloomGateway: {Server, BloomGatewayRing, Analytics},
- BloomCompactor: {Server, BloomCompactorRing, Analytics, Store},
+ BloomGateway: {Server, BloomStore, BloomGatewayRing, Analytics},
+ BloomCompactor: {Server, BloomStore, BloomCompactorRing, Analytics, Store},
IngesterQuerier: {Ring},
QuerySchedulerRing: {Overrides, MemberlistKV},
IndexGatewayRing: {Overrides, MemberlistKV},
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 9d5a614dc579..5b73be62ca51 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -34,6 +34,7 @@ import (
"github.com/prometheus/common/version"
"github.com/grafana/loki/pkg/bloomcompactor"
+ "github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/analytics"
"github.com/grafana/loki/pkg/bloomgateway"
@@ -64,6 +65,7 @@ import (
chunk_util "github.com/grafana/loki/pkg/storage/chunk/client/util"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/series/index"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/boltdb"
boltdbcompactor "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/boltdb/compactor"
@@ -117,6 +119,7 @@ const (
QuerySchedulerRing string = "query-scheduler-ring"
BloomCompactor string = "bloom-compactor"
BloomCompactorRing string = "bloom-compactor-ring"
+ BloomStore string = "bloom-store"
All string = "all"
Read string = "read"
Write string = "write"
@@ -645,6 +648,43 @@ func (t *Loki) initStore() (services.Service, error) {
}), nil
}
+func (t *Loki) initBloomStore() (services.Service, error) {
+ if !config.UsingObjectStorageIndex(t.Cfg.SchemaConfig.Configs) {
+ return nil, errors.New("not using shipper index type")
+ }
+
+ t.updateConfigForShipperStore()
+
+ var err error
+ logger := log.With(util_log.Logger, "component", "bloomstore")
+
+ reg := prometheus.DefaultRegisterer
+ bsCfg := t.Cfg.StorageConfig.BloomShipperConfig
+
+ var metasCache cache.Cache
+ if cache.IsCacheConfigured(bsCfg.MetasCache) {
+ metasCache, err = cache.New(bsCfg.MetasCache, reg, logger, stats.BloomMetasCache, constants.Loki)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create metas cache: %w", err)
+ }
+ }
+
+ var blocksCache cache.TypedCache[string, bloomshipper.BlockDirectory]
+ if bsCfg.BlocksCache.IsEnabled() {
+ blocksCache = bloomshipper.NewBlocksCache(bsCfg.BlocksCache, reg, logger)
+ }
+
+ t.BloomStore, err = bloomshipper.NewBloomStore(t.Cfg.SchemaConfig.Configs, t.Cfg.StorageConfig, t.clientMetrics, metasCache, blocksCache, logger)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create bloom store: %w", err)
+ }
+
+ return services.NewIdleService(nil, func(_ error) error {
+ t.BloomStore.Stop()
+ return nil
+ }), nil
+}
+
func (t *Loki) updateConfigForShipperStore() {
// Always set these configs
t.Cfg.StorageConfig.BoltDBShipperConfig.IndexGatewayClientConfig.Mode = t.Cfg.IndexGateway.Mode
@@ -1272,7 +1312,7 @@ func (t *Loki) addCompactorMiddleware(h http.HandlerFunc) http.Handler {
func (t *Loki) initBloomGateway() (services.Service, error) {
logger := log.With(util_log.Logger, "component", "bloom-gateway")
- gateway, err := bloomgateway.New(t.Cfg.BloomGateway, t.Cfg.SchemaConfig, t.Cfg.StorageConfig, t.Overrides, t.clientMetrics, logger, prometheus.DefaultRegisterer)
+ gateway, err := bloomgateway.New(t.Cfg.BloomGateway, t.BloomStore, logger, prometheus.DefaultRegisterer)
if err != nil {
return nil, err
}
@@ -1415,13 +1455,11 @@ func (t *Loki) initIndexGatewayInterceptors() (services.Service, error) {
}
func (t *Loki) initBloomCompactor() (services.Service, error) {
- t.updateConfigForShipperStore()
-
logger := log.With(util_log.Logger, "component", "bloom-compactor")
shuffleSharding := util_ring.NewTenantShuffleSharding(t.bloomCompactorRingManager.Ring, t.bloomCompactorRingManager.RingLifecycler, t.Overrides.BloomCompactorShardSize)
- compactor, err := bloomcompactor.New(
+ return bloomcompactor.New(
t.Cfg.BloomCompactor,
t.Cfg.SchemaConfig,
t.Cfg.StorageConfig,
@@ -1429,15 +1467,10 @@ func (t *Loki) initBloomCompactor() (services.Service, error) {
t.Store,
shuffleSharding,
t.Overrides,
+ t.BloomStore,
logger,
prometheus.DefaultRegisterer,
)
-
- if err != nil {
- return nil, err
- }
-
- return compactor, nil
}
func (t *Loki) initBloomCompactorRing() (services.Service, error) {
From 6578a00481241494ded98cea674fced9e024864b Mon Sep 17 00:00:00 2001
From: Karsten Jeschkies
Date: Sat, 24 Feb 2024 08:56:19 +0100
Subject: [PATCH 118/130] feat: Support usage trackers for received and
discarded bytes. (#11840)
---
CHANGELOG.md | 1 +
.../promtail/targets/lokipush/pushtarget.go | 2 +-
pkg/bloomcompactor/bloomcompactor.go | 2 +-
pkg/distributor/distributor.go | 47 +++++--
pkg/distributor/distributor_test.go | 10 +-
pkg/distributor/http.go | 2 +-
pkg/distributor/validator.go | 26 +++-
pkg/distributor/validator_test.go | 21 +--
pkg/ingester/instance.go | 32 +++--
pkg/loghttp/push/otlp.go | 16 ++-
pkg/loghttp/push/otlp_test.go | 29 ++++-
pkg/loghttp/push/push.go | 50 ++++---
pkg/loghttp/push/push_test.go | 122 +++++++++++-------
pkg/loghttp/push/usage_tracker.go | 16 +++
pkg/logql/rangemapper.go | 2 +-
pkg/loki/loki.go | 3 +
pkg/loki/modules.go | 1 +
17 files changed, 272 insertions(+), 110 deletions(-)
create mode 100644 pkg/loghttp/push/usage_tracker.go
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fa8861228407..fb09c05dded7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@
##### Enhancements
+* [11840](https://github.com/grafana/loki/pull/11840) **jeschkies**: Allow custom usage trackers for the ingested and discarded bytes metrics.
* [11814](https://github.com/grafana/loki/pull/11814) **kavirajk**: feat: Support split align and caching for instant metric query results
* [11851](https://github.com/grafana/loki/pull/11851) **elcomtik**: Helm: Allow the definition of resources for GrafanaAgent pods.
* [11819](https://github.com/grafana/loki/pull/11819) **jburnham**: Ruler: Add the ability to disable the `X-Scope-OrgId` tenant identification header in remote write requests.
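A sketch of a custom tracker satisfying the call sites visible in this patch; only DiscardedBytesAdd appears below, and the full push.UsageTracker interface presumably also covers received bytes per the commit title, so treat this shape as an assumption:

    type logTracker struct {
        logger log.Logger
    }

    func (t *logTracker) DiscardedBytesAdd(tenant, reason string, lbs labels.Labels, value float64) {
        level.Debug(t.logger).Log("msg", "discarded bytes",
            "tenant", tenant, "reason", reason, "stream", lbs.String(), "bytes", value)
    }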
diff --git a/clients/pkg/promtail/targets/lokipush/pushtarget.go b/clients/pkg/promtail/targets/lokipush/pushtarget.go
index c981de0de3dd..88c7859bd36e 100644
--- a/clients/pkg/promtail/targets/lokipush/pushtarget.go
+++ b/clients/pkg/promtail/targets/lokipush/pushtarget.go
@@ -111,7 +111,7 @@ func (t *PushTarget) run() error {
func (t *PushTarget) handleLoki(w http.ResponseWriter, r *http.Request) {
logger := util_log.WithContext(r.Context(), util_log.Logger)
userID, _ := tenant.TenantID(r.Context())
- req, err := push.ParseRequest(logger, userID, r, nil, nil, push.ParseLokiRequest)
+ req, err := push.ParseRequest(logger, userID, r, nil, push.EmptyLimits{}, push.ParseLokiRequest, nil)
if err != nil {
level.Warn(t.logger).Log("msg", "failed to parse incoming push request", "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index 40eec568247f..da3c70d81b89 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -194,7 +194,7 @@ func (c *Compactor) ownsTenant(tenant string) ([]v1.FingerprintBounds, bool, err
return nil, false, nil
}
- // TOOD(owen-d): use .GetTokenRangesForInstance()
+ // TODO(owen-d): use .GetTokenRangesForInstance()
// when it's supported for non zone-aware rings
// instead of doing all this manually
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index f47148fa42b0..53ff20ed9274 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -39,6 +39,7 @@ import (
"github.com/grafana/loki/pkg/distributor/writefailures"
"github.com/grafana/loki/pkg/ingester"
"github.com/grafana/loki/pkg/ingester/client"
+ "github.com/grafana/loki/pkg/loghttp/push"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/runtime"
@@ -126,6 +127,8 @@ type Distributor struct {
ingesterAppendTimeouts *prometheus.CounterVec
replicationFactor prometheus.Gauge
streamShardCount prometheus.Counter
+
+ usageTracker push.UsageTracker
}
// New creates a distributor.
@@ -138,6 +141,7 @@ func New(
registerer prometheus.Registerer,
metricsNamespace string,
tee Tee,
+ usageTracker push.UsageTracker,
logger log.Logger,
) (*Distributor, error) {
factory := cfg.factory
@@ -153,7 +157,7 @@ func New(
return client.New(internalCfg, addr)
}
- validator, err := NewValidator(overrides)
+ validator, err := NewValidator(overrides, usageTracker)
if err != nil {
return nil, err
}
@@ -185,6 +189,7 @@ func New(
healthyInstancesCount: atomic.NewUint32(0),
rateLimitStrat: rateLimitStrat,
tee: tee,
+ usageTracker: usageTracker,
ingesterAppends: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{
Namespace: constants.Loki,
Name: "distributor_ingester_appends_total",
@@ -337,7 +342,8 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
// Truncate first so subsequent steps have consistent line lengths
d.truncateLines(validationContext, &stream)
- stream.Labels, stream.Hash, err = d.parseStreamLabels(validationContext, stream.Labels, &stream)
+ var lbs labels.Labels
+ lbs, stream.Labels, stream.Hash, err = d.parseStreamLabels(validationContext, stream.Labels, &stream)
if err != nil {
d.writeFailuresManager.Log(tenantID, err)
validationErrors.Add(err)
@@ -354,7 +360,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
pushSize := 0
prevTs := stream.Entries[0].Timestamp
for _, entry := range stream.Entries {
- if err := d.validator.ValidateEntry(validationContext, stream.Labels, entry); err != nil {
+ if err := d.validator.ValidateEntry(validationContext, lbs, entry); err != nil {
d.writeFailuresManager.Log(tenantID, err)
validationErrors.Add(err)
continue
@@ -412,6 +418,24 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
validation.DiscardedSamples.WithLabelValues(validation.RateLimited, tenantID).Add(float64(validatedLineCount))
validation.DiscardedBytes.WithLabelValues(validation.RateLimited, tenantID).Add(float64(validatedLineSize))
+ if d.usageTracker != nil {
+ for _, stream := range req.Streams {
+ lbs, _, _, err := d.parseStreamLabels(validationContext, stream.Labels, &stream)
+ if err != nil {
+ continue
+ }
+
+ discardedStreamBytes := 0
+ for _, e := range stream.Entries {
+ discardedStreamBytes += len(e.Line)
+ }
+
+ d.usageTracker.DiscardedBytesAdd(tenantID, validation.RateLimited, lbs, float64(discardedStreamBytes))
+ }
+ }
+
err = fmt.Errorf(validation.RateLimitedErrorMsg, tenantID, int(d.ingestionRateLimiter.Limit(now, tenantID)), validatedLineCount, validatedLineSize)
d.writeFailuresManager.Log(tenantID, err)
return nil, httpgrpc.Errorf(http.StatusTooManyRequests, err.Error())
@@ -684,30 +708,29 @@ func (d *Distributor) sendStreamsErr(ctx context.Context, ingester ring.Instance
}
type labelData struct {
- labels string
- hash uint64
+ ls labels.Labels
+ hash uint64
}
-func (d *Distributor) parseStreamLabels(vContext validationContext, key string, stream *logproto.Stream) (string, uint64, error) {
+func (d *Distributor) parseStreamLabels(vContext validationContext, key string, stream *logproto.Stream) (labels.Labels, string, uint64, error) {
if val, ok := d.labelCache.Get(key); ok {
labelVal := val.(labelData)
- return labelVal.labels, labelVal.hash, nil
+ return labelVal.ls, labelVal.ls.String(), labelVal.hash, nil
}
ls, err := syntax.ParseLabels(key)
if err != nil {
- return "", 0, fmt.Errorf(validation.InvalidLabelsErrorMsg, key, err)
+ return nil, "", 0, fmt.Errorf(validation.InvalidLabelsErrorMsg, key, err)
}
if err := d.validator.ValidateLabels(vContext, ls, *stream); err != nil {
- return "", 0, err
+ return nil, "", 0, err
}
- lsVal := ls.String()
lsHash := ls.Hash()
- d.labelCache.Add(key, labelData{lsVal, lsHash})
- return lsVal, lsHash, nil
+ d.labelCache.Add(key, labelData{ls, lsHash})
+ return ls, ls.String(), lsHash, nil
}
// shardCountFor returns the right number of shards to be used by the given stream.
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index 75e3a6e78670..04747ffb7233 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -612,7 +612,7 @@ func TestStreamShard(t *testing.T) {
overrides, err := validation.NewOverrides(*distributorLimits, nil)
require.NoError(t, err)
- validator, err := NewValidator(overrides)
+ validator, err := NewValidator(overrides, nil)
require.NoError(t, err)
d := Distributor{
@@ -656,7 +656,7 @@ func TestStreamShardAcrossCalls(t *testing.T) {
overrides, err := validation.NewOverrides(*distributorLimits, nil)
require.NoError(t, err)
- validator, err := NewValidator(overrides)
+ validator, err := NewValidator(overrides, nil)
require.NoError(t, err)
t.Run("it generates 4 shards across 2 calls when calculated shards = 2 * entries per call", func(t *testing.T) {
@@ -721,7 +721,7 @@ func BenchmarkShardStream(b *testing.B) {
overrides, err := validation.NewOverrides(*distributorLimits, nil)
require.NoError(b, err)
- validator, err := NewValidator(overrides)
+ validator, err := NewValidator(overrides, nil)
require.NoError(b, err)
distributorBuilder := func(shards int) *Distributor {
@@ -788,7 +788,7 @@ func Benchmark_SortLabelsOnPush(b *testing.B) {
for n := 0; n < b.N; n++ {
stream := request.Streams[0]
stream.Labels = `{buzz="f", a="b"}`
- _, _, err := d.parseStreamLabels(vCtx, stream.Labels, &stream)
+ _, _, _, err := d.parseStreamLabels(vCtx, stream.Labels, &stream)
if err != nil {
panic("parseStreamLabels fail,err:" + err.Error())
}
@@ -1159,7 +1159,7 @@ func prepare(t *testing.T, numDistributors, numIngesters int, limits *validation
overrides, err := validation.NewOverrides(*limits, nil)
require.NoError(t, err)
- d, err := New(distributorConfig, clientConfig, runtime.DefaultTenantConfigs(), ingestersRing, overrides, prometheus.NewPedanticRegistry(), constants.Loki, nil, log.NewNopLogger())
+ d, err := New(distributorConfig, clientConfig, runtime.DefaultTenantConfigs(), ingestersRing, overrides, prometheus.NewPedanticRegistry(), constants.Loki, nil, nil, log.NewNopLogger())
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), d))
distributors[i] = d
diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go
index ce242355e077..d2582f027f9b 100644
--- a/pkg/distributor/http.go
+++ b/pkg/distributor/http.go
@@ -34,7 +34,7 @@ func (d *Distributor) pushHandler(w http.ResponseWriter, r *http.Request, pushRe
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
- req, err := push.ParseRequest(logger, tenantID, r, d.tenantsRetention, d.validator.Limits, pushRequestParser)
+ req, err := push.ParseRequest(logger, tenantID, r, d.tenantsRetention, d.validator.Limits, pushRequestParser, d.usageTracker)
if err != nil {
if d.tenantConfigs.LogPushRequest(tenantID) {
level.Debug(logger).Log(
diff --git a/pkg/distributor/validator.go b/pkg/distributor/validator.go
index 7fe76fae7823..f1f2e4acb0ea 100644
--- a/pkg/distributor/validator.go
+++ b/pkg/distributor/validator.go
@@ -8,6 +8,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
+ "github.com/grafana/loki/pkg/loghttp/push"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/validation"
)
@@ -18,13 +19,14 @@ const (
type Validator struct {
Limits
+ usageTracker push.UsageTracker
}
-func NewValidator(l Limits) (*Validator, error) {
+func NewValidator(l Limits, t push.UsageTracker) (*Validator, error) {
if l == nil {
return nil, errors.New("nil Limits")
}
- return &Validator{l}, nil
+ return &Validator{l, t}, nil
}
type validationContext struct {
@@ -67,7 +69,7 @@ func (v Validator) getValidationContextForTime(now time.Time, userID string) val
}
// ValidateEntry returns an error if the entry is invalid and report metrics for invalid entries accordingly.
-func (v Validator) ValidateEntry(ctx validationContext, labels string, entry logproto.Entry) error {
+func (v Validator) ValidateEntry(ctx validationContext, labels labels.Labels, entry logproto.Entry) error {
ts := entry.Timestamp.UnixNano()
validation.LineLengthHist.Observe(float64(len(entry.Line)))
@@ -77,6 +79,9 @@ func (v Validator) ValidateEntry(ctx validationContext, labels string, entry log
formatedRejectMaxAgeTime := time.Unix(0, ctx.rejectOldSampleMaxAge).Format(timeFormat)
validation.DiscardedSamples.WithLabelValues(validation.GreaterThanMaxSampleAge, ctx.userID).Inc()
validation.DiscardedBytes.WithLabelValues(validation.GreaterThanMaxSampleAge, ctx.userID).Add(float64(len(entry.Line)))
+ if v.usageTracker != nil {
+ v.usageTracker.DiscardedBytesAdd(ctx.userID, validation.GreaterThanMaxSampleAge, labels, float64(len(entry.Line)))
+ }
return fmt.Errorf(validation.GreaterThanMaxSampleAgeErrorMsg, labels, formatedEntryTime, formatedRejectMaxAgeTime)
}
@@ -84,6 +89,9 @@ func (v Validator) ValidateEntry(ctx validationContext, labels string, entry log
formatedEntryTime := entry.Timestamp.Format(timeFormat)
validation.DiscardedSamples.WithLabelValues(validation.TooFarInFuture, ctx.userID).Inc()
validation.DiscardedBytes.WithLabelValues(validation.TooFarInFuture, ctx.userID).Add(float64(len(entry.Line)))
+ if v.usageTracker != nil {
+ v.usageTracker.DiscardedBytesAdd(ctx.userID, validation.TooFarInFuture, labels, float64(len(entry.Line)))
+ }
return fmt.Errorf(validation.TooFarInFutureErrorMsg, labels, formatedEntryTime)
}
@@ -94,6 +102,9 @@ func (v Validator) ValidateEntry(ctx validationContext, labels string, entry log
// for parity.
validation.DiscardedSamples.WithLabelValues(validation.LineTooLong, ctx.userID).Inc()
validation.DiscardedBytes.WithLabelValues(validation.LineTooLong, ctx.userID).Add(float64(len(entry.Line)))
+ if v.usageTracker != nil {
+ v.usageTracker.DiscardedBytesAdd(ctx.userID, validation.LineTooLong, labels, float64(len(entry.Line)))
+ }
return fmt.Errorf(validation.LineTooLongErrorMsg, maxSize, labels, len(entry.Line))
}
@@ -101,6 +112,9 @@ func (v Validator) ValidateEntry(ctx validationContext, labels string, entry log
if !ctx.allowStructuredMetadata {
validation.DiscardedSamples.WithLabelValues(validation.DisallowedStructuredMetadata, ctx.userID).Inc()
validation.DiscardedBytes.WithLabelValues(validation.DisallowedStructuredMetadata, ctx.userID).Add(float64(len(entry.Line)))
+ if v.usageTracker != nil {
+ v.usageTracker.DiscardedBytesAdd(ctx.userID, validation.DisallowedStructuredMetadata, labels, float64(len(entry.Line)))
+ }
return fmt.Errorf(validation.DisallowedStructuredMetadataErrorMsg, labels)
}
@@ -113,12 +127,18 @@ func (v Validator) ValidateEntry(ctx validationContext, labels string, entry log
if maxSize := ctx.maxStructuredMetadataSize; maxSize != 0 && structuredMetadataSizeBytes > maxSize {
validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooLarge, ctx.userID).Inc()
validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooLarge, ctx.userID).Add(float64(len(entry.Line)))
+ if v.usageTracker != nil {
+ v.usageTracker.DiscardedBytesAdd(ctx.userID, validation.StructuredMetadataTooLarge, labels, float64(len(entry.Line)))
+ }
return fmt.Errorf(validation.StructuredMetadataTooLargeErrorMsg, labels, structuredMetadataSizeBytes, ctx.maxStructuredMetadataSize)
}
if maxCount := ctx.maxStructuredMetadataCount; maxCount != 0 && structuredMetadataCount > maxCount {
validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooMany, ctx.userID).Inc()
validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooMany, ctx.userID).Add(float64(len(entry.Line)))
+ if v.usageTracker != nil {
+ v.usageTracker.DiscardedBytesAdd(ctx.userID, validation.StructuredMetadataTooMany, labels, float64(len(entry.Line)))
+ }
return fmt.Errorf(validation.StructuredMetadataTooManyErrorMsg, labels, structuredMetadataCount, ctx.maxStructuredMetadataCount)
}
}
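
Each discard reason above repeats the same three-step sequence: bump the `DiscardedSamples` counter, bump `DiscardedBytes`, and, when a tracker is configured, forward the byte count to it. A hypothetical helper (not part of this patch) could centralize that guard-and-record pattern; a minimal sketch, reusing only names visible in the diff:

```go
// reportDiscarded is a hypothetical helper bundling the metric updates and
// the optional usage-tracker call performed for each discard reason.
func (v Validator) reportDiscarded(ctx validationContext, reason string, lbs labels.Labels, entry logproto.Entry) {
	validation.DiscardedSamples.WithLabelValues(reason, ctx.userID).Inc()
	validation.DiscardedBytes.WithLabelValues(reason, ctx.userID).Add(float64(len(entry.Line)))
	if v.usageTracker != nil {
		v.usageTracker.DiscardedBytesAdd(ctx.userID, reason, lbs, float64(len(entry.Line)))
	}
}
```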
diff --git a/pkg/distributor/validator_test.go b/pkg/distributor/validator_test.go
index 038f1dc4c5b7..0c37065e3056 100644
--- a/pkg/distributor/validator_test.go
+++ b/pkg/distributor/validator_test.go
@@ -18,8 +18,9 @@ import (
)
var (
- testStreamLabels = "FIXME"
- testTime = time.Now()
+ testStreamLabels = labels.Labels{{Name: "my", Value: "label"}}
+ testStreamLabelsString = testStreamLabels.String()
+ testTime = time.Now()
)
type fakeLimits struct {
@@ -61,7 +62,7 @@ func TestValidator_ValidateEntry(t *testing.T) {
},
logproto.Entry{Timestamp: testTime.Add(-time.Hour * 5), Line: "test"},
fmt.Errorf(validation.GreaterThanMaxSampleAgeErrorMsg,
- testStreamLabels,
+ testStreamLabelsString,
testTime.Add(-time.Hour*5).Format(timeFormat),
testTime.Add(-1*time.Hour).Format(timeFormat), // same as RejectOldSamplesMaxAge
),
@@ -71,7 +72,7 @@ func TestValidator_ValidateEntry(t *testing.T) {
"test",
nil,
logproto.Entry{Timestamp: testTime.Add(time.Hour * 5), Line: "test"},
- fmt.Errorf(validation.TooFarInFutureErrorMsg, testStreamLabels, testTime.Add(time.Hour*5).Format(timeFormat)),
+ fmt.Errorf(validation.TooFarInFutureErrorMsg, testStreamLabelsString, testTime.Add(time.Hour*5).Format(timeFormat)),
},
{
"line too long",
@@ -82,7 +83,7 @@ func TestValidator_ValidateEntry(t *testing.T) {
},
},
logproto.Entry{Timestamp: testTime, Line: "12345678901"},
- fmt.Errorf(validation.LineTooLongErrorMsg, 10, testStreamLabels, 11),
+ fmt.Errorf(validation.LineTooLongErrorMsg, 10, testStreamLabelsString, 11),
},
{
"disallowed structured metadata",
@@ -93,7 +94,7 @@ func TestValidator_ValidateEntry(t *testing.T) {
},
},
logproto.Entry{Timestamp: testTime, Line: "12345678901", StructuredMetadata: push.LabelsAdapter{{Name: "foo", Value: "bar"}}},
- fmt.Errorf(validation.DisallowedStructuredMetadataErrorMsg, testStreamLabels),
+ fmt.Errorf(validation.DisallowedStructuredMetadataErrorMsg, testStreamLabelsString),
},
{
"structured metadata too big",
@@ -105,7 +106,7 @@ func TestValidator_ValidateEntry(t *testing.T) {
},
},
logproto.Entry{Timestamp: testTime, Line: "12345678901", StructuredMetadata: push.LabelsAdapter{{Name: "foo", Value: "bar"}}},
- fmt.Errorf(validation.StructuredMetadataTooLargeErrorMsg, testStreamLabels, 6, 4),
+ fmt.Errorf(validation.StructuredMetadataTooLargeErrorMsg, testStreamLabelsString, 6, 4),
},
{
"structured metadata too many",
@@ -117,7 +118,7 @@ func TestValidator_ValidateEntry(t *testing.T) {
},
},
logproto.Entry{Timestamp: testTime, Line: "12345678901", StructuredMetadata: push.LabelsAdapter{{Name: "foo", Value: "bar"}, {Name: "too", Value: "many"}}},
- fmt.Errorf(validation.StructuredMetadataTooManyErrorMsg, testStreamLabels, 2, 1),
+ fmt.Errorf(validation.StructuredMetadataTooManyErrorMsg, testStreamLabelsString, 2, 1),
},
}
for _, tt := range tests {
@@ -126,7 +127,7 @@ func TestValidator_ValidateEntry(t *testing.T) {
flagext.DefaultValues(l)
o, err := validation.NewOverrides(*l, tt.overrides)
assert.NoError(t, err)
- v, err := NewValidator(o)
+ v, err := NewValidator(o, nil)
assert.NoError(t, err)
err = v.ValidateEntry(v.getValidationContextForTime(testTime, tt.userID), testStreamLabels, tt.entry)
@@ -224,7 +225,7 @@ func TestValidator_ValidateLabels(t *testing.T) {
flagext.DefaultValues(l)
o, err := validation.NewOverrides(*l, tt.overrides)
assert.NoError(t, err)
- v, err := NewValidator(o)
+ v, err := NewValidator(o, nil)
assert.NoError(t, err)
err = v.ValidateLabels(v.getValidationContextForTime(testTime, tt.userID), mustParseLabels(tt.labels), logproto.Stream{Labels: tt.labels})
diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
index f29628d85eeb..c7953ea1aba1 100644
--- a/pkg/ingester/instance.go
+++ b/pkg/ingester/instance.go
@@ -30,6 +30,7 @@ import (
"github.com/grafana/loki/pkg/ingester/index"
"github.com/grafana/loki/pkg/ingester/wal"
"github.com/grafana/loki/pkg/iter"
+ "github.com/grafana/loki/pkg/loghttp/push"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logql/log"
@@ -119,6 +120,8 @@ type instance struct {
writeFailures *writefailures.Manager
schemaconfig *config.SchemaConfig
+
+ customStreamsTracker push.UsageTracker
}
func newInstance(
@@ -262,6 +265,20 @@ func (i *instance) createStream(pushReqStream logproto.Stream, record *wal.Recor
// record is only nil when replaying WAL. We don't want to drop data when replaying a WAL after
// reducing the stream limits, for instance.
var err error
+
+ labels, err := syntax.ParseLabels(pushReqStream.Labels)
+ if err != nil {
+ if i.configs.LogStreamCreation(i.instanceID) {
+ level.Debug(util_log.Logger).Log(
+ "msg", "failed to create stream, failed to parse labels",
+ "org_id", i.instanceID,
+ "err", err,
+ "stream", pushReqStream.Labels,
+ )
+ }
+ return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+ }
+
if record != nil {
err = i.limiter.AssertMaxStreamsPerUser(i.instanceID, i.streams.Len())
}
@@ -282,21 +299,12 @@ func (i *instance) createStream(pushReqStream logproto.Stream, record *wal.Recor
bytes += len(e.Line)
}
validation.DiscardedBytes.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(bytes))
+ if i.customStreamsTracker != nil {
+ i.customStreamsTracker.DiscardedBytesAdd(i.instanceID, validation.StreamLimit, labels, float64(bytes))
+ }
return nil, httpgrpc.Errorf(http.StatusTooManyRequests, validation.StreamLimitErrorMsg, i.instanceID)
}
- labels, err := syntax.ParseLabels(pushReqStream.Labels)
- if err != nil {
- if i.configs.LogStreamCreation(i.instanceID) {
- level.Debug(util_log.Logger).Log(
- "msg", "failed to create stream, failed to parse labels",
- "org_id", i.instanceID,
- "err", err,
- "stream", pushReqStream.Labels,
- )
- }
- return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
- }
fp := i.getHashForLabels(labels)
sortedLabels := i.index.Add(logproto.FromLabelsToLabelAdapters(labels), fp)
diff --git a/pkg/loghttp/push/otlp.go b/pkg/loghttp/push/otlp.go
index c25477a984e2..cb73f6db59ee 100644
--- a/pkg/loghttp/push/otlp.go
+++ b/pkg/loghttp/push/otlp.go
@@ -43,14 +43,14 @@ func newPushStats() *Stats {
}
}
-func ParseOTLPRequest(userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits) (*logproto.PushRequest, *Stats, error) {
+func ParseOTLPRequest(userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, tracker UsageTracker) (*logproto.PushRequest, *Stats, error) {
stats := newPushStats()
otlpLogs, err := extractLogs(r, stats)
if err != nil {
return nil, nil, err
}
- req := otlpToLokiPushRequest(otlpLogs, userID, tenantsRetention, limits.OTLPConfig(userID), stats)
+ req := otlpToLokiPushRequest(otlpLogs, userID, tenantsRetention, limits.OTLPConfig(userID), tracker, stats)
return req, stats, nil
}
@@ -101,7 +101,7 @@ func extractLogs(r *http.Request, pushStats *Stats) (plog.Logs, error) {
return req.Logs(), nil
}
-func otlpToLokiPushRequest(ld plog.Logs, userID string, tenantsRetention TenantsRetention, otlpConfig OTLPConfig, stats *Stats) *logproto.PushRequest {
+func otlpToLokiPushRequest(ld plog.Logs, userID string, tenantsRetention TenantsRetention, otlpConfig OTLPConfig, tracker UsageTracker, stats *Stats) *logproto.PushRequest {
if ld.LogRecordCount() == 0 {
return &logproto.PushRequest{}
}
@@ -145,6 +145,7 @@ func otlpToLokiPushRequest(ld plog.Logs, userID string, tenantsRetention Tenants
labelsStr := streamLabels.String()
lbs := modelLabelsSetToLabelsList(streamLabels)
+
if _, ok := pushRequestsByStream[labelsStr]; !ok {
pushRequestsByStream[labelsStr] = logproto.Stream{
Labels: labelsStr,
@@ -223,8 +224,15 @@ func otlpToLokiPushRequest(ld plog.Logs, userID string, tenantsRetention Tenants
stream.Entries = append(stream.Entries, entry)
pushRequestsByStream[labelsStr] = stream
- stats.structuredMetadataBytes[tenantsRetention.RetentionPeriodFor(userID, lbs)] += int64(labelsSize(entry.StructuredMetadata) - resourceAttributesAsStructuredMetadataSize - scopeAttributesAsStructuredMetadataSize)
+ metadataSize := int64(labelsSize(entry.StructuredMetadata) - resourceAttributesAsStructuredMetadataSize - scopeAttributesAsStructuredMetadataSize)
+ stats.structuredMetadataBytes[tenantsRetention.RetentionPeriodFor(userID, lbs)] += metadataSize
stats.logLinesBytes[tenantsRetention.RetentionPeriodFor(userID, lbs)] += int64(len(entry.Line))
+
+ if tracker != nil {
+ tracker.ReceivedBytesAdd(userID, tenantsRetention.RetentionPeriodFor(userID, lbs), lbs, float64(len(entry.Line)))
+ tracker.ReceivedBytesAdd(userID, tenantsRetention.RetentionPeriodFor(userID, lbs), lbs, float64(metadataSize))
+ }
+
stats.numLines++
if entry.Timestamp.After(stats.mostRecentEntryTimestamp) {
stats.mostRecentEntryTimestamp = entry.Timestamp
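
Note that the tracker receives two separate `ReceivedBytesAdd` calls per entry, one for line bytes and one for structured-metadata bytes. Since the method is additive per (tenant, retention period, labels) key, a single combined call would be equivalent; a sketch of that hypothetical simplification:

```go
// Hypothetical equivalent: line bytes and structured-metadata bytes share
// the same tenant/retention/labels key, so one Add suffices.
if tracker != nil {
	retention := tenantsRetention.RetentionPeriodFor(userID, lbs)
	tracker.ReceivedBytesAdd(userID, retention, lbs, float64(len(entry.Line))+float64(metadataSize))
}
```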
diff --git a/pkg/loghttp/push/otlp_test.go b/pkg/loghttp/push/otlp_test.go
index badb6cd000e4..593ac380e669 100644
--- a/pkg/loghttp/push/otlp_test.go
+++ b/pkg/loghttp/push/otlp_test.go
@@ -25,6 +25,7 @@ func TestOTLPToLokiPushRequest(t *testing.T) {
expectedPushRequest logproto.PushRequest
expectedStats Stats
otlpConfig OTLPConfig
+ tracker UsageTracker
}{
{
name: "no logs",
@@ -121,6 +122,7 @@ func TestOTLPToLokiPushRequest(t *testing.T) {
{
name: "service.name not defined in resource attributes",
otlpConfig: DefaultOTLPConfig,
+ tracker: NewMockTracker(),
generateLogs: func() plog.Logs {
ld := plog.NewLogs()
ld.ResourceLogs().AppendEmpty().Resource().Attributes().PutStr("service.namespace", "foo")
@@ -152,7 +154,32 @@ func TestOTLPToLokiPushRequest(t *testing.T) {
},
streamLabelsSize: 47,
mostRecentEntryTimestamp: now,
+ /*
+ logLinesBytesCustomTrackers: []customTrackerPair{
+ {
+ Labels: []labels.Label{
+ {Name: "service_namespace", Value: "foo"},
+ {Name: "tracker", Value: "foo"},
+ },
+ Bytes: map[time.Duration]int64{
+ time.Hour: 9,
+ },
+ },
+ },
+ structuredMetadataBytesCustomTrackers: []customTrackerPair{
+ {
+ Labels: []labels.Label{
+ {Name: "service_namespace", Value: "foo"},
+ {Name: "tracker", Value: "foo"},
+ },
+ Bytes: map[time.Duration]int64{
+ time.Hour: 0,
+ },
+ },
+ },
+ */
},
+ // expectedTrackedUsage:
},
{
name: "resource attributes and scope attributes stored as structured metadata",
@@ -459,7 +486,7 @@ func TestOTLPToLokiPushRequest(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
stats := newPushStats()
- pushReq := otlpToLokiPushRequest(tc.generateLogs(), "foo", fakeRetention{}, tc.otlpConfig, stats)
+ pushReq := otlpToLokiPushRequest(tc.generateLogs(), "foo", fakeRetention{}, tc.otlpConfig, tc.tracker, stats)
require.Equal(t, tc.expectedPushRequest, *pushReq)
require.Equal(t, tc.expectedStats, *stats)
})
diff --git a/pkg/loghttp/push/push.go b/pkg/loghttp/push/push.go
index 15b7bba0a78c..012a70386bd7 100644
--- a/pkg/loghttp/push/push.go
+++ b/pkg/loghttp/push/push.go
@@ -36,6 +36,7 @@ var (
Name: "distributor_bytes_received_total",
Help: "The total number of uncompressed bytes received per tenant. Includes structured metadata bytes.",
}, []string{"tenant", "retention_hours"})
+
structuredMetadataBytesIngested = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: constants.Loki,
Name: "distributor_structured_metadata_bytes_received_total",
@@ -62,7 +63,13 @@ type Limits interface {
OTLPConfig(userID string) OTLPConfig
}
-type RequestParser func(userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits) (*logproto.PushRequest, *Stats, error)
+type EmptyLimits struct{}
+
+func (EmptyLimits) OTLPConfig(string) OTLPConfig {
+ return DefaultOTLPConfig
+}
+
+type RequestParser func(userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, tracker UsageTracker) (*logproto.PushRequest, *Stats, error)
type Stats struct {
errs []error
@@ -76,8 +83,8 @@ type Stats struct {
bodySize int64
}
-func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, pushRequestParser RequestParser) (*logproto.PushRequest, error) {
- req, pushStats, err := pushRequestParser(userID, r, tenantsRetention, limits)
+func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, pushRequestParser RequestParser, tracker UsageTracker) (*logproto.PushRequest, error) {
+ req, pushStats, err := pushRequestParser(userID, r, tenantsRetention, limits, tracker)
if err != nil {
return nil, err
}
@@ -87,10 +94,7 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete
structuredMetadataSize int64
)
for retentionPeriod, size := range pushStats.logLinesBytes {
- var retentionHours string
- if retentionPeriod > 0 {
- retentionHours = fmt.Sprintf("%d", int64(math.Floor(retentionPeriod.Hours())))
- }
+ retentionHours := retentionPeriodToString(retentionPeriod)
bytesIngested.WithLabelValues(userID, retentionHours).Add(float64(size))
bytesReceivedStats.Inc(size)
@@ -98,10 +102,7 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete
}
for retentionPeriod, size := range pushStats.structuredMetadataBytes {
- var retentionHours string
- if retentionPeriod > 0 {
- retentionHours = fmt.Sprintf("%d", int64(math.Floor(retentionPeriod.Hours())))
- }
+ retentionHours := retentionPeriodToString(retentionPeriod)
structuredMetadataBytesIngested.WithLabelValues(userID, retentionHours).Add(float64(size))
bytesIngested.WithLabelValues(userID, retentionHours).Add(float64(size))
@@ -135,7 +136,7 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete
return req, nil
}
-func ParseLokiRequest(userID string, r *http.Request, tenantsRetention TenantsRetention, _ Limits) (*logproto.PushRequest, *Stats, error) {
+func ParseLokiRequest(userID string, r *http.Request, tenantsRetention TenantsRetention, _ Limits, tracker UsageTracker) (*logproto.PushRequest, *Stats, error) {
// Body
var body io.Reader
// bodySize should always reflect the compressed size of the request body
@@ -206,12 +207,17 @@ func ParseLokiRequest(userID string, r *http.Request, tenantsRetention TenantsRe
for _, s := range req.Streams {
pushStats.streamLabelsSize += int64(len(s.Labels))
- var retentionPeriod time.Duration
- if tenantsRetention != nil {
- lbs, err := syntax.ParseLabels(s.Labels)
+
+ var lbs labels.Labels
+ if tenantsRetention != nil || tracker != nil {
+ lbs, err = syntax.ParseLabels(s.Labels)
if err != nil {
return nil, nil, fmt.Errorf("couldn't parse labels: %w", err)
}
+ }
+
+ var retentionPeriod time.Duration
+ if tenantsRetention != nil {
retentionPeriod = tenantsRetention.RetentionPeriodFor(userID, lbs)
}
for _, e := range s.Entries {
@@ -222,6 +228,12 @@ func ParseLokiRequest(userID string, r *http.Request, tenantsRetention TenantsRe
}
pushStats.logLinesBytes[retentionPeriod] += int64(len(e.Line))
pushStats.structuredMetadataBytes[retentionPeriod] += entryLabelsSize
+
+ if tracker != nil {
+ tracker.ReceivedBytesAdd(userID, retentionPeriod, lbs, float64(len(e.Line)))
+ tracker.ReceivedBytesAdd(userID, retentionPeriod, lbs, float64(entryLabelsSize))
+ }
+
if e.Timestamp.After(pushStats.mostRecentEntryTimestamp) {
pushStats.mostRecentEntryTimestamp = e.Timestamp
}
@@ -230,3 +242,11 @@ func ParseLokiRequest(userID string, r *http.Request, tenantsRetention TenantsRe
return &req, pushStats, nil
}
+
+func retentionPeriodToString(retentionPeriod time.Duration) string {
+ var retentionHours string
+ if retentionPeriod > 0 {
+ retentionHours = fmt.Sprintf("%d", int64(math.Floor(retentionPeriod.Hours())))
+ }
+ return retentionHours
+}
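
The extracted `retentionPeriodToString` helper preserves the previous inline behavior: a positive retention period is rendered as whole (floored) hours, anything else becomes an empty label value. Illustrative outputs, derived from the implementation above:

```go
// Example values for the helper above:
retentionPeriodToString(31 * 24 * time.Hour) // "744"
retentionPeriodToString(90 * time.Minute)    // "1"  (hours are floored)
retentionPeriodToString(0)                   // ""   (no retention configured)
```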
diff --git a/pkg/loghttp/push/push_test.go b/pkg/loghttp/push/push_test.go
index fa1e2fb28d11..ec4fd8c8f818 100644
--- a/pkg/loghttp/push/push_test.go
+++ b/pkg/loghttp/push/push_test.go
@@ -9,8 +9,10 @@ import (
"net/http/httptest"
"strings"
"testing"
+ "time"
"github.com/prometheus/client_golang/prometheus/testutil"
+ "github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -54,6 +56,7 @@ func TestParseRequest(t *testing.T) {
expectedStructuredMetadataBytes int
expectedBytes int
expectedLines int
+ expectedBytesUsageTracker map[string]float64
}{
{
path: `/loki/api/v1/push`,
@@ -68,21 +71,23 @@ func TestParseRequest(t *testing.T) {
valid: false,
},
{
- path: `/loki/api/v1/push`,
- body: `{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`,
- contentType: `application/json`,
- valid: true,
- expectedBytes: len("fizzbuzz"),
- expectedLines: 1,
+ path: `/loki/api/v1/push`,
+ body: `{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`,
+ contentType: `application/json`,
+ valid: true,
+ expectedBytes: len("fizzbuzz"),
+ expectedLines: 1,
+ expectedBytesUsageTracker: map[string]float64{`{foo="bar2"}`: float64(len("fizzbuzz"))},
},
{
- path: `/loki/api/v1/push`,
- body: `{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`,
- contentType: `application/json`,
- contentEncoding: ``,
- valid: true,
- expectedBytes: len("fizzbuzz"),
- expectedLines: 1,
+ path: `/loki/api/v1/push`,
+ body: `{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`,
+ contentType: `application/json`,
+ contentEncoding: ``,
+ valid: true,
+ expectedBytes: len("fizzbuzz"),
+ expectedLines: 1,
+ expectedBytesUsageTracker: map[string]float64{`{foo="bar2"}`: float64(len("fizzbuzz"))},
},
{
path: `/loki/api/v1/push`,
@@ -92,22 +97,24 @@ func TestParseRequest(t *testing.T) {
valid: false,
},
{
- path: `/loki/api/v1/push`,
- body: gzipString(`{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`),
- contentType: `application/json`,
- contentEncoding: `gzip`,
- valid: true,
- expectedBytes: len("fizzbuzz"),
- expectedLines: 1,
+ path: `/loki/api/v1/push`,
+ body: gzipString(`{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`),
+ contentType: `application/json`,
+ contentEncoding: `gzip`,
+ valid: true,
+ expectedBytes: len("fizzbuzz"),
+ expectedLines: 1,
+ expectedBytesUsageTracker: map[string]float64{`{foo="bar2"}`: float64(len("fizzbuzz"))},
},
{
- path: `/loki/api/v1/push`,
- body: deflateString(`{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`),
- contentType: `application/json`,
- contentEncoding: `deflate`,
- valid: true,
- expectedBytes: len("fizzbuzz"),
- expectedLines: 1,
+ path: `/loki/api/v1/push`,
+ body: deflateString(`{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`),
+ contentType: `application/json`,
+ contentEncoding: `deflate`,
+ valid: true,
+ expectedBytes: len("fizzbuzz"),
+ expectedLines: 1,
+ expectedBytesUsageTracker: map[string]float64{`{foo="bar2"}`: float64(len("fizzbuzz"))},
},
{
path: `/loki/api/v1/push`,
@@ -117,22 +124,24 @@ func TestParseRequest(t *testing.T) {
valid: false,
},
{
- path: `/loki/api/v1/push`,
- body: gzipString(`{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`),
- contentType: `application/json; charset=utf-8`,
- contentEncoding: `gzip`,
- valid: true,
- expectedBytes: len("fizzbuzz"),
- expectedLines: 1,
+ path: `/loki/api/v1/push`,
+ body: gzipString(`{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`),
+ contentType: `application/json; charset=utf-8`,
+ contentEncoding: `gzip`,
+ valid: true,
+ expectedBytes: len("fizzbuzz"),
+ expectedLines: 1,
+ expectedBytesUsageTracker: map[string]float64{`{foo="bar2"}`: float64(len("fizzbuzz"))},
},
{
- path: `/loki/api/v1/push`,
- body: deflateString(`{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`),
- contentType: `application/json; charset=utf-8`,
- contentEncoding: `deflate`,
- valid: true,
- expectedBytes: len("fizzbuzz"),
- expectedLines: 1,
+ path: `/loki/api/v1/push`,
+ body: deflateString(`{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`),
+ contentType: `application/json; charset=utf-8`,
+ contentEncoding: `deflate`,
+ valid: true,
+ expectedBytes: len("fizzbuzz"),
+ expectedLines: 1,
+ expectedBytesUsageTracker: map[string]float64{`{foo="bar2"}`: float64(len("fizzbuzz"))},
},
{
path: `/loki/api/v1/push`,
@@ -185,6 +194,7 @@ func TestParseRequest(t *testing.T) {
expectedStructuredMetadataBytes: 2*len("a") + 2*len("b"),
expectedBytes: len("fizzbuzz") + 2*len("a") + 2*len("b"),
expectedLines: 1,
+ expectedBytesUsageTracker: map[string]float64{`{foo="bar2"}`: float64(len("fizzbuzz") + 2*len("a") + 2*len("b"))},
},
} {
t.Run(fmt.Sprintf("test %d", index), func(t *testing.T) {
@@ -200,7 +210,8 @@ func TestParseRequest(t *testing.T) {
request.Header.Add("Content-Encoding", test.contentEncoding)
}
- data, err := ParseRequest(util_log.Logger, "fake", request, nil, nil, ParseLokiRequest)
+ tracker := NewMockTracker()
+ data, err := ParseRequest(util_log.Logger, "fake", request, nil, nil, ParseLokiRequest, tracker)
structuredMetadataBytesReceived := int(structuredMetadataBytesReceivedStats.Value()["total"].(int64)) - previousStructuredMetadataBytesReceived
previousStructuredMetadataBytesReceived += structuredMetadataBytesReceived
@@ -210,7 +221,7 @@ func TestParseRequest(t *testing.T) {
previousLinesReceived += linesReceived
if test.valid {
- assert.Nil(t, err, "Should not give error for %d", index)
+ assert.NoErrorf(t, err, "Should not give error for %d", index)
assert.NotNil(t, data, "Should give data for %d", index)
require.Equal(t, test.expectedStructuredMetadataBytes, structuredMetadataBytesReceived)
require.Equal(t, test.expectedBytes, bytesReceived)
@@ -218,8 +229,9 @@ func TestParseRequest(t *testing.T) {
require.Equal(t, float64(test.expectedStructuredMetadataBytes), testutil.ToFloat64(structuredMetadataBytesIngested.WithLabelValues("fake", "")))
require.Equal(t, float64(test.expectedBytes), testutil.ToFloat64(bytesIngested.WithLabelValues("fake", "")))
require.Equal(t, float64(test.expectedLines), testutil.ToFloat64(linesIngested.WithLabelValues("fake")))
+ require.InDeltaMapValuesf(t, test.expectedBytesUsageTracker, tracker.receivedBytes, 0.0, "%s != %s", test.expectedBytesUsageTracker, tracker.receivedBytes)
} else {
- assert.NotNil(t, err, "Should give error for %d", index)
+ assert.Errorf(t, err, "Should give error for %d", index)
assert.Nil(t, data, "Should not give data for %d", index)
require.Equal(t, 0, structuredMetadataBytesReceived)
require.Equal(t, 0, bytesReceived)
@@ -231,3 +243,25 @@ func TestParseRequest(t *testing.T) {
})
}
}
+
+type MockCustomTracker struct {
+ receivedBytes map[string]float64
+ discardedBytes map[string]float64
+}
+
+func NewMockTracker() *MockCustomTracker {
+ return &MockCustomTracker{
+ receivedBytes: map[string]float64{},
+ discardedBytes: map[string]float64{},
+ }
+}
+
+// DiscardedBytesAdd implements CustomTracker.
+func (t *MockCustomTracker) DiscardedBytesAdd(_, _ string, labels labels.Labels, value float64) {
+ t.discardedBytes[labels.String()] += value
+}
+
+// ReceivedBytesAdd implements CustomTracker.
+func (t *MockCustomTracker) ReceivedBytesAdd(_ string, _ time.Duration, labels labels.Labels, value float64) {
+ t.receivedBytes[labels.String()] += value
+}
diff --git a/pkg/loghttp/push/usage_tracker.go b/pkg/loghttp/push/usage_tracker.go
new file mode 100644
index 000000000000..ab84da5c6acc
--- /dev/null
+++ b/pkg/loghttp/push/usage_tracker.go
@@ -0,0 +1,16 @@
+package push
+
+import (
+ "time"
+
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+type UsageTracker interface {
+
+ // ReceivedBytesAdd records ingested bytes by tenant, retention period and labels.
+ ReceivedBytesAdd(tenant string, retentionPeriod time.Duration, labels labels.Labels, value float64)
+
+ // DiscardedBytesAdd records discarded bytes by tenant, reason and labels.
+ DiscardedBytesAdd(tenant, reason string, labels labels.Labels, value float64)
+}
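
The interface is deliberately small, so plugging in a concrete tracker is straightforward. Below is a minimal sketch of a Prometheus-backed implementation; the metric names and the choice of keying on a `service_name` stream label are assumptions of this example, not part of the patch:

```go
package main // illustrative only

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/model/labels"
)

// promTracker is a hypothetical UsageTracker backed by Prometheus counters.
type promTracker struct {
	received  *prometheus.CounterVec
	discarded *prometheus.CounterVec
}

func newPromTracker(reg prometheus.Registerer) *promTracker {
	t := &promTracker{
		received: prometheus.NewCounterVec(prometheus.CounterOpts{
			Name: "usage_tracker_received_bytes_total", // invented name
			Help: "Received bytes by tenant and service.",
		}, []string{"tenant", "service"}),
		discarded: prometheus.NewCounterVec(prometheus.CounterOpts{
			Name: "usage_tracker_discarded_bytes_total", // invented name
			Help: "Discarded bytes by tenant, reason and service.",
		}, []string{"tenant", "reason", "service"}),
	}
	reg.MustRegister(t.received, t.discarded)
	return t
}

// ReceivedBytesAdd implements push.UsageTracker; the retention period is
// ignored in this sketch.
func (t *promTracker) ReceivedBytesAdd(tenant string, _ time.Duration, lbs labels.Labels, value float64) {
	t.received.WithLabelValues(tenant, lbs.Get("service_name")).Add(value)
}

// DiscardedBytesAdd implements push.UsageTracker.
func (t *promTracker) DiscardedBytesAdd(tenant, reason string, lbs labels.Labels, value float64) {
	t.discarded.WithLabelValues(tenant, reason, lbs.Get("service_name")).Add(value)
}
```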
diff --git a/pkg/logql/rangemapper.go b/pkg/logql/rangemapper.go
index 14cf76f1475a..f898e19d2ea1 100644
--- a/pkg/logql/rangemapper.go
+++ b/pkg/logql/rangemapper.go
@@ -61,7 +61,7 @@ type RangeMapper struct {
splitAlignTs time.Time
}
-// NewRangeMapperWithSplitAlign is similar to `NewRangeMapper` except it accepts additonal `splitAlign` argument and used to
+// NewRangeMapperWithSplitAlign is similar to `NewRangeMapper` except it accepts additional `splitAlign` argument and used to
// align the subqueries generated according to that. Look at `rangeSplitAlign` method for more information.
func NewRangeMapperWithSplitAlign(interval time.Duration, splitAlign time.Time, metrics *MapperMetrics, stats *MapperStats) (RangeMapper, error) {
rm, err := NewRangeMapper(interval, metrics, stats)
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index d8ee613f6108..63477495f129 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -38,6 +38,7 @@ import (
"github.com/grafana/loki/pkg/distributor"
"github.com/grafana/loki/pkg/ingester"
ingester_client "github.com/grafana/loki/pkg/ingester/client"
+ "github.com/grafana/loki/pkg/loghttp/push"
"github.com/grafana/loki/pkg/loki/common"
"github.com/grafana/loki/pkg/lokifrontend"
"github.com/grafana/loki/pkg/lokifrontend/frontend/transport"
@@ -332,6 +333,8 @@ type Loki struct {
Codec Codec
Metrics *server.Metrics
+
+ UsageTracker push.UsageTracker
}
// New makes a new Loki.
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 5b73be62ca51..0e479204cb63 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -323,6 +323,7 @@ func (t *Loki) initDistributor() (services.Service, error) {
prometheus.DefaultRegisterer,
t.Cfg.MetricsNamespace,
t.Tee,
+ t.UsageTracker,
logger,
)
if err != nil {
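
Nothing in this patch constructs a tracker inside Loki itself; `UsageTracker` is an exported field so that programs embedding Loki can inject their own implementation before the modules start. A hypothetical embedding sketch, assuming the `loki.New`/`Run` entry points:

```go
// Hypothetical wiring for an embedding program; promTracker refers to the
// sketch after the UsageTracker interface above.
app, err := loki.New(cfg)
if err != nil {
	log.Fatal(err)
}
app.UsageTracker = newPromTracker(prometheus.DefaultRegisterer)
if err := app.Run(loki.RunOpts{}); err != nil {
	log.Fatal(err)
}
```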
From cb7e016d64da83d16412ad0ec7ab8119ec982813 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Mon, 26 Feb 2024 20:47:38 +0100
Subject: [PATCH 119/130] chore(blooms): Populate blocks cache on startup
(#12056)
Signed-off-by: Christian Haudum
---
pkg/loki/modules.go | 2 +
.../stores/shipper/bloomshipper/cache.go | 29 +++++++
.../stores/shipper/bloomshipper/cache_test.go | 82 +++++++++++++++++++
.../stores/shipper/bloomshipper/fetcher.go | 8 +-
.../stores/shipper/bloomshipper/interval.go | 31 +++++++
.../stores/shipper/bloomshipper/resolver.go | 39 +++++++++
.../shipper/bloomshipper/resolver_test.go | 54 ++++++++++++
7 files changed, 243 insertions(+), 2 deletions(-)
create mode 100644 pkg/storage/stores/shipper/bloomshipper/resolver_test.go
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 0e479204cb63..3623a69ba507 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -673,6 +673,8 @@ func (t *Loki) initBloomStore() (services.Service, error) {
var blocksCache cache.TypedCache[string, bloomshipper.BlockDirectory]
if bsCfg.BlocksCache.IsEnabled() {
blocksCache = bloomshipper.NewBlocksCache(bsCfg.BlocksCache, reg, logger)
+ err = bloomshipper.LoadBlocksDirIntoCache(t.Cfg.StorageConfig.BloomShipperConfig.WorkingDirectory, blocksCache, logger)
+ if err != nil {
+ level.Warn(logger).Log("msg", "failed to preload blocks cache", "err", err)
+ }
}
t.BloomStore, err = bloomshipper.NewBloomStore(t.Cfg.SchemaConfig.Configs, t.Cfg.StorageConfig, t.clientMetrics, metasCache, blocksCache, logger)
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache.go b/pkg/storage/stores/shipper/bloomshipper/cache.go
index d249cb68ce56..dd2538902bd9 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache.go
@@ -1,9 +1,12 @@
package bloomshipper
import (
+ "context"
"fmt"
+ "io/fs"
"os"
"path"
+ "path/filepath"
"time"
"github.com/go-kit/log"
@@ -49,6 +52,32 @@ func NewBlocksCache(cfg cache.EmbeddedCacheConfig, reg prometheus.Registerer, lo
})
}
+func LoadBlocksDirIntoCache(path string, c cache.TypedCache[string, BlockDirectory], logger log.Logger) error {
+ keys, values := loadBlockDirectories(path, logger)
+ return c.Store(context.Background(), keys, values)
+}
+
+func loadBlockDirectories(path string, logger log.Logger) (keys []string, values []BlockDirectory) {
+ resolver := NewPrefixedResolver(path, defaultKeyResolver{})
+ _ = filepath.WalkDir(path, func(filename string, dirEntry fs.DirEntry, _ error) error {
+ if !dirEntry.IsDir() {
+ return nil
+ }
+ ref, err := resolver.ParseBlockKey(key(filename))
+ if err != nil {
+ return nil
+ }
+ if ok, clean := isBlockDir(filename, logger); ok {
+ keys = append(keys, resolver.Block(ref).Addr())
+ values = append(values, NewBlockDirectory(ref, filename, logger))
+ } else {
+ _ = clean(filename)
+ }
+ return nil
+ })
+ return
+}
+
func calculateBlockDirectorySize(entry *cache.Entry[string, BlockDirectory]) uint64 {
value := entry.Value
bloomFileStats, _ := os.Lstat(path.Join(value.Path, v1.BloomFileName))
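
For reference, `loadBlockDirectories` only caches directories whose paths parse under the prefixed resolver's key layout, and it deletes directories that fail the `isBlockDir` check (missing bloom or series file). A minimal sketch of the preload step, mirroring the wiring in `initBloomStore` above; the `cfg` fields are assumptions of this example:

```go
// Expected on-disk layout, per the resolver tests:
//   <workingDir>/bloom/<table>/<tenant>/blocks/<minFp>-<maxFp>/<start>-<end>-<checksum>
blocksCache := bloomshipper.NewBlocksCache(cfg.BlocksCache, prometheus.DefaultRegisterer, logger)
if err := bloomshipper.LoadBlocksDirIntoCache(cfg.WorkingDirectory, blocksCache, logger); err != nil {
	level.Warn(logger).Log("msg", "failed to preload blocks cache", "err", err)
}
```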
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache_test.go b/pkg/storage/stores/shipper/bloomshipper/cache_test.go
index dc078ab702c0..d2b92ed4128e 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache_test.go
@@ -1,14 +1,61 @@
package bloomshipper
import (
+ "context"
+ "os"
+ "path/filepath"
+ "sync"
"testing"
"time"
"github.com/go-kit/log"
+ "github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
)
+type mockCache[K comparable, V any] struct {
+ sync.Mutex
+ cache map[K]V
+}
+
+func (m *mockCache[K, V]) Store(_ context.Context, keys []K, values []V) error {
+ m.Lock()
+ defer m.Unlock()
+ for i := range keys {
+ m.cache[keys[i]] = values[i]
+ }
+ return nil
+}
+
+func (m *mockCache[K, V]) Fetch(_ context.Context, keys []K) (found []K, values []V, missing []K, err error) {
+ m.Lock()
+ defer m.Unlock()
+ for _, key := range keys {
+ buf, ok := m.cache[key]
+ if ok {
+ found = append(found, key)
+ values = append(values, buf)
+ } else {
+ missing = append(missing, key)
+ }
+ }
+ return
+}
+
+func (m *mockCache[K, V]) Stop() {
+}
+
+func (m *mockCache[K, V]) GetCacheType() stats.CacheType {
+ return "mock"
+}
+
+func newTypedMockCache[K comparable, V any]() *mockCache[K, V] {
+ return &mockCache[K, V]{
+ cache: make(map[K]V),
+ }
+}
+
func TestBlockDirectory_Cleanup(t *testing.T) {
checkInterval := 50 * time.Millisecond
timeout := 200 * time.Millisecond
@@ -65,3 +112,38 @@ func Test_ClosableBlockQuerier(t *testing.T) {
require.NoError(t, querier.Close())
require.Equal(t, int32(0), blockDir.refCount.Load())
}
+
+func Test_LoadBlocksDirIntoCache(t *testing.T) {
+ logger := log.NewNopLogger()
+ wd := t.TempDir()
+
+ // plain file
+ fp, _ := os.Create(filepath.Join(wd, "regular-file.tar.gz"))
+ fp.Close()
+
+ // invalid directory
+ _ = os.MkdirAll(filepath.Join(wd, "not/a/valid/blockdir"), 0o755)
+
+ // empty block directory
+ fn1 := "bloom/table_1/tenant/blocks/0000000000000000-000000000000ffff/0-3600000-abcd"
+ _ = os.MkdirAll(filepath.Join(wd, fn1), 0o755)
+
+ // valid block directory
+ fn2 := "bloom/table_2/tenant/blocks/0000000000010000-000000000001ffff/0-3600000-abcd"
+ _ = os.MkdirAll(filepath.Join(wd, fn2), 0o755)
+ fp, _ = os.Create(filepath.Join(wd, fn2, "bloom"))
+ fp.Close()
+ fp, _ = os.Create(filepath.Join(wd, fn2, "series"))
+ fp.Close()
+
+ c := newTypedMockCache[string, BlockDirectory]()
+ err := LoadBlocksDirIntoCache(wd, c, logger)
+ require.NoError(t, err)
+
+ require.Equal(t, 1, len(c.cache))
+
+ key := filepath.Join(wd, fn2) + ".tar.gz"
+ blockDir, found := c.cache[key]
+ require.True(t, found)
+ require.Equal(t, filepath.Join(wd, fn2), blockDir.Path)
+}
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher.go b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
index 366b37ec96dd..bda368dae8d7 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
@@ -240,9 +240,13 @@ func (f *Fetcher) loadBlocksFromFS(_ context.Context, refs []BlockRef) ([]BlockD
var noopClean = func(string) error { return nil }
func (f *Fetcher) isBlockDir(path string) (bool, func(string) error) {
+ return isBlockDir(path, f.logger)
+}
+
+func isBlockDir(path string, logger log.Logger) (bool, func(string) error) {
info, err := os.Stat(path)
if err != nil && os.IsNotExist(err) {
- level.Warn(f.logger).Log("msg", "path does not exist", "path", path)
+ level.Warn(logger).Log("msg", "path does not exist", "path", path)
return false, noopClean
}
if !info.IsDir() {
@@ -253,7 +257,7 @@ func (f *Fetcher) isBlockDir(path string) (bool, func(string) error) {
filepath.Join(path, v1.SeriesFileName),
} {
if _, err := os.Stat(file); err != nil && os.IsNotExist(err) {
- level.Warn(f.logger).Log("msg", "path does not contain required file", "path", path, "file", file)
+ level.Warn(logger).Log("msg", "path does not contain required file", "path", path, "file", file)
return false, os.RemoveAll
}
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/interval.go b/pkg/storage/stores/shipper/bloomshipper/interval.go
index 2ddb3716e499..430bde1a7680 100644
--- a/pkg/storage/stores/shipper/bloomshipper/interval.go
+++ b/pkg/storage/stores/shipper/bloomshipper/interval.go
@@ -3,6 +3,8 @@ package bloomshipper
import (
"fmt"
"hash"
+ "strconv"
+ "strings"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
@@ -57,3 +59,32 @@ func (i Interval) Overlaps(target Interval) bool {
func (i Interval) Within(target Interval) bool {
return i.Start >= target.Start && i.End <= target.End
}
+
+// ParseIntervalFromAddr parses an Interval from a string of the form "<start>-<end>".
+// Does not support negative times (times prior to Unix epoch).
+func ParseIntervalFromAddr(s string) (Interval, error) {
+ parts := strings.Split(s, "-")
+ return ParseIntervalFromParts(parts[0], parts[1])
+}
+
+// ParseIntervalFromParts parses an Interval from two already separated timestamp strings.
+func ParseIntervalFromParts(a, b string) (Interval, error) {
+ minTs, err := ParseTime(a)
+ if err != nil {
+ return Interval{}, fmt.Errorf("error parsing minTimestamp %s : %w", a, err)
+ }
+ maxTs, err := ParseTime(b)
+ if err != nil {
+ return Interval{}, fmt.Errorf("error parsing maxTimestamp %s : %w", b, err)
+ }
+ return NewInterval(minTs, maxTs), nil
+}
+
+// ParseTime parses the input string into a model.Time.
+func ParseTime(s string) (model.Time, error) {
+ num, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return model.Time(num), nil
+}
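
A quick round-trip example for the new parsing helpers, assuming the `<start>-<end>` millisecond-timestamp format used in block keys:

```go
// "0-3600000" denotes the first hour after the Unix epoch.
iv, err := bloomshipper.ParseIntervalFromAddr("0-3600000")
if err != nil {
	panic(err)
}
// iv == Interval{Start: model.Time(0), End: model.Time(3600000)}
_ = iv
```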
diff --git a/pkg/storage/stores/shipper/bloomshipper/resolver.go b/pkg/storage/stores/shipper/bloomshipper/resolver.go
index 7d224b9f0139..b88a48758d63 100644
--- a/pkg/storage/stores/shipper/bloomshipper/resolver.go
+++ b/pkg/storage/stores/shipper/bloomshipper/resolver.go
@@ -29,6 +29,7 @@ type KeyResolver interface {
Meta(MetaRef) Location
ParseMetaKey(Location) (MetaRef, error)
Block(BlockRef) Location
+ ParseBlockKey(Location) (BlockRef, error)
}
type defaultKeyResolver struct{}
@@ -85,6 +86,44 @@ func (defaultKeyResolver) Block(ref BlockRef) Location {
}
}
+func (defaultKeyResolver) ParseBlockKey(loc Location) (BlockRef, error) {
+ dir, fn := path.Split(loc.Addr())
+ fnParts := strings.Split(fn, "-")
+ if len(fnParts) != 3 {
+ return BlockRef{}, fmt.Errorf("failed to split filename parts of block key %s : len must be 3, but was %d", loc, len(fnParts))
+ }
+ interval, err := ParseIntervalFromParts(fnParts[0], fnParts[1])
+ if err != nil {
+ return BlockRef{}, fmt.Errorf("failed to parse bounds of meta key %s : %w", loc, err)
+ }
+ withoutExt := strings.TrimSuffix(fnParts[2], extTarGz)
+ checksum, err := strconv.ParseUint(withoutExt, 16, 64)
+ if err != nil {
+ return BlockRef{}, fmt.Errorf("failed to parse checksum of meta key %s : %w", loc, err)
+ }
+
+ dirParts := strings.Split(path.Clean(dir), "/")
+ if len(dirParts) < 5 {
+ return BlockRef{}, fmt.Errorf("directory parts count must be 5 or greater, but was %d : [%s]", len(dirParts), loc)
+ }
+
+ bounds, err := v1.ParseBoundsFromAddr(dirParts[len(dirParts)-1])
+ if err != nil {
+ return BlockRef{}, fmt.Errorf("failed to parse bounds of block key %s : %w", loc, err)
+ }
+
+ return BlockRef{
+ Ref: Ref{
+ TenantID: dirParts[len(dirParts)-3],
+ TableName: dirParts[len(dirParts)-4],
+ Bounds: bounds,
+ StartTimestamp: interval.Start,
+ EndTimestamp: interval.End,
+ Checksum: uint32(checksum),
+ },
+ }, nil
+}
+
type PrefixedResolver struct {
prefix string
KeyResolver
diff --git a/pkg/storage/stores/shipper/bloomshipper/resolver_test.go b/pkg/storage/stores/shipper/bloomshipper/resolver_test.go
new file mode 100644
index 000000000000..890c5b03ff3f
--- /dev/null
+++ b/pkg/storage/stores/shipper/bloomshipper/resolver_test.go
@@ -0,0 +1,54 @@
+package bloomshipper
+
+import (
+ "testing"
+
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/stretchr/testify/require"
+)
+
+func TestResolver_ParseMetaKey(t *testing.T) {
+ r := defaultKeyResolver{}
+ ref := MetaRef{
+ Ref: Ref{
+ TenantID: "tenant",
+ TableName: "table_1",
+ Bounds: v1.NewBounds(0x0000, 0xffff),
+ Checksum: 43981,
+ },
+ }
+
+ // encode block ref as string
+ loc := r.Meta(ref)
+ path := loc.LocalPath()
+ require.Equal(t, "bloom/table_1/tenant/metas/0000000000000000-000000000000ffff-abcd.json", path)
+
+ // parse encoded string into block ref
+ parsed, err := r.ParseMetaKey(key(path))
+ require.NoError(t, err)
+ require.Equal(t, ref, parsed)
+}
+
+func TestResolver_ParseBlockKey(t *testing.T) {
+ r := defaultKeyResolver{}
+ ref := BlockRef{
+ Ref: Ref{
+ TenantID: "tenant",
+ TableName: "table_1",
+ Bounds: v1.NewBounds(0x0000, 0xffff),
+ StartTimestamp: 0,
+ EndTimestamp: 3600000,
+ Checksum: 43981,
+ },
+ }
+
+ // encode block ref as string
+ loc := r.Block(ref)
+ path := loc.LocalPath()
+ require.Equal(t, "bloom/table_1/tenant/blocks/0000000000000000-000000000000ffff/0-3600000-abcd.tar.gz", path)
+
+ // parse encoded string into block ref
+ parsed, err := r.ParseBlockKey(key(path))
+ require.NoError(t, err)
+ require.Equal(t, ref, parsed)
+}
From 60f75f33958fd144e86e49a047988564384a59b4 Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Mon, 26 Feb 2024 11:51:46 -0800
Subject: [PATCH 120/130] fix(blooms): closes chunk iterator (#12053)
---
pkg/storage/bloom/v1/bloom_tokenizer.go | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go
index 59bb2644f87e..9b80fb92de88 100644
--- a/pkg/storage/bloom/v1/bloom_tokenizer.go
+++ b/pkg/storage/bloom/v1/bloom_tokenizer.go
@@ -6,7 +6,9 @@ import (
"time"
"github.com/go-kit/log/level"
+ "github.com/pkg/errors"
+ "github.com/grafana/dskit/multierror"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/util/encoding"
@@ -94,18 +96,19 @@ func (bt *BloomTokenizer) Populate(swb *SeriesWithBloom, chks Iterator[ChunkRefW
var tokenBuf []byte
var prefixLn int
+ // Iterate over chunks
for chks.Next() && chks.Err() == nil {
chk := chks.At()
itr := chk.Itr
tokenBuf, prefixLn = prefixedToken(bt.lineTokenizer.N, chk.Ref, tokenBuf)
- defer itr.Close()
-
+ // Iterate over lines in the chunk
for itr.Next() && itr.Error() == nil {
// TODO(owen-d): rather than iterate over the line twice, once for prefixed tokenizer & once for
// raw tokenizer, we could iterate once and just return (prefix, token) pairs from the tokenizer.
// Double points for them being different-ln references to the same data.
- chunkTokenizer := NewPrefixedTokenIter(tokenBuf, prefixLn, bt.lineTokenizer.Tokens(itr.Entry().Line))
+ line := itr.Entry().Line
+ chunkTokenizer := NewPrefixedTokenIter(tokenBuf, prefixLn, bt.lineTokenizer.Tokens(line))
for chunkTokenizer.Next() {
tok := chunkTokenizer.At()
if tok != nil {
@@ -123,7 +126,7 @@ func (bt *BloomTokenizer) Populate(swb *SeriesWithBloom, chks Iterator[ChunkRefW
}
}
}
- lineTokenizer := bt.lineTokenizer.Tokens(itr.Entry().Line)
+ lineTokenizer := bt.lineTokenizer.Tokens(line)
for lineTokenizer.Next() {
tok := lineTokenizer.At()
if tok != nil {
@@ -142,11 +145,19 @@ func (bt *BloomTokenizer) Populate(swb *SeriesWithBloom, chks Iterator[ChunkRefW
}
}
+ var es multierror.MultiError
+ if err := itr.Close(); err != nil {
+ es.Add(errors.Wrapf(err, "error closing chunk: %#v", chk.Ref))
+ }
if err := itr.Error(); err != nil {
- return fmt.Errorf("error iterating chunk: %#v, %w", chk.Ref, err)
+ es.Add(errors.Wrapf(err, "error iterating chunk: %#v", chk.Ref))
+ }
+ if combined := es.Err(); combined != nil {
+ return combined
}
swb.Series.Chunks = append(swb.Series.Chunks, chk.Ref)
}
+
if err := chks.Err(); err != nil {
level.Error(util_log.Logger).Log("msg", "error downloading chunks batch", "err", err)
return fmt.Errorf("error downloading chunks batch: %w", err)
From b1a61b3df60f11b6cee7d44b0fba0d12d59886a0 Mon Sep 17 00:00:00 2001
From: Shantanu Alshi
Date: Tue, 27 Feb 2024 15:52:33 +0530
Subject: [PATCH 121/130] chore: Change local setup under dev to use tsdb and
v13 schema (#12057)
Co-authored-by: Christian Haudum
---
Makefile | 2 +-
pkg/ingester/index/bitprefix_test.go | 6 +++---
pkg/ingester/index/index_test.go | 6 +++---
.../.dockerignore | 0
.../.gitignore | 0
.../README.md | 10 +++++++---
.../compose-down.sh | 0
.../compose-up.sh | 0
.../config/datasource.yaml | 0
.../config/loki.yaml | 14 ++++++++++----
.../dev.dockerfile | 6 +++---
.../docker-compose.yml | 6 +++---
12 files changed, 30 insertions(+), 20 deletions(-)
rename tools/dev/{loki-boltdb-storage-s3 => loki-tsdb-storage-s3}/.dockerignore (100%)
rename tools/dev/{loki-boltdb-storage-s3 => loki-tsdb-storage-s3}/.gitignore (100%)
rename tools/dev/{loki-boltdb-storage-s3 => loki-tsdb-storage-s3}/README.md (86%)
rename tools/dev/{loki-boltdb-storage-s3 => loki-tsdb-storage-s3}/compose-down.sh (100%)
rename tools/dev/{loki-boltdb-storage-s3 => loki-tsdb-storage-s3}/compose-up.sh (100%)
rename tools/dev/{loki-boltdb-storage-s3 => loki-tsdb-storage-s3}/config/datasource.yaml (100%)
rename tools/dev/{loki-boltdb-storage-s3 => loki-tsdb-storage-s3}/config/loki.yaml (91%)
rename tools/dev/{loki-boltdb-storage-s3 => loki-tsdb-storage-s3}/dev.dockerfile (56%)
rename tools/dev/{loki-boltdb-storage-s3 => loki-tsdb-storage-s3}/docker-compose.yml (98%)
diff --git a/Makefile b/Makefile
index 865e16ec3ba7..4789bf7e319d 100644
--- a/Makefile
+++ b/Makefile
@@ -816,7 +816,7 @@ validate-example-configs: loki
for f in $$(grep -rL $(EXAMPLES_SKIP_VALIDATION_FLAG) $(EXAMPLES_YAML_PATH)/*.yaml); do echo "Validating provided example config: $$f" && ./cmd/loki/loki -config.file=$$f -verify-config || exit 1; done
validate-dev-cluster-config: loki
- ./cmd/loki/loki -config.file=./tools/dev/loki-boltdb-storage-s3/config/loki.yaml -verify-config
+ ./cmd/loki/loki -config.file=./tools/dev/loki-tsdb-storage-s3/config/loki.yaml -verify-config
# Dynamically generate ./docs/sources/configure/examples.md using the example configs that we provide.
# This target should be run if any of our example configs change.
diff --git a/pkg/ingester/index/bitprefix_test.go b/pkg/ingester/index/bitprefix_test.go
index 00640f42a23a..d4afb9f63572 100644
--- a/pkg/ingester/index/bitprefix_test.go
+++ b/pkg/ingester/index/bitprefix_test.go
@@ -90,9 +90,9 @@ func Test_BitPrefixDeleteAddLoopkup(t *testing.T) {
func Test_BitPrefix_hash_mapping(t *testing.T) {
lbs := labels.Labels{
- labels.Label{Name: "compose_project", Value: "loki-boltdb-storage-s3"},
+ labels.Label{Name: "compose_project", Value: "loki-tsdb-storage-s3"},
labels.Label{Name: "compose_service", Value: "ingester-2"},
- labels.Label{Name: "container_name", Value: "loki-boltdb-storage-s3_ingester-2_1"},
+ labels.Label{Name: "container_name", Value: "loki-tsdb-storage-s3_ingester-2_1"},
labels.Label{Name: "filename", Value: "/var/log/docker/790fef4c6a587c3b386fe85c07e03f3a1613f4929ca3abaa4880e14caadb5ad1/json.log"},
labels.Label{Name: "host", Value: "docker-desktop"},
labels.Label{Name: "source", Value: "stderr"},
@@ -115,7 +115,7 @@ func Test_BitPrefix_hash_mapping(t *testing.T) {
res, err := ii.Lookup(
[]*labels.Matcher{{Type: labels.MatchEqual,
Name: "compose_project",
- Value: "loki-boltdb-storage-s3"}},
+ Value: "loki-tsdb-storage-s3"}},
&astmapper.ShardAnnotation{
Shard: int(expShard),
Of: requestedFactor,
diff --git a/pkg/ingester/index/index_test.go b/pkg/ingester/index/index_test.go
index 3cd5d0873469..bc6aaeebf344 100644
--- a/pkg/ingester/index/index_test.go
+++ b/pkg/ingester/index/index_test.go
@@ -95,9 +95,9 @@ func TestDeleteAddLoopkup(t *testing.T) {
func Test_hash_mapping(t *testing.T) {
lbs := labels.Labels{
- labels.Label{Name: "compose_project", Value: "loki-boltdb-storage-s3"},
+ labels.Label{Name: "compose_project", Value: "loki-tsdb-storage-s3"},
labels.Label{Name: "compose_service", Value: "ingester-2"},
- labels.Label{Name: "container_name", Value: "loki-boltdb-storage-s3_ingester-2_1"},
+ labels.Label{Name: "container_name", Value: "loki-tsdb-storage-s3_ingester-2_1"},
labels.Label{Name: "filename", Value: "/var/log/docker/790fef4c6a587c3b386fe85c07e03f3a1613f4929ca3abaa4880e14caadb5ad1/json.log"},
labels.Label{Name: "host", Value: "docker-desktop"},
labels.Label{Name: "source", Value: "stderr"},
@@ -108,7 +108,7 @@ func Test_hash_mapping(t *testing.T) {
ii := NewWithShards(shard)
ii.Add(logproto.FromLabelsToLabelAdapters(lbs), 1)
- res, err := ii.Lookup([]*labels.Matcher{{Type: labels.MatchEqual, Name: "compose_project", Value: "loki-boltdb-storage-s3"}}, &astmapper.ShardAnnotation{Shard: int(labelsSeriesIDHash(lbs) % 16), Of: 16})
+ res, err := ii.Lookup([]*labels.Matcher{{Type: labels.MatchEqual, Name: "compose_project", Value: "loki-tsdb-storage-s3"}}, &astmapper.ShardAnnotation{Shard: int(labelsSeriesIDHash(lbs) % 16), Of: 16})
require.NoError(t, err)
require.Len(t, res, 1)
require.Equal(t, model.Fingerprint(1), res[0])
diff --git a/tools/dev/loki-boltdb-storage-s3/.dockerignore b/tools/dev/loki-tsdb-storage-s3/.dockerignore
similarity index 100%
rename from tools/dev/loki-boltdb-storage-s3/.dockerignore
rename to tools/dev/loki-tsdb-storage-s3/.dockerignore
diff --git a/tools/dev/loki-boltdb-storage-s3/.gitignore b/tools/dev/loki-tsdb-storage-s3/.gitignore
similarity index 100%
rename from tools/dev/loki-boltdb-storage-s3/.gitignore
rename to tools/dev/loki-tsdb-storage-s3/.gitignore
diff --git a/tools/dev/loki-boltdb-storage-s3/README.md b/tools/dev/loki-tsdb-storage-s3/README.md
similarity index 86%
rename from tools/dev/loki-boltdb-storage-s3/README.md
rename to tools/dev/loki-tsdb-storage-s3/README.md
index b54372eab399..63041bb9cd05 100644
--- a/tools/dev/loki-boltdb-storage-s3/README.md
+++ b/tools/dev/loki-tsdb-storage-s3/README.md
@@ -7,7 +7,7 @@ It runs the current code base in the repository this means you can debug new fea
To start the stack, simply run:
```bash
-./tools/dev/loki-boltdb-storage-s3/compose-up.sh
+./tools/dev/loki-tsdb-storage-s3/compose-up.sh
```
You can then access Grafana locally at http://localhost:3000 (default account admin/admin). The Grafana container should already have the datasource configured to query the frontend.
@@ -15,7 +15,7 @@ You can then access grafana locally with http://localhost:3000 (default account
To tear it down, use:
```bash
-./tools/dev/loki-boltdb-storage-s3/compose-down.sh
+./tools/dev/loki-tsdb-storage-s3/compose-down.sh
```
> On macOS :apple: Docker can get stuck when restarting the stack; you can restart Docker to work around the problem :shrug:
@@ -58,7 +58,7 @@ If you use vs-code, you can add this snippet bellow in your [`launch.json`](http
],
"port": 18002,
"host": "127.0.0.1",
- "cwd": "${workspaceFolder}/tools/dev/loki-boltdb-storage-s3/loki",
+ "cwd": "${workspaceFolder}/tools/dev/loki-tsdb-storage-s3/loki",
"remotePath": "/loki/loki",
"showLog": true,
"trace": "log",
@@ -67,3 +67,7 @@ If you use vs-code, you can add this snippet bellow in your [`launch.json`](http
```
Then you can debug `ingester-1` with the `Launch Loki remote` configuration within the debugging tab.
+
+### GoLand IDE
+
+If you use the [GoLand](https://www.jetbrains.com/go/) IDE, create a Go Remote debug configuration and point it at the port of the process you wish to debug.
\ No newline at end of file
diff --git a/tools/dev/loki-boltdb-storage-s3/compose-down.sh b/tools/dev/loki-tsdb-storage-s3/compose-down.sh
similarity index 100%
rename from tools/dev/loki-boltdb-storage-s3/compose-down.sh
rename to tools/dev/loki-tsdb-storage-s3/compose-down.sh
diff --git a/tools/dev/loki-boltdb-storage-s3/compose-up.sh b/tools/dev/loki-tsdb-storage-s3/compose-up.sh
similarity index 100%
rename from tools/dev/loki-boltdb-storage-s3/compose-up.sh
rename to tools/dev/loki-tsdb-storage-s3/compose-up.sh
diff --git a/tools/dev/loki-boltdb-storage-s3/config/datasource.yaml b/tools/dev/loki-tsdb-storage-s3/config/datasource.yaml
similarity index 100%
rename from tools/dev/loki-boltdb-storage-s3/config/datasource.yaml
rename to tools/dev/loki-tsdb-storage-s3/config/datasource.yaml
diff --git a/tools/dev/loki-boltdb-storage-s3/config/loki.yaml b/tools/dev/loki-tsdb-storage-s3/config/loki.yaml
similarity index 91%
rename from tools/dev/loki-boltdb-storage-s3/config/loki.yaml
rename to tools/dev/loki-tsdb-storage-s3/config/loki.yaml
index ea0cf186e269..4ab4056956d3 100644
--- a/tools/dev/loki-boltdb-storage-s3/config/loki.yaml
+++ b/tools/dev/loki-tsdb-storage-s3/config/loki.yaml
@@ -1,6 +1,6 @@
auth_enabled: true
common:
- compactor_address: compactor:8006
+ compactor_address: http://compactor:8006
chunk_store_config:
chunk_cache_config:
memcached:
@@ -29,6 +29,7 @@ frontend:
log_queries_longer_than: 5s
max_outstanding_per_tenant: 512
encoding: protobuf
+ scheduler_address: query-scheduler:9009
frontend_worker:
grpc_client_config:
max_send_msg_size: 1.048576e+08
@@ -103,10 +104,10 @@ schema_config:
- from: "2020-07-30"
index:
period: 24h
- prefix: loki_boltdb_shipper_index_
+ prefix: index_
object_store: s3
- schema: v11
- store: boltdb-shipper
+ schema: v13
+ store: tsdb
row_shards: 4
server:
graceful_shutdown_timeout: 5s
@@ -134,6 +135,11 @@ storage_config:
parallelism: 10
memcached_client:
addresses: memcached:11211
+ tsdb_shipper:
+ active_index_directory: /data/tsdb-index
+ cache_location: /data/tsdb-cache
+ index_gateway_client:
+ server_address: index-gateway:9008
table_manager:
creation_grace_period: 3h
poll_interval: 10m
diff --git a/tools/dev/loki-boltdb-storage-s3/dev.dockerfile b/tools/dev/loki-tsdb-storage-s3/dev.dockerfile
similarity index 56%
rename from tools/dev/loki-boltdb-storage-s3/dev.dockerfile
rename to tools/dev/loki-tsdb-storage-s3/dev.dockerfile
index f975c76d102c..cfe916feebee 100644
--- a/tools/dev/loki-boltdb-storage-s3/dev.dockerfile
+++ b/tools/dev/loki-tsdb-storage-s3/dev.dockerfile
@@ -1,8 +1,8 @@
-FROM golang:1.20.4
+FROM golang:1.21.3
ENV CGO_ENABLED=0
-RUN go install github.com/go-delve/delve/cmd/dlv@v1.21.1
+RUN go install github.com/go-delve/delve/cmd/dlv@v1.22.1
-FROM alpine:3.18.5
+FROM alpine:3.19.1
RUN mkdir /loki
WORKDIR /loki
diff --git a/tools/dev/loki-boltdb-storage-s3/docker-compose.yml b/tools/dev/loki-tsdb-storage-s3/docker-compose.yml
similarity index 98%
rename from tools/dev/loki-boltdb-storage-s3/docker-compose.yml
rename to tools/dev/loki-tsdb-storage-s3/docker-compose.yml
index 4af04b766449..24846bbb321e 100644
--- a/tools/dev/loki-boltdb-storage-s3/docker-compose.yml
+++ b/tools/dev/loki-tsdb-storage-s3/docker-compose.yml
@@ -15,9 +15,9 @@ services:
minio:
logging:
<<: *logging
- image: minio/minio:RELEASE.2022-03-11T23-57-45Z
+ image: minio/minio:RELEASE.2024-02-24T17-11-14Z
entrypoint: sh
- command: -c 'mkdir -p /data/loki && /opt/bin/minio server --console-address :9001 /data'
+ command: -c 'mkdir -p /data/loki && /usr/bin/minio server --console-address :9001 /data'
environment:
- MINIO_ACCESS_KEY=loki
- MINIO_SECRET_KEY=supersecret
@@ -30,7 +30,7 @@ services:
memcached:
logging:
<<: *logging
- image: memcached:1.6
+ image: memcached:1.6.17-alpine
jaeger:
logging:
From 609fdb321491c88bda25b8fcb399d479df542c5d Mon Sep 17 00:00:00 2001
From: Danny Kopping
Date: Tue, 27 Feb 2024 14:41:59 +0200
Subject: [PATCH 122/130] feat(querier/query-frontend): track and log
congestion control latency (#12058)
Signed-off-by: Danny Kopping
---
pkg/logql/metrics.go | 2 +
pkg/logqlmodel/stats/context.go | 13 ++
pkg/logqlmodel/stats/stats.pb.go | 201 +++++++++++-------
pkg/logqlmodel/stats/stats.proto | 3 +
pkg/querier/queryrange/codec_test.go | 3 +
pkg/querier/queryrange/prometheus_test.go | 2 +
.../chunk/client/congestion/controller.go | 6 +
.../client/congestion/controller_test.go | 14 +-
pkg/util/marshal/legacy/marshal_test.go | 2 +
pkg/util/marshal/marshal_test.go | 2 +
10 files changed, 164 insertions(+), 84 deletions(-)
diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go
index f9b9d1a8f952..d5a3f38a21f3 100644
--- a/pkg/logql/metrics.go
+++ b/pkg/logql/metrics.go
@@ -189,6 +189,8 @@ func RecordRangeAndInstantQueryMetrics(
"ingester_chunk_decompressed_bytes", humanizeBytes(uint64(stats.Ingester.Store.Chunk.GetDecompressedBytes())),
// Total lines post filtering.
"ingester_post_filter_lines", stats.Ingester.Store.Chunk.GetPostFilterLines(),
+ // Time spent being blocked on congestion control.
+ "congestion_control_latency", stats.CongestionControlLatency(),
}...)
logValues = append(logValues, tagsToKeyValues(queryTags)...)
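The new `congestion_control_latency` entry surfaces in the query metrics log line as one more flat key-value pair. A minimal sketch of how such a pair renders with go-kit's logfmt logger (the logger setup and values here are illustrative assumptions, not Loki's actual wiring):

```go
package main

import (
	"os"
	"time"

	"github.com/go-kit/log"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stdout)

	// Illustrative value: pretend the query spent 150ms blocked on congestion control.
	congestionLatency := 150 * time.Millisecond

	// Prints: level=info msg="query metrics" congestion_control_latency=150ms
	_ = logger.Log(
		"level", "info",
		"msg", "query metrics",
		"congestion_control_latency", congestionLatency,
	)
}
```

Because `time.Duration` implements `fmt.Stringer`, the value renders in human-readable form such as `150ms` rather than raw nanoseconds.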
diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go
index 1271fa6d9c21..05426840015c 100644
--- a/pkg/logqlmodel/stats/context.go
+++ b/pkg/logqlmodel/stats/context.go
@@ -96,6 +96,11 @@ func (c *Context) Ingester() Ingester {
}
}
+// Querier returns the querier statistics accumulated so far.
+func (c *Context) Querier() Querier {
+ return c.querier
+}
+
// Caches returns the cache statistics accumulated so far.
func (c *Context) Caches() Caches {
return Caches{
@@ -279,6 +284,10 @@ func (r Result) ChunkRefsFetchTime() time.Duration {
return time.Duration(r.Querier.Store.ChunkRefsFetchTime + r.Ingester.Store.ChunkRefsFetchTime)
}
+func (r Result) CongestionControlLatency() time.Duration {
+ return time.Duration(r.Querier.Store.CongestionControlLatency)
+}
+
func (r Result) TotalDuplicates() int64 {
return r.Querier.Store.Chunk.TotalDuplicates + r.Ingester.Store.Chunk.TotalDuplicates
}
@@ -360,6 +369,10 @@ func (c *Context) AddChunkRefsFetchTime(i time.Duration) {
atomic.AddInt64(&c.store.ChunkRefsFetchTime, int64(i))
}
+func (c *Context) AddCongestionControlLatency(i time.Duration) {
+ atomic.AddInt64(&c.querier.Store.CongestionControlLatency, int64(i))
+}
+
func (c *Context) AddChunksDownloaded(i int64) {
atomic.AddInt64(&c.store.TotalChunksDownloaded, i)
}
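The stats context stores durations as `int64` nanoseconds and updates them with `sync/atomic`, so concurrent store workers can record latency without a lock. A self-contained sketch of the same pattern (the type below is a simplification, not Loki's actual `Context`):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// statsContext mirrors the accumulation pattern above: durations are stored as
// int64 nanoseconds so they can be updated atomically from many goroutines.
type statsContext struct {
	congestionControlLatency int64 // nanoseconds
}

func (c *statsContext) AddCongestionControlLatency(d time.Duration) {
	atomic.AddInt64(&c.congestionControlLatency, int64(d))
}

func (c *statsContext) CongestionControlLatency() time.Duration {
	return time.Duration(atomic.LoadInt64(&c.congestionControlLatency))
}

func main() {
	var ctx statsContext
	var wg sync.WaitGroup

	// Ten workers each record 10ms of blocked time concurrently.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			ctx.AddCongestionControlLatency(10 * time.Millisecond)
		}()
	}
	wg.Wait()

	fmt.Println(ctx.CongestionControlLatency()) // 100ms
}
```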
diff --git a/pkg/logqlmodel/stats/stats.pb.go b/pkg/logqlmodel/stats/stats.pb.go
index 65f8f0f64238..f25143272599 100644
--- a/pkg/logqlmodel/stats/stats.pb.go
+++ b/pkg/logqlmodel/stats/stats.pb.go
@@ -476,6 +476,8 @@ type Store struct {
Chunk Chunk `protobuf:"bytes,4,opt,name=chunk,proto3" json:"chunk"`
// Time spent fetching chunk refs from index.
ChunkRefsFetchTime int64 `protobuf:"varint,5,opt,name=chunkRefsFetchTime,proto3" json:"chunkRefsFetchTime"`
+ // Time spent being blocked on congestion control.
+ CongestionControlLatency int64 `protobuf:"varint,6,opt,name=congestionControlLatency,proto3" json:"congestionControlLatency"`
}
func (m *Store) Reset() { *m = Store{} }
@@ -552,6 +554,13 @@ func (m *Store) GetChunkRefsFetchTime() int64 {
return 0
}
+func (m *Store) GetCongestionControlLatency() int64 {
+ if m != nil {
+ return m.CongestionControlLatency
+ }
+ return 0
+}
+
type Chunk struct {
// Total bytes processed but was already in memory (found in the headchunk). Includes structured metadata bytes.
HeadChunkBytes int64 `protobuf:"varint,4,opt,name=headChunkBytes,proto3" json:"headChunkBytes"`
@@ -781,85 +790,87 @@ func init() {
func init() { proto.RegisterFile("pkg/logqlmodel/stats/stats.proto", fileDescriptor_6cdfe5d2aea33ebb) }
var fileDescriptor_6cdfe5d2aea33ebb = []byte{
- // 1241 bytes of a gzipped FileDescriptorProto
+ // 1274 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x57, 0x4b, 0x6f, 0xe3, 0x54,
- 0x14, 0x8e, 0x27, 0xe3, 0xa4, 0xbd, 0x7d, 0xce, 0x6d, 0x87, 0xc9, 0x30, 0x92, 0x5d, 0x02, 0x23,
- 0x8a, 0x40, 0x8d, 0x78, 0x48, 0x08, 0xc4, 0x48, 0xc8, 0x1d, 0x2a, 0x55, 0x6a, 0x45, 0x39, 0x81,
- 0x0d, 0xac, 0x1c, 0xfb, 0x36, 0xb1, 0xea, 0xd8, 0xa9, 0x7d, 0x5d, 0xa6, 0x2b, 0xf8, 0x09, 0xec,
- 0xf9, 0x03, 0x6c, 0x58, 0xb1, 0x42, 0x62, 0xc7, 0x66, 0x96, 0x5d, 0xce, 0xca, 0xa2, 0xe9, 0x06,
- 0x79, 0x35, 0x12, 0x7f, 0x00, 0xdd, 0x47, 0x6c, 0x5f, 0xc7, 0x99, 0xe9, 0x26, 0xbe, 0xe7, 0x3b,
- 0xdf, 0x77, 0xee, 0xc3, 0xe7, 0x1c, 0xdf, 0xa0, 0x9d, 0xc9, 0xd9, 0xb0, 0xe7, 0x87, 0xc3, 0x73,
- 0x7f, 0x1c, 0xba, 0xc4, 0xef, 0xc5, 0xd4, 0xa6, 0xb1, 0xf8, 0xdd, 0x9b, 0x44, 0x21, 0x0d, 0xb1,
- 0xce, 0x8d, 0x37, 0xb7, 0x87, 0xe1, 0x30, 0xe4, 0x48, 0x8f, 0x8d, 0x84, 0xb3, 0xfb, 0x9f, 0x86,
- 0x5a, 0x40, 0xe2, 0xc4, 0xa7, 0xf8, 0x33, 0xd4, 0x8e, 0x93, 0xf1, 0xd8, 0x8e, 0x2e, 0x3b, 0xda,
- 0x8e, 0xb6, 0xbb, 0xf2, 0xd1, 0xfa, 0x9e, 0x08, 0xd3, 0x17, 0xa8, 0xb5, 0xf1, 0x3c, 0x35, 0x1b,
- 0x59, 0x6a, 0xce, 0x68, 0x30, 0x1b, 0x30, 0xe9, 0x79, 0x42, 0x22, 0x8f, 0x44, 0x9d, 0x3b, 0x8a,
- 0xf4, 0x1b, 0x81, 0x16, 0x52, 0x49, 0x83, 0xd9, 0x00, 0x3f, 0x41, 0x4b, 0x5e, 0x30, 0x24, 0x31,
- 0x25, 0x51, 0xa7, 0xc9, 0xb5, 0x1b, 0x52, 0x7b, 0x28, 0x61, 0x6b, 0x53, 0x8a, 0x73, 0x22, 0xe4,
- 0x23, 0xfc, 0x09, 0x6a, 0x39, 0xb6, 0x33, 0x22, 0x71, 0xe7, 0x2e, 0x17, 0xaf, 0x49, 0xf1, 0x3e,
- 0x07, 0xad, 0x35, 0x29, 0xd5, 0x39, 0x09, 0x24, 0xb7, 0xfb, 0xeb, 0x5d, 0xd4, 0x12, 0x0c, 0xfc,
- 0x21, 0xd2, 0x9d, 0x51, 0x12, 0x9c, 0xc9, 0x3d, 0xaf, 0x96, 0xf5, 0x25, 0x39, 0xa3, 0x80, 0x78,
- 0x30, 0x89, 0x17, 0xb8, 0xe4, 0x99, 0xdc, 0xeb, 0x02, 0x09, 0xa7, 0x80, 0x78, 0xb0, 0x65, 0x46,
- 0xfc, 0x94, 0xe5, 0x1e, 0x55, 0xcd, 0xba, 0xd4, 0x48, 0x0e, 0xc8, 0x27, 0xde, 0x47, 0x2b, 0x9c,
- 0x26, 0x5e, 0x90, 0xdc, 0xa1, 0x2a, 0xdd, 0x92, 0xd2, 0x32, 0x11, 0xca, 0x06, 0x3e, 0x40, 0xab,
- 0x17, 0xa1, 0x9f, 0x8c, 0x89, 0x8c, 0xa2, 0xd7, 0x44, 0xd9, 0x96, 0x51, 0x14, 0x26, 0x28, 0x16,
- 0x8b, 0x13, 0xb3, 0x57, 0x36, 0x5b, 0x4d, 0xeb, 0x55, 0x71, 0xca, 0x4c, 0x50, 0x2c, 0xb6, 0x29,
- 0xdf, 0x1e, 0x10, 0x5f, 0x86, 0x69, 0xbf, 0x6a, 0x53, 0x25, 0x22, 0x94, 0x0d, 0xfc, 0x03, 0xda,
- 0xf2, 0x82, 0x98, 0xda, 0x01, 0x3d, 0x26, 0x34, 0xf2, 0x1c, 0x19, 0x6c, 0xa9, 0x26, 0xd8, 0x23,
- 0x19, 0xac, 0x4e, 0x00, 0x75, 0x60, 0xf7, 0xcf, 0x16, 0x6a, 0xcb, 0x9c, 0xc7, 0xdf, 0xa1, 0x07,
- 0x83, 0x4b, 0x4a, 0xe2, 0x93, 0x28, 0x74, 0x48, 0x1c, 0x13, 0xf7, 0x84, 0x44, 0x7d, 0xe2, 0x84,
- 0x81, 0xcb, 0x13, 0xa6, 0x69, 0x3d, 0xca, 0x52, 0x73, 0x11, 0x05, 0x16, 0x39, 0x58, 0x58, 0xdf,
- 0x0b, 0x6a, 0xc3, 0xde, 0x29, 0xc2, 0x2e, 0xa0, 0xc0, 0x22, 0x07, 0x3e, 0x44, 0x5b, 0x34, 0xa4,
- 0xb6, 0x6f, 0x29, 0xd3, 0xf2, 0x9c, 0x6b, 0x5a, 0x0f, 0xd8, 0x21, 0xd4, 0xb8, 0xa1, 0x0e, 0xcc,
- 0x43, 0x1d, 0x29, 0x53, 0xf1, 0x1c, 0x2c, 0x87, 0x52, 0xdd, 0x50, 0x07, 0xe2, 0x5d, 0xb4, 0x44,
- 0x9e, 0x11, 0xe7, 0x5b, 0x6f, 0x4c, 0x78, 0xf6, 0x69, 0xd6, 0x2a, 0xab, 0xe6, 0x19, 0x06, 0xf9,
- 0x08, 0xbf, 0x8f, 0x96, 0xcf, 0x13, 0x92, 0x10, 0x4e, 0x6d, 0x71, 0xea, 0x5a, 0x96, 0x9a, 0x05,
- 0x08, 0xc5, 0x10, 0xef, 0x21, 0x14, 0x27, 0x03, 0xd1, 0x47, 0x62, 0x9e, 0x47, 0x4d, 0x6b, 0x3d,
- 0x4b, 0xcd, 0x12, 0x0a, 0xa5, 0x31, 0x3e, 0x42, 0xdb, 0x7c, 0x75, 0x5f, 0x05, 0x54, 0xa4, 0x23,
- 0x4d, 0xa2, 0x80, 0xb8, 0x3c, 0x69, 0x9a, 0x56, 0x27, 0x4b, 0xcd, 0x5a, 0x3f, 0xd4, 0xa2, 0xb8,
- 0x8b, 0x5a, 0xf1, 0xc4, 0xf7, 0x68, 0xdc, 0x59, 0xe6, 0x7a, 0xc4, 0xea, 0x57, 0x20, 0x20, 0x9f,
- 0x9c, 0x33, 0xb2, 0x23, 0x37, 0xee, 0xa0, 0x12, 0x87, 0x23, 0x20, 0x9f, 0xf9, 0xaa, 0x4e, 0xc2,
- 0x98, 0x1e, 0x78, 0x3e, 0x25, 0x11, 0x3f, 0xbd, 0xce, 0x4a, 0x65, 0x55, 0x15, 0x3f, 0xd4, 0xa2,
- 0xf8, 0x27, 0xf4, 0x98, 0xe3, 0x7d, 0x1a, 0x25, 0x0e, 0x4d, 0x22, 0xe2, 0x1e, 0x13, 0x6a, 0xbb,
- 0x36, 0xb5, 0x2b, 0x29, 0xb1, 0xca, 0xc3, 0xbf, 0x97, 0xa5, 0xe6, 0xed, 0x04, 0x70, 0x3b, 0x5a,
- 0xf7, 0x0b, 0xd4, 0x96, 0x3d, 0x9f, 0xb5, 0xc9, 0x98, 0x86, 0x11, 0xa9, 0x74, 0xd6, 0x3e, 0xc3,
- 0x8a, 0x36, 0xc9, 0x29, 0x20, 0x1e, 0xdd, 0xdf, 0xef, 0xa0, 0xa5, 0xc3, 0xa2, 0xb5, 0xaf, 0xf2,
- 0x39, 0x81, 0xb0, 0x3a, 0x16, 0xf5, 0xa6, 0x5b, 0x9b, 0xac, 0xbd, 0x94, 0x71, 0x50, 0x2c, 0x7c,
- 0x80, 0x30, 0xb7, 0xf7, 0x59, 0xab, 0x8e, 0x8f, 0x6d, 0xca, 0xb5, 0xa2, 0xa8, 0xde, 0xc8, 0x52,
- 0xb3, 0xc6, 0x0b, 0x35, 0x58, 0x3e, 0xbb, 0xc5, 0xed, 0x58, 0xd6, 0x50, 0x31, 0xbb, 0xc4, 0x41,
- 0xb1, 0xf0, 0xe7, 0x68, 0xbd, 0xa8, 0x80, 0x3e, 0x09, 0xa8, 0x2c, 0x18, 0x9c, 0xa5, 0x66, 0xc5,
- 0x03, 0x15, 0xbb, 0x38, 0x2f, 0xfd, 0xd6, 0xe7, 0xf5, 0x57, 0x13, 0xe9, 0xdc, 0x9f, 0x4f, 0x2c,
- 0x36, 0x01, 0xe4, 0x54, 0xb6, 0xa7, 0x62, 0xe2, 0xdc, 0x03, 0x15, 0x1b, 0x7f, 0x8d, 0xee, 0x97,
- 0x90, 0xa7, 0xe1, 0x8f, 0x81, 0x1f, 0xda, 0x6e, 0x7e, 0x6a, 0x0f, 0xb3, 0xd4, 0xac, 0x27, 0x40,
- 0x3d, 0xcc, 0xde, 0x81, 0xa3, 0x60, 0xbc, 0x9e, 0x9b, 0xc5, 0x3b, 0x98, 0xf7, 0x42, 0x0d, 0x86,
- 0x1d, 0xf4, 0x90, 0x15, 0xef, 0x25, 0x90, 0x53, 0x12, 0x91, 0xc0, 0x21, 0x6e, 0x91, 0x7f, 0x9d,
- 0xb5, 0x1d, 0x6d, 0x77, 0xc9, 0x7a, 0x9c, 0xa5, 0xe6, 0x5b, 0x0b, 0x49, 0xb3, 0x24, 0x85, 0xc5,
- 0x71, 0x8a, 0x0b, 0x40, 0xe5, 0xf3, 0xca, 0xb0, 0x05, 0x17, 0x80, 0xd9, 0xfe, 0x80, 0x9c, 0xc6,
- 0x07, 0x84, 0x3a, 0xa3, 0xbc, 0xb5, 0x95, 0xf7, 0xa7, 0x78, 0xa1, 0x06, 0xeb, 0xfe, 0xa1, 0x23,
- 0x9d, 0xcf, 0xc3, 0x5e, 0xdf, 0x88, 0xd8, 0xae, 0x98, 0x94, 0x55, 0x54, 0x39, 0x6f, 0x54, 0x0f,
- 0x54, 0x6c, 0x45, 0x2b, 0x7a, 0x87, 0x5e, 0xa3, 0x15, 0x5d, 0xa3, 0x62, 0xe3, 0x7d, 0x74, 0xcf,
- 0x25, 0x4e, 0x38, 0x9e, 0x44, 0xbc, 0x7c, 0xc5, 0xd4, 0x2d, 0x2e, 0xbf, 0x9f, 0xa5, 0xe6, 0xbc,
- 0x13, 0xe6, 0xa1, 0x6a, 0x10, 0xb1, 0x86, 0x76, 0x7d, 0x10, 0xb1, 0x8c, 0x79, 0x08, 0x3f, 0x41,
- 0x1b, 0xd5, 0x75, 0x88, 0xc6, 0xbc, 0x95, 0xa5, 0x66, 0xd5, 0x05, 0x55, 0x80, 0xc9, 0x79, 0x2e,
- 0x3e, 0x4d, 0x26, 0xbe, 0xe7, 0xd8, 0x4c, 0xbe, 0x5c, 0xc8, 0x2b, 0x2e, 0xa8, 0x02, 0x4c, 0x3e,
- 0xa9, 0x34, 0x60, 0x54, 0xc8, 0x2b, 0x2e, 0xa8, 0x02, 0x78, 0x82, 0x76, 0xf2, 0x83, 0x5d, 0xd0,
- 0x22, 0x65, 0x43, 0x7f, 0x27, 0x4b, 0xcd, 0xd7, 0x72, 0xe1, 0xb5, 0x0c, 0x7c, 0x89, 0xde, 0x2e,
- 0x9f, 0xe1, 0xa2, 0x49, 0x45, 0x9b, 0x7f, 0x37, 0x4b, 0xcd, 0xdb, 0xd0, 0xe1, 0x36, 0xa4, 0xee,
- 0xdf, 0x4d, 0xa4, 0xf3, 0xab, 0x15, 0xeb, 0x91, 0x44, 0x7c, 0x16, 0x0f, 0xc2, 0x24, 0x50, 0x3a,
- 0x74, 0x19, 0x07, 0xc5, 0xc2, 0x5f, 0xa2, 0x4d, 0x32, 0xfb, 0x98, 0x9e, 0x27, 0xac, 0xd7, 0x8b,
- 0x4e, 0xa3, 0x5b, 0xdb, 0x59, 0x6a, 0xce, 0xf9, 0x60, 0x0e, 0xc1, 0x9f, 0xa2, 0x35, 0x89, 0xf1,
- 0xe6, 0x27, 0x2e, 0x38, 0xba, 0x75, 0x2f, 0x4b, 0x4d, 0xd5, 0x01, 0xaa, 0xc9, 0x84, 0xfc, 0x46,
- 0x06, 0xc4, 0x21, 0xde, 0x45, 0x7e, 0x9d, 0xe1, 0x42, 0xc5, 0x01, 0xaa, 0xc9, 0x2e, 0x26, 0x1c,
- 0xe0, 0x2d, 0x5d, 0x94, 0x17, 0xbf, 0x98, 0xe4, 0x20, 0x14, 0x43, 0x76, 0xdf, 0x89, 0xc4, 0x5a,
- 0x45, 0x2d, 0xe9, 0xe2, 0xbe, 0x33, 0xc3, 0x20, 0x1f, 0xb1, 0x03, 0x74, 0xcb, 0x2d, 0xb2, 0x5d,
- 0x7c, 0x64, 0xca, 0x38, 0x28, 0x16, 0xab, 0x37, 0xde, 0xce, 0x8e, 0x48, 0x30, 0xa4, 0xa3, 0x3e,
- 0x89, 0x2e, 0xf2, 0x5b, 0x0c, 0xaf, 0xb7, 0x39, 0x27, 0xcc, 0x43, 0xd6, 0xe0, 0xea, 0xda, 0x68,
- 0xbc, 0xb8, 0x36, 0x1a, 0x2f, 0xaf, 0x0d, 0xed, 0xe7, 0xa9, 0xa1, 0xfd, 0x36, 0x35, 0xb4, 0xe7,
- 0x53, 0x43, 0xbb, 0x9a, 0x1a, 0xda, 0x3f, 0x53, 0x43, 0xfb, 0x77, 0x6a, 0x34, 0x5e, 0x4e, 0x0d,
- 0xed, 0x97, 0x1b, 0xa3, 0x71, 0x75, 0x63, 0x34, 0x5e, 0xdc, 0x18, 0x8d, 0xef, 0x3f, 0x18, 0x7a,
- 0x74, 0x94, 0x0c, 0xf6, 0x9c, 0x70, 0xdc, 0x1b, 0x46, 0xf6, 0xa9, 0x1d, 0xd8, 0x3d, 0x3f, 0x3c,
- 0xf3, 0x7a, 0x75, 0xff, 0x42, 0x07, 0x2d, 0xfe, 0x1f, 0xf3, 0xe3, 0xff, 0x03, 0x00, 0x00, 0xff,
- 0xff, 0x38, 0x60, 0xd8, 0x7d, 0xa4, 0x0e, 0x00, 0x00,
+ 0x14, 0x8e, 0x27, 0x75, 0xd2, 0xde, 0x3e, 0xe7, 0xb6, 0xc3, 0x64, 0x18, 0x64, 0x97, 0xc0, 0x88,
+ 0x22, 0x50, 0x23, 0x1e, 0x12, 0x02, 0x31, 0x12, 0x72, 0x87, 0x4a, 0x95, 0x5a, 0x51, 0x4e, 0x40,
+ 0x42, 0xb0, 0x72, 0xec, 0xdb, 0xc4, 0xaa, 0x63, 0xa7, 0xf6, 0x75, 0x99, 0xae, 0xe0, 0x27, 0xb0,
+ 0x62, 0xc3, 0x1f, 0x60, 0xc3, 0x8a, 0x15, 0x6b, 0x36, 0xb3, 0xec, 0x72, 0x56, 0x16, 0x4d, 0x37,
+ 0xc8, 0xab, 0x91, 0xf8, 0x03, 0xe8, 0x3e, 0xe2, 0x57, 0xec, 0x99, 0x6c, 0xea, 0x7b, 0xbe, 0xf3,
+ 0x7d, 0xe7, 0x3e, 0x72, 0xce, 0xb9, 0xb7, 0x68, 0x77, 0x72, 0x3e, 0xec, 0xb9, 0xfe, 0xf0, 0xc2,
+ 0x1d, 0xfb, 0x36, 0x71, 0x7b, 0x21, 0x35, 0x69, 0x28, 0xfe, 0xee, 0x4f, 0x02, 0x9f, 0xfa, 0x58,
+ 0xe5, 0xc6, 0xeb, 0x3b, 0x43, 0x7f, 0xe8, 0x73, 0xa4, 0xc7, 0x46, 0xc2, 0xd9, 0xfd, 0x4f, 0x41,
+ 0x2d, 0x20, 0x61, 0xe4, 0x52, 0xfc, 0x29, 0x6a, 0x87, 0xd1, 0x78, 0x6c, 0x06, 0x57, 0x1d, 0x65,
+ 0x57, 0xd9, 0x5b, 0xfd, 0x70, 0x63, 0x5f, 0x84, 0xe9, 0x0b, 0xd4, 0xd8, 0x7c, 0x16, 0xeb, 0x8d,
+ 0x24, 0xd6, 0x67, 0x34, 0x98, 0x0d, 0x98, 0xf4, 0x22, 0x22, 0x81, 0x43, 0x82, 0xce, 0x9d, 0x82,
+ 0xf4, 0x6b, 0x81, 0x66, 0x52, 0x49, 0x83, 0xd9, 0x00, 0x3f, 0x46, 0xcb, 0x8e, 0x37, 0x24, 0x21,
+ 0x25, 0x41, 0xa7, 0xc9, 0xb5, 0x9b, 0x52, 0x7b, 0x24, 0x61, 0x63, 0x4b, 0x8a, 0x53, 0x22, 0xa4,
+ 0x23, 0xfc, 0x31, 0x6a, 0x59, 0xa6, 0x35, 0x22, 0x61, 0x67, 0x89, 0x8b, 0xd7, 0xa5, 0xf8, 0x80,
+ 0x83, 0xc6, 0xba, 0x94, 0xaa, 0x9c, 0x04, 0x92, 0xdb, 0xfd, 0x6d, 0x09, 0xb5, 0x04, 0x03, 0x7f,
+ 0x80, 0x54, 0x6b, 0x14, 0x79, 0xe7, 0x72, 0xcf, 0x6b, 0x79, 0x7d, 0x4e, 0xce, 0x28, 0x20, 0x3e,
+ 0x4c, 0xe2, 0x78, 0x36, 0x79, 0x2a, 0xf7, 0x5a, 0x23, 0xe1, 0x14, 0x10, 0x1f, 0xb6, 0xcc, 0x80,
+ 0x9f, 0xb2, 0xdc, 0x63, 0x51, 0xb3, 0x21, 0x35, 0x92, 0x03, 0xf2, 0x8b, 0x0f, 0xd0, 0x2a, 0xa7,
+ 0x89, 0x1f, 0x48, 0xee, 0xb0, 0x28, 0xdd, 0x96, 0xd2, 0x3c, 0x11, 0xf2, 0x06, 0x3e, 0x44, 0x6b,
+ 0x97, 0xbe, 0x1b, 0x8d, 0x89, 0x8c, 0xa2, 0x56, 0x44, 0xd9, 0x91, 0x51, 0x0a, 0x4c, 0x28, 0x58,
+ 0x2c, 0x4e, 0xc8, 0x7e, 0xb2, 0xd9, 0x6a, 0x5a, 0x2f, 0x8b, 0x93, 0x67, 0x42, 0xc1, 0x62, 0x9b,
+ 0x72, 0xcd, 0x01, 0x71, 0x65, 0x98, 0xf6, 0xcb, 0x36, 0x95, 0x23, 0x42, 0xde, 0xc0, 0x3f, 0xa0,
+ 0x6d, 0xc7, 0x0b, 0xa9, 0xe9, 0xd1, 0x13, 0x42, 0x03, 0xc7, 0x92, 0xc1, 0x96, 0x2b, 0x82, 0x3d,
+ 0x94, 0xc1, 0xaa, 0x04, 0x50, 0x05, 0x76, 0xff, 0x6a, 0xa1, 0xb6, 0xcc, 0x79, 0xfc, 0x2d, 0xba,
+ 0x3f, 0xb8, 0xa2, 0x24, 0x3c, 0x0d, 0x7c, 0x8b, 0x84, 0x21, 0xb1, 0x4f, 0x49, 0xd0, 0x27, 0x96,
+ 0xef, 0xd9, 0x3c, 0x61, 0x9a, 0xc6, 0xc3, 0x24, 0xd6, 0xeb, 0x28, 0x50, 0xe7, 0x60, 0x61, 0x5d,
+ 0xc7, 0xab, 0x0c, 0x7b, 0x27, 0x0b, 0x5b, 0x43, 0x81, 0x3a, 0x07, 0x3e, 0x42, 0xdb, 0xd4, 0xa7,
+ 0xa6, 0x6b, 0x14, 0xa6, 0xe5, 0x39, 0xd7, 0x34, 0xee, 0xb3, 0x43, 0xa8, 0x70, 0x43, 0x15, 0x98,
+ 0x86, 0x3a, 0x2e, 0x4c, 0xc5, 0x73, 0x30, 0x1f, 0xaa, 0xe8, 0x86, 0x2a, 0x10, 0xef, 0xa1, 0x65,
+ 0xf2, 0x94, 0x58, 0xdf, 0x38, 0x63, 0xc2, 0xb3, 0x4f, 0x31, 0xd6, 0x58, 0x35, 0xcf, 0x30, 0x48,
+ 0x47, 0xf8, 0x3d, 0xb4, 0x72, 0x11, 0x91, 0x88, 0x70, 0x6a, 0x8b, 0x53, 0xd7, 0x93, 0x58, 0xcf,
+ 0x40, 0xc8, 0x86, 0x78, 0x1f, 0xa1, 0x30, 0x1a, 0x88, 0x3e, 0x12, 0xf2, 0x3c, 0x6a, 0x1a, 0x1b,
+ 0x49, 0xac, 0xe7, 0x50, 0xc8, 0x8d, 0xf1, 0x31, 0xda, 0xe1, 0xab, 0xfb, 0xd2, 0xa3, 0x22, 0x1d,
+ 0x69, 0x14, 0x78, 0xc4, 0xe6, 0x49, 0xd3, 0x34, 0x3a, 0x49, 0xac, 0x57, 0xfa, 0xa1, 0x12, 0xc5,
+ 0x5d, 0xd4, 0x0a, 0x27, 0xae, 0x43, 0xc3, 0xce, 0x0a, 0xd7, 0x23, 0x56, 0xbf, 0x02, 0x01, 0xf9,
+ 0xe5, 0x9c, 0x91, 0x19, 0xd8, 0x61, 0x07, 0xe5, 0x38, 0x1c, 0x01, 0xf9, 0x4d, 0x57, 0x75, 0xea,
+ 0x87, 0xf4, 0xd0, 0x71, 0x29, 0x09, 0xf8, 0xe9, 0x75, 0x56, 0x4b, 0xab, 0x2a, 0xf9, 0xa1, 0x12,
+ 0xc5, 0x3f, 0xa1, 0x47, 0x1c, 0xef, 0xd3, 0x20, 0xb2, 0x68, 0x14, 0x10, 0xfb, 0x84, 0x50, 0xd3,
+ 0x36, 0xa9, 0x59, 0x4a, 0x89, 0x35, 0x1e, 0xfe, 0xdd, 0x24, 0xd6, 0x17, 0x13, 0xc0, 0x62, 0xb4,
+ 0xee, 0xe7, 0xa8, 0x2d, 0x7b, 0x3e, 0x6b, 0x93, 0x21, 0xf5, 0x03, 0x52, 0xea, 0xac, 0x7d, 0x86,
+ 0x65, 0x6d, 0x92, 0x53, 0x40, 0x7c, 0xba, 0x7f, 0xdc, 0x41, 0xcb, 0x47, 0x59, 0x6b, 0x5f, 0xe3,
+ 0x73, 0x02, 0x61, 0x75, 0x2c, 0xea, 0x4d, 0x35, 0xb6, 0x58, 0x7b, 0xc9, 0xe3, 0x50, 0xb0, 0xf0,
+ 0x21, 0xc2, 0xdc, 0x3e, 0x60, 0xad, 0x3a, 0x3c, 0x31, 0x29, 0xd7, 0x8a, 0xa2, 0x7a, 0x2d, 0x89,
+ 0xf5, 0x0a, 0x2f, 0x54, 0x60, 0xe9, 0xec, 0x06, 0xb7, 0x43, 0x59, 0x43, 0xd9, 0xec, 0x12, 0x87,
+ 0x82, 0x85, 0x3f, 0x43, 0x1b, 0x59, 0x05, 0xf4, 0x89, 0x47, 0x65, 0xc1, 0xe0, 0x24, 0xd6, 0x4b,
+ 0x1e, 0x28, 0xd9, 0xd9, 0x79, 0xa9, 0x0b, 0x9f, 0xd7, 0xaf, 0x4b, 0x48, 0xe5, 0xfe, 0x74, 0x62,
+ 0xb1, 0x09, 0x20, 0x67, 0xb2, 0x3d, 0x65, 0x13, 0xa7, 0x1e, 0x28, 0xd9, 0xf8, 0x2b, 0x74, 0x2f,
+ 0x87, 0x3c, 0xf1, 0x7f, 0xf4, 0x5c, 0xdf, 0xb4, 0xd3, 0x53, 0x7b, 0x90, 0xc4, 0x7a, 0x35, 0x01,
+ 0xaa, 0x61, 0xf6, 0x1b, 0x58, 0x05, 0x8c, 0xd7, 0x73, 0x33, 0xfb, 0x0d, 0xe6, 0xbd, 0x50, 0x81,
+ 0x61, 0x0b, 0x3d, 0x60, 0xc5, 0x7b, 0x05, 0xe4, 0x8c, 0x04, 0xc4, 0xb3, 0x88, 0x9d, 0xe5, 0x5f,
+ 0x67, 0x7d, 0x57, 0xd9, 0x5b, 0x36, 0x1e, 0x25, 0xb1, 0xfe, 0x66, 0x2d, 0x69, 0x96, 0xa4, 0x50,
+ 0x1f, 0x27, 0x7b, 0x00, 0x94, 0xae, 0x57, 0x86, 0xd5, 0x3c, 0x00, 0x66, 0xfb, 0x03, 0x72, 0x16,
+ 0x1e, 0x12, 0x6a, 0x8d, 0xd2, 0xd6, 0x96, 0xdf, 0x5f, 0xc1, 0x0b, 0x15, 0x18, 0xfe, 0x0e, 0x75,
+ 0x2c, 0x9f, 0xa7, 0xbb, 0xe3, 0x7b, 0x07, 0xbe, 0x47, 0x03, 0xdf, 0x3d, 0x36, 0x29, 0xf1, 0xac,
+ 0x2b, 0xde, 0xfd, 0x9a, 0xc6, 0x1b, 0x49, 0xac, 0xd7, 0x72, 0xa0, 0xd6, 0xd3, 0xfd, 0x53, 0x45,
+ 0x2a, 0xdf, 0x01, 0x4b, 0x8c, 0x11, 0x31, 0x6d, 0xb1, 0x1d, 0x56, 0xab, 0xf9, 0x8c, 0x2c, 0x7a,
+ 0xa0, 0x64, 0x17, 0xb4, 0xa2, 0x2b, 0xa9, 0x15, 0x5a, 0xd1, 0x8f, 0x4a, 0x36, 0x3e, 0x40, 0x77,
+ 0x6d, 0x62, 0xf9, 0xe3, 0x49, 0xc0, 0x1b, 0x83, 0x98, 0x5a, 0x6c, 0xea, 0x5e, 0x12, 0xeb, 0xf3,
+ 0x4e, 0x98, 0x87, 0xca, 0x41, 0xc4, 0x1a, 0xda, 0xd5, 0x41, 0xc4, 0x32, 0xe6, 0x21, 0xfc, 0x18,
+ 0x6d, 0x96, 0xd7, 0x21, 0x5a, 0xfe, 0x76, 0x12, 0xeb, 0x65, 0x17, 0x94, 0x01, 0x26, 0xe7, 0x59,
+ 0xfe, 0x24, 0x9a, 0xb8, 0x8e, 0x65, 0x32, 0xf9, 0x4a, 0x26, 0x2f, 0xb9, 0xa0, 0x0c, 0x30, 0xf9,
+ 0xa4, 0xd4, 0xda, 0x51, 0x26, 0x2f, 0xb9, 0xa0, 0x0c, 0xe0, 0x09, 0xda, 0x4d, 0x0f, 0xb6, 0xa6,
+ 0xf9, 0xca, 0xab, 0xe2, 0xed, 0x24, 0xd6, 0x5f, 0xc9, 0x85, 0x57, 0x32, 0xf0, 0x15, 0x7a, 0x2b,
+ 0x7f, 0x86, 0x75, 0x93, 0x8a, 0x0b, 0xe4, 0x9d, 0x24, 0xd6, 0x17, 0xa1, 0xc3, 0x22, 0xa4, 0xee,
+ 0xdf, 0x4d, 0xa4, 0xf2, 0x47, 0x1b, 0xeb, 0xbe, 0x44, 0x5c, 0xb8, 0x87, 0x7e, 0xe4, 0x15, 0x7a,
+ 0x7f, 0x1e, 0x87, 0x82, 0x85, 0xbf, 0x40, 0x5b, 0x64, 0x76, 0x4d, 0x5f, 0x44, 0xec, 0x16, 0x11,
+ 0x3d, 0x4c, 0x35, 0x76, 0x92, 0x58, 0x9f, 0xf3, 0xc1, 0x1c, 0x82, 0x3f, 0x41, 0xeb, 0x12, 0xe3,
+ 0x6d, 0x55, 0x3c, 0x9d, 0x54, 0xe3, 0x6e, 0x12, 0xeb, 0x45, 0x07, 0x14, 0x4d, 0x26, 0xe4, 0x6f,
+ 0x3d, 0x20, 0x16, 0x71, 0x2e, 0xd3, 0x87, 0x12, 0x17, 0x16, 0x1c, 0x50, 0x34, 0xd9, 0x93, 0x87,
+ 0x03, 0xfc, 0xb2, 0x10, 0xe5, 0xc5, 0x9f, 0x3c, 0x29, 0x08, 0xd9, 0x90, 0xbd, 0xa4, 0x02, 0xb1,
+ 0x56, 0x51, 0x4b, 0xaa, 0x78, 0x49, 0xcd, 0x30, 0x48, 0x47, 0xec, 0x00, 0xed, 0x7c, 0xf3, 0x6d,
+ 0x67, 0xd7, 0x57, 0x1e, 0x87, 0x82, 0xc5, 0xea, 0x8d, 0x37, 0xca, 0x63, 0xe2, 0x0d, 0xe9, 0xa8,
+ 0x4f, 0x82, 0xcb, 0xf4, 0x7d, 0xc4, 0xeb, 0x6d, 0xce, 0x09, 0xf3, 0x90, 0x31, 0xb8, 0xbe, 0xd1,
+ 0x1a, 0xcf, 0x6f, 0xb4, 0xc6, 0x8b, 0x1b, 0x4d, 0xf9, 0x79, 0xaa, 0x29, 0xbf, 0x4f, 0x35, 0xe5,
+ 0xd9, 0x54, 0x53, 0xae, 0xa7, 0x9a, 0xf2, 0xcf, 0x54, 0x53, 0xfe, 0x9d, 0x6a, 0x8d, 0x17, 0x53,
+ 0x4d, 0xf9, 0xe5, 0x56, 0x6b, 0x5c, 0xdf, 0x6a, 0x8d, 0xe7, 0xb7, 0x5a, 0xe3, 0xfb, 0xf7, 0x87,
+ 0x0e, 0x1d, 0x45, 0x83, 0x7d, 0xcb, 0x1f, 0xf7, 0x86, 0x81, 0x79, 0x66, 0x7a, 0x66, 0xcf, 0xf5,
+ 0xcf, 0x9d, 0x5e, 0xd5, 0xff, 0xb7, 0x83, 0x16, 0xff, 0xef, 0xf5, 0xa3, 0xff, 0x03, 0x00, 0x00,
+ 0xff, 0xff, 0x39, 0xf0, 0xb8, 0xbf, 0xfe, 0x0e, 0x00, 0x00,
}
func (this *Result) Equal(that interface{}) bool {
@@ -1094,6 +1105,9 @@ func (this *Store) Equal(that interface{}) bool {
if this.ChunkRefsFetchTime != that1.ChunkRefsFetchTime {
return false
}
+ if this.CongestionControlLatency != that1.CongestionControlLatency {
+ return false
+ }
return true
}
func (this *Chunk) Equal(that interface{}) bool {
@@ -1268,7 +1282,7 @@ func (this *Store) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 10)
+ s := make([]string, 0, 11)
s = append(s, "&stats.Store{")
s = append(s, "TotalChunksRef: "+fmt.Sprintf("%#v", this.TotalChunksRef)+",\n")
s = append(s, "TotalChunksDownloaded: "+fmt.Sprintf("%#v", this.TotalChunksDownloaded)+",\n")
@@ -1276,6 +1290,7 @@ func (this *Store) GoString() string {
s = append(s, "QueryReferencedStructured: "+fmt.Sprintf("%#v", this.QueryReferencedStructured)+",\n")
s = append(s, "Chunk: "+strings.Replace(this.Chunk.GoString(), `&`, ``, 1)+",\n")
s = append(s, "ChunkRefsFetchTime: "+fmt.Sprintf("%#v", this.ChunkRefsFetchTime)+",\n")
+ s = append(s, "CongestionControlLatency: "+fmt.Sprintf("%#v", this.CongestionControlLatency)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -1689,6 +1704,11 @@ func (m *Store) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x68
}
+ if m.CongestionControlLatency != 0 {
+ i = encodeVarintStats(dAtA, i, uint64(m.CongestionControlLatency))
+ i--
+ dAtA[i] = 0x30
+ }
if m.ChunkRefsFetchTime != 0 {
i = encodeVarintStats(dAtA, i, uint64(m.ChunkRefsFetchTime))
i--
@@ -2005,6 +2025,9 @@ func (m *Store) Size() (n int) {
if m.ChunkRefsFetchTime != 0 {
n += 1 + sovStats(uint64(m.ChunkRefsFetchTime))
}
+ if m.CongestionControlLatency != 0 {
+ n += 1 + sovStats(uint64(m.CongestionControlLatency))
+ }
if m.QueryReferencedStructured {
n += 2
}
@@ -2171,6 +2194,7 @@ func (this *Store) String() string {
`ChunksDownloadTime:` + fmt.Sprintf("%v", this.ChunksDownloadTime) + `,`,
`Chunk:` + strings.Replace(strings.Replace(this.Chunk.String(), "Chunk", "Chunk", 1), `&`, ``, 1) + `,`,
`ChunkRefsFetchTime:` + fmt.Sprintf("%v", this.ChunkRefsFetchTime) + `,`,
+ `CongestionControlLatency:` + fmt.Sprintf("%v", this.CongestionControlLatency) + `,`,
`QueryReferencedStructured:` + fmt.Sprintf("%v", this.QueryReferencedStructured) + `,`,
`}`,
}, "")
@@ -3372,6 +3396,25 @@ func (m *Store) Unmarshal(dAtA []byte) error {
break
}
}
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CongestionControlLatency", wireType)
+ }
+ m.CongestionControlLatency = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CongestionControlLatency |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
case 13:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field QueryReferencedStructured", wireType)
diff --git a/pkg/logqlmodel/stats/stats.proto b/pkg/logqlmodel/stats/stats.proto
index d36b8e557d98..20ad6077392b 100644
--- a/pkg/logqlmodel/stats/stats.proto
+++ b/pkg/logqlmodel/stats/stats.proto
@@ -136,6 +136,9 @@ message Store {
// Time spent fetching chunk refs from index.
int64 chunkRefsFetchTime = 5 [(gogoproto.jsontag) = "chunkRefsFetchTime"];
+
+ // Time spent being blocked on congestion control.
+ int64 congestionControlLatency = 6 [(gogoproto.jsontag) = "congestionControlLatency"];
}
message Chunk {
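The new field uses number 6 with the varint wire type, which is why the generated marshaller above writes the tag byte `0x30`. A quick sketch of the protobuf key arithmetic:

```go
package main

import "fmt"

func main() {
	// Protobuf encodes each field as key = (field_number << 3) | wire_type.
	// congestionControlLatency is field 6 with wire type 0 (varint), so the
	// key is 48, matching the `dAtA[i] = 0x30` write in the generated code.
	const fieldNumber = 6
	const wireTypeVarint = 0

	key := fieldNumber<<3 | wireTypeVarint
	fmt.Printf("0x%02x\n", key) // 0x30
}
```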
diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go
index 52e3cc8551b7..fa6fa9e03671 100644
--- a/pkg/querier/queryrange/codec_test.go
+++ b/pkg/querier/queryrange/codec_test.go
@@ -1561,6 +1561,7 @@ var (
"totalDuplicates": 8
},
"chunksDownloadTime": 0,
+ "congestionControlLatency": 0,
"totalChunksRef": 0,
"totalChunksDownloaded": 0,
"chunkRefsFetchTime": 0,
@@ -1585,6 +1586,7 @@ var (
"totalDuplicates": 19
},
"chunksDownloadTime": 16,
+ "congestionControlLatency": 0,
"totalChunksRef": 17,
"totalChunksDownloaded": 18,
"chunkRefsFetchTime": 19,
@@ -2018,6 +2020,7 @@ var (
TotalDuplicates: 19,
},
ChunksDownloadTime: 16,
+ CongestionControlLatency: 0,
TotalChunksRef: 17,
TotalChunksDownloaded: 18,
ChunkRefsFetchTime: 19,
diff --git a/pkg/querier/queryrange/prometheus_test.go b/pkg/querier/queryrange/prometheus_test.go
index 4ec798b534a7..80e4f5367afb 100644
--- a/pkg/querier/queryrange/prometheus_test.go
+++ b/pkg/querier/queryrange/prometheus_test.go
@@ -16,6 +16,7 @@ var emptyStats = `"stats": {
"ingester" : {
"store": {
"chunksDownloadTime": 0,
+ "congestionControlLatency": 0,
"totalChunksRef": 0,
"totalChunksDownloaded": 0,
"chunkRefsFetchTime": 0,
@@ -40,6 +41,7 @@ var emptyStats = `"stats": {
"querier": {
"store": {
"chunksDownloadTime": 0,
+ "congestionControlLatency": 0,
"totalChunksRef": 0,
"totalChunksDownloaded": 0,
"chunkRefsFetchTime": 0,
diff --git a/pkg/storage/chunk/client/congestion/controller.go b/pkg/storage/chunk/client/congestion/controller.go
index 7b53cea12c02..f1f69a29621e 100644
--- a/pkg/storage/chunk/client/congestion/controller.go
+++ b/pkg/storage/chunk/client/congestion/controller.go
@@ -10,6 +10,7 @@ import (
"github.com/go-kit/log"
"golang.org/x/time/rate"
+ "github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/storage/chunk/client"
)
@@ -92,6 +93,9 @@ func (a *AIMDController) GetObject(ctx context.Context, objectKey string) (io.Re
// TODO(dannyk): use hedging client to handle requests, do NOT hedge retries
+ start := time.Now()
+ statsCtx := stats.FromContext(ctx)
+
rc, sz, err := a.retrier.Do(
func(attempt int) (io.ReadCloser, int64, error) {
a.metrics.requests.Add(1)
@@ -111,6 +115,8 @@ func (a *AIMDController) GetObject(ctx context.Context, objectKey string) (io.Re
a.metrics.backoffSec.Add(delay.Seconds())
}
+ statsCtx.AddCongestionControlLatency(time.Since(start))
+
// It is vitally important that retries are DISABLED in the inner implementation.
// Some object storage clients implement retries internally, and this will interfere here.
return a.inner.GetObject(ctx, objectKey)
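The controller starts a wall-clock timer before entering the retry loop and records the elapsed time into the request's stats context, so time spent throttled or backing off is attributed to congestion control rather than to the store call itself. A sketch of the same bracketing pattern around a `rate.Limiter` (the limiter settings and loop are illustrative assumptions, not the AIMD controller):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Illustrative limiter: 2 requests/sec with burst 1, so repeated calls block.
	limiter := rate.NewLimiter(rate.Limit(2), 1)

	var congestionLatency time.Duration
	for i := 0; i < 3; i++ {
		start := time.Now()
		// Wait blocks until the limiter permits the request (or ctx is cancelled).
		_ = limiter.Wait(context.Background())
		// Record only the time spent blocked, not the object-store call itself.
		congestionLatency += time.Since(start)
		// ... the real controller would call inner.GetObject(ctx, key) here ...
	}
	fmt.Println("time blocked on congestion control:", congestionLatency)
}
```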
diff --git a/pkg/storage/chunk/client/congestion/controller_test.go b/pkg/storage/chunk/client/congestion/controller_test.go
index c65f1333e919..f63d12da20a0 100644
--- a/pkg/storage/chunk/client/congestion/controller_test.go
+++ b/pkg/storage/chunk/client/congestion/controller_test.go
@@ -14,6 +14,7 @@ import (
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
+ "github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/storage/chunk/client"
)
@@ -165,8 +166,10 @@ func TestAIMDReducedThroughput(t *testing.T) {
cli := newMockObjectClient(triggeredFailer{trigger: &trigger})
ctrl.Wrap(cli)
+ statsCtx, ctx := stats.NewContext(context.Background())
+
// run for 1 second, measure the per-second rate of requests & successful responses
- count, success := runAndMeasureRate(ctrl, time.Second)
+ count, success := runAndMeasureRate(ctx, ctrl, time.Second)
require.Greater(t, count, 1.0)
require.Greater(t, success, 1.0)
// no time spent backing off because the per-second limit will not be hit
@@ -195,7 +198,7 @@ func TestAIMDReducedThroughput(t *testing.T) {
}(&trigger)
// now, run the requests again but there will now be a failure rate & some throttling involved
- count, success = runAndMeasureRate(ctrl, time.Second)
+ count, success = runAndMeasureRate(ctx, ctrl, time.Second)
done <- true
wg.Wait()
@@ -206,9 +209,12 @@ func TestAIMDReducedThroughput(t *testing.T) {
// should have fewer successful requests than total since we are failing some
require.Less(t, success, count)
+
+ // should have registered some congestion latency in stats
+ require.NotZero(t, statsCtx.Querier().Store.CongestionControlLatency)
}
-func runAndMeasureRate(ctrl Controller, duration time.Duration) (float64, float64) {
+func runAndMeasureRate(ctx context.Context, ctrl Controller, duration time.Duration) (float64, float64) {
var count, success float64
tick := time.NewTimer(duration)
@@ -218,8 +224,6 @@ func runAndMeasureRate(ctrl Controller, duration time.Duration) (float64, float6
case <-tick.C:
goto result
default:
- ctx := context.Background()
-
count++
_, _, err := ctrl.GetObject(ctx, "foo")
if err == nil {
diff --git a/pkg/util/marshal/legacy/marshal_test.go b/pkg/util/marshal/legacy/marshal_test.go
index a3dca73ac299..88375ad842ab 100644
--- a/pkg/util/marshal/legacy/marshal_test.go
+++ b/pkg/util/marshal/legacy/marshal_test.go
@@ -59,6 +59,7 @@ var queryTests = []struct {
"ingester" : {
"store": {
"chunksDownloadTime": 0,
+ "congestionControlLatency": 0,
"totalChunksRef": 0,
"totalChunksDownloaded": 0,
"chunkRefsFetchTime": 0,
@@ -83,6 +84,7 @@ var queryTests = []struct {
"querier": {
"store": {
"chunksDownloadTime": 0,
+ "congestionControlLatency": 0,
"totalChunksRef": 0,
"totalChunksDownloaded": 0,
"chunkRefsFetchTime": 0,
diff --git a/pkg/util/marshal/marshal_test.go b/pkg/util/marshal/marshal_test.go
index ce7a49f97e76..ca932064ca6c 100644
--- a/pkg/util/marshal/marshal_test.go
+++ b/pkg/util/marshal/marshal_test.go
@@ -27,6 +27,7 @@ const emptyStats = `{
"ingester" : {
"store": {
"chunksDownloadTime": 0,
+ "congestionControlLatency": 0,
"totalChunksRef": 0,
"totalChunksDownloaded": 0,
"chunkRefsFetchTime": 0,
@@ -51,6 +52,7 @@ const emptyStats = `{
"querier": {
"store": {
"chunksDownloadTime": 0,
+ "congestionControlLatency": 0,
"totalChunksRef": 0,
"totalChunksDownloaded": 0,
"chunkRefsFetchTime": 0,
From b5f488caec9b4f3f25748de993a29711d1f56182 Mon Sep 17 00:00:00 2001
From: Danny Kopping
Date: Wed, 28 Feb 2024 10:12:21 +0200
Subject: [PATCH 123/130] fix(query/query-frontend): correct congestion control
stats aggregation (#12072)
Signed-off-by: Danny Kopping
---
pkg/logqlmodel/stats/context.go | 9 +++++----
pkg/storage/chunk/client/congestion/controller_test.go | 2 +-
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go
index 05426840015c..65bbb4fde875 100644
--- a/pkg/logqlmodel/stats/context.go
+++ b/pkg/logqlmodel/stats/context.go
@@ -96,9 +96,9 @@ func (c *Context) Ingester() Ingester {
}
}
-// Querier returns the querier statistics accumulated so far.
-func (c *Context) Querier() Querier {
- return c.querier
+// Store returns the store statistics accumulated so far.
+func (c *Context) Store() Store {
+ return c.store
}
// Caches returns the cache statistics accumulated so far.
@@ -188,6 +188,7 @@ func (r *Result) ComputeSummary(execTime time.Duration, queueTime time.Duration,
func (s *Store) Merge(m Store) {
s.TotalChunksRef += m.TotalChunksRef
s.TotalChunksDownloaded += m.TotalChunksDownloaded
+ s.CongestionControlLatency += m.CongestionControlLatency
s.ChunksDownloadTime += m.ChunksDownloadTime
s.ChunkRefsFetchTime += m.ChunkRefsFetchTime
s.Chunk.HeadChunkBytes += m.Chunk.HeadChunkBytes
@@ -370,7 +371,7 @@ func (c *Context) AddChunkRefsFetchTime(i time.Duration) {
}
func (c *Context) AddCongestionControlLatency(i time.Duration) {
- atomic.AddInt64(&c.querier.Store.CongestionControlLatency, int64(i))
+ atomic.AddInt64(&c.store.CongestionControlLatency, int64(i))
}
func (c *Context) AddChunksDownloaded(i int64) {
diff --git a/pkg/storage/chunk/client/congestion/controller_test.go b/pkg/storage/chunk/client/congestion/controller_test.go
index f63d12da20a0..6ecc208c1a3f 100644
--- a/pkg/storage/chunk/client/congestion/controller_test.go
+++ b/pkg/storage/chunk/client/congestion/controller_test.go
@@ -211,7 +211,7 @@ func TestAIMDReducedThroughput(t *testing.T) {
require.Less(t, success, count)
// should have registered some congestion latency in stats
- require.NotZero(t, statsCtx.Querier().Store.CongestionControlLatency)
+ require.NotZero(t, statsCtx.Store().CongestionControlLatency)
}
func runAndMeasureRate(ctx context.Context, ctrl Controller, duration time.Duration) (float64, float64) {
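Because the latency is a plain `int64` nanosecond counter on the `Store` message, combining the stats of split subqueries is simple addition, which is what the `Merge` fix above restores. A sketch under that assumption (a simplified stand-in for the generated `Store` type):

```go
package main

import (
	"fmt"
	"time"
)

// store is a sketch of the stats Store message: durations are int64 nanoseconds.
type store struct {
	CongestionControlLatency int64
}

// merge mirrors Store.Merge above: aggregating subquery stats is plain addition.
func (s *store) merge(m store) {
	s.CongestionControlLatency += m.CongestionControlLatency
}

func main() {
	// Two split subqueries each spent 250ms blocked on congestion control.
	total := store{}
	total.merge(store{CongestionControlLatency: int64(250 * time.Millisecond)})
	total.merge(store{CongestionControlLatency: int64(250 * time.Millisecond)})

	fmt.Println(time.Duration(total.CongestionControlLatency)) // 500ms
}
```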
From 637c292cd8ba3b22670838a862bfebd412315708 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Wed, 28 Feb 2024 13:04:51 +0100
Subject: [PATCH 124/130] fix(blooms): Fix issue when loading cache on startup
if directory does not exist (#12080)
The check `!dirEntry.IsDir()` panics when `dirEntry` is `nil`, which happens when the root directory does not exist.
The PR also adds more logging.
Signed-off-by: Christian Haudum
---
.../stores/shipper/bloomshipper/cache.go | 27 ++++++++++++++-----
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache.go b/pkg/storage/stores/shipper/bloomshipper/cache.go
index dd2538902bd9..45a6d01af315 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache.go
@@ -53,26 +53,39 @@ func NewBlocksCache(cfg cache.EmbeddedCacheConfig, reg prometheus.Registerer, lo
}
func LoadBlocksDirIntoCache(path string, c cache.TypedCache[string, BlockDirectory], logger log.Logger) error {
+ level.Debug(logger).Log("msg", "load bloomshipper working directory into cache", "path", path)
keys, values := loadBlockDirectories(path, logger)
return c.Store(context.Background(), keys, values)
}
-func loadBlockDirectories(path string, logger log.Logger) (keys []string, values []BlockDirectory) {
- resolver := NewPrefixedResolver(path, defaultKeyResolver{})
- _ = filepath.WalkDir(path, func(filename string, dirEntry fs.DirEntry, _ error) error {
+func loadBlockDirectories(root string, logger log.Logger) (keys []string, values []BlockDirectory) {
+ resolver := NewPrefixedResolver(root, defaultKeyResolver{})
+ _ = filepath.WalkDir(root, func(path string, dirEntry fs.DirEntry, e error) error {
+ if dirEntry == nil || e != nil {
+ level.Warn(logger).Log("msg", "failed to walk directory", "path", path, "dirEntry", dirEntry, "err", e)
+ return nil
+ }
+
if !dirEntry.IsDir() {
+ level.Warn(logger).Log("msg", "skip directory entry", "err", "not a directory", "path", path)
return nil
}
- ref, err := resolver.ParseBlockKey(key(filename))
+
+ ref, err := resolver.ParseBlockKey(key(path))
if err != nil {
+ level.Warn(logger).Log("msg", "skip directory entry", "err", err, "path", path)
return nil
}
- if ok, clean := isBlockDir(filename, logger); ok {
+
+ if ok, clean := isBlockDir(path, logger); ok {
keys = append(keys, resolver.Block(ref).Addr())
- values = append(values, NewBlockDirectory(ref, filename, logger))
+ values = append(values, NewBlockDirectory(ref, path, logger))
+ level.Debug(logger).Log("msg", "found block directory", "ref", ref, "path", path)
} else {
- _ = clean(filename)
+ level.Warn(logger).Log("msg", "skip directory entry", "err", "not a block directory", "path", path)
+ _ = clean(path)
}
+
return nil
})
return
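`filepath.WalkDir` invokes the callback once with a `nil` `fs.DirEntry` and a non-nil error when the root itself cannot be read, so dereferencing the entry without a guard panics exactly as described above. A runnable sketch of the guarded callback (the path is illustrative):

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	// When the root does not exist, filepath.WalkDir still invokes the callback
	// once, with a nil fs.DirEntry and a non-nil error. Calling dirEntry.IsDir()
	// without the guard below is the nil dereference this patch fixes.
	_ = filepath.WalkDir("/does/not/exist", func(path string, dirEntry fs.DirEntry, err error) error {
		if dirEntry == nil || err != nil {
			fmt.Printf("skipping %s: %v\n", path, err)
			return nil // log and continue instead of panicking
		}
		if !dirEntry.IsDir() {
			return nil
		}
		fmt.Println("dir:", path)
		return nil
	})
}
```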
From 184d1de4dba7c9bdba5a648a35e3132d0f76dfd1 Mon Sep 17 00:00:00 2001
From: Christian Haudum
Date: Wed, 28 Feb 2024 13:05:15 +0100
Subject: [PATCH 125/130] chore(blooms): Use `bloomshipper.Interval` instead of
`model.Interval` (#12079)
Signed-off-by: Christian Haudum
---
pkg/bloomgateway/bloomgateway.go | 7 +++--
pkg/bloomgateway/multiplexing.go | 15 ++++++----
pkg/bloomgateway/multiplexing_test.go | 7 +++--
pkg/bloomgateway/processor_test.go | 12 ++++----
pkg/bloomgateway/util.go | 18 +++++------
pkg/bloomgateway/util_test.go | 30 +++++++++----------
pkg/storage/bloom/v1/bloom_tokenizer.go | 1 +
.../stores/shipper/bloomshipper/cache_test.go | 3 +-
.../shipper/bloomshipper/resolver_test.go | 3 +-
9 files changed, 52 insertions(+), 44 deletions(-)
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index ee358ebbbc66..b80cc908f719 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -306,13 +306,14 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
filters := syntax.ExtractLineFilters(req.Plan.AST)
tasks := make([]Task, 0, len(seriesByDay))
- for _, seriesWithBounds := range seriesByDay {
- task, err := NewTask(ctx, tenantID, seriesWithBounds, filters)
+ for _, seriesForDay := range seriesByDay {
+ task, err := NewTask(ctx, tenantID, seriesForDay, filters)
if err != nil {
return nil, err
}
+ level.Debug(g.logger).Log("msg", "creating task for day", "day", seriesForDay.day, "interval", seriesForDay.interval.String(), "task", task.ID)
tasks = append(tasks, task)
- numSeries += len(seriesWithBounds.series)
+ numSeries += len(seriesForDay.series)
}
g.activeUsers.UpdateUserTimestamp(tenantID, time.Now())
diff --git a/pkg/bloomgateway/multiplexing.go b/pkg/bloomgateway/multiplexing.go
index c9e3e9cf6f4e..8486f6e6e7cf 100644
--- a/pkg/bloomgateway/multiplexing.go
+++ b/pkg/bloomgateway/multiplexing.go
@@ -13,6 +13,7 @@ import (
"github.com/grafana/loki/pkg/logql/syntax"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/config"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
)
const (
@@ -65,7 +66,7 @@ type Task struct {
// filters of the original request
filters []syntax.LineFilterExpr
// from..through date of the task's chunks
- bounds model.Interval
+ interval bloomshipper.Interval
// the context from the request
ctx context.Context
@@ -76,7 +77,7 @@ type Task struct {
// NewTask returns a new Task that can be enqueued to the task queue.
// In addition, it returns a result and an error channel, as well
// as an error if the instantiation fails.
-func NewTask(ctx context.Context, tenantID string, refs seriesWithBounds, filters []syntax.LineFilterExpr) (Task, error) {
+func NewTask(ctx context.Context, tenantID string, refs seriesWithInterval, filters []syntax.LineFilterExpr) (Task, error) {
key, err := ulid.New(ulid.Now(), entropy)
if err != nil {
return Task{}, err
@@ -89,8 +90,8 @@ func NewTask(ctx context.Context, tenantID string, refs seriesWithBounds, filter
resCh: make(chan v1.Output),
filters: filters,
series: refs.series,
- bounds: refs.bounds,
- table: refs.table,
+ interval: refs.interval,
+ table: refs.day,
ctx: ctx,
done: make(chan struct{}),
responses: make([]v1.Output, 0, len(refs.series)),
@@ -98,8 +99,10 @@ func NewTask(ctx context.Context, tenantID string, refs seriesWithBounds, filter
return task, nil
}
+// Bounds implements Bounded
+// see pkg/storage/stores/shipper/indexshipper/tsdb.Bounded
func (t Task) Bounds() (model.Time, model.Time) {
- return t.bounds.Start, t.bounds.End
+ return t.interval.Start, t.interval.End
}
func (t Task) Done() <-chan struct{} {
@@ -129,7 +132,7 @@ func (t Task) Copy(series []*logproto.GroupedChunkRefs) Task {
resCh: t.resCh,
filters: t.filters,
series: series,
- bounds: t.bounds,
+ interval: t.interval,
table: t.table,
ctx: t.ctx,
done: make(chan struct{}),
diff --git a/pkg/bloomgateway/multiplexing_test.go b/pkg/bloomgateway/multiplexing_test.go
index a6ad0270d96e..af79f37b358b 100644
--- a/pkg/bloomgateway/multiplexing_test.go
+++ b/pkg/bloomgateway/multiplexing_test.go
@@ -12,6 +12,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
)
func TestTask(t *testing.T) {
@@ -58,9 +59,9 @@ func TestTask_RequestIterator(t *testing.T) {
tokenizer := v1.NewNGramTokenizer(4, 0)
t.Run("empty request yields empty iterator", func(t *testing.T) {
- swb := seriesWithBounds{
- bounds: model.Interval{Start: 0, End: math.MaxInt64},
- series: []*logproto.GroupedChunkRefs{},
+ swb := seriesWithInterval{
+ interval: bloomshipper.Interval{Start: 0, End: math.MaxInt64},
+ series: []*logproto.GroupedChunkRefs{},
}
task, _ := NewTask(context.Background(), tenant, swb, []syntax.LineFilterExpr{})
it := task.RequestIter(tokenizer)
diff --git a/pkg/bloomgateway/processor_test.go b/pkg/bloomgateway/processor_test.go
index 84687995833b..0c586897064b 100644
--- a/pkg/bloomgateway/processor_test.go
+++ b/pkg/bloomgateway/processor_test.go
@@ -104,13 +104,13 @@ func TestProcessor(t *testing.T) {
p := newProcessor("worker", mockStore, log.NewNopLogger(), metrics)
chunkRefs := createQueryInputFromBlockData(t, tenant, data, 10)
- swb := seriesWithBounds{
+ swb := seriesWithInterval{
series: groupRefs(t, chunkRefs),
- bounds: model.Interval{
+ interval: bloomshipper.Interval{
Start: now.Add(-1 * time.Hour),
End: now,
},
- table: config.NewDayTime(truncateDay(now)),
+ day: config.NewDayTime(truncateDay(now)),
}
filters := []syntax.LineFilterExpr{
{
@@ -153,13 +153,13 @@ func TestProcessor(t *testing.T) {
p := newProcessor("worker", mockStore, log.NewNopLogger(), metrics)
chunkRefs := createQueryInputFromBlockData(t, tenant, data, 10)
- swb := seriesWithBounds{
+ swb := seriesWithInterval{
series: groupRefs(t, chunkRefs),
- bounds: model.Interval{
+ interval: bloomshipper.Interval{
Start: now.Add(-1 * time.Hour),
End: now,
},
- table: config.NewDayTime(truncateDay(now)),
+ day: config.NewDayTime(truncateDay(now)),
}
filters := []syntax.LineFilterExpr{
{
diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go
index c3ea06a3df53..dca14b7be3e5 100644
--- a/pkg/bloomgateway/util.go
+++ b/pkg/bloomgateway/util.go
@@ -128,14 +128,14 @@ func partitionTasks(tasks []Task, blocks []bloomshipper.BlockRef) []blockWithTas
return result
}
-type seriesWithBounds struct {
- bounds model.Interval
- table config.DayTime
- series []*logproto.GroupedChunkRefs
+type seriesWithInterval struct {
+ day config.DayTime
+ series []*logproto.GroupedChunkRefs
+ interval bloomshipper.Interval
}
-func partitionRequest(req *logproto.FilterChunkRefRequest) []seriesWithBounds {
- result := make([]seriesWithBounds, 0)
+func partitionRequest(req *logproto.FilterChunkRefRequest) []seriesWithInterval {
+ result := make([]seriesWithInterval, 0)
fromDay, throughDay := truncateDay(req.From), truncateDay(req.Through)
@@ -177,12 +177,12 @@ func partitionRequest(req *logproto.FilterChunkRefRequest) []seriesWithBounds {
}
if len(res) > 0 {
- result = append(result, seriesWithBounds{
- bounds: model.Interval{
+ result = append(result, seriesWithInterval{
+ interval: bloomshipper.Interval{
Start: minTs,
End: maxTs,
},
- table: config.NewDayTime(day),
+ day: config.NewDayTime(day),
series: res,
})
}
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
index e9776dfef78f..3e55a3ab55aa 100644
--- a/pkg/bloomgateway/util_test.go
+++ b/pkg/bloomgateway/util_test.go
@@ -143,7 +143,7 @@ func TestPartitionRequest(t *testing.T) {
testCases := map[string]struct {
inp *logproto.FilterChunkRefRequest
- exp []seriesWithBounds
+ exp []seriesWithInterval
}{
"empty": {
@@ -151,7 +151,7 @@ func TestPartitionRequest(t *testing.T) {
From: ts.Add(-24 * time.Hour),
Through: ts,
},
- exp: []seriesWithBounds{},
+ exp: []seriesWithInterval{},
},
"all chunks within single day": {
@@ -173,10 +173,10 @@ func TestPartitionRequest(t *testing.T) {
},
},
},
- exp: []seriesWithBounds{
+ exp: []seriesWithInterval{
{
- bounds: model.Interval{Start: ts.Add(-60 * time.Minute), End: ts.Add(-45 * time.Minute)},
- table: config.NewDayTime(mktime("2024-01-24 00:00")),
+ interval: bloomshipper.Interval{Start: ts.Add(-60 * time.Minute), End: ts.Add(-45 * time.Minute)},
+ day: config.NewDayTime(mktime("2024-01-24 00:00")),
series: []*logproto.GroupedChunkRefs{
{
Fingerprint: 0x00,
@@ -214,10 +214,10 @@ func TestPartitionRequest(t *testing.T) {
},
},
},
- exp: []seriesWithBounds{
+ exp: []seriesWithInterval{
{
- bounds: model.Interval{Start: ts.Add(-23 * time.Hour), End: ts.Add(-22 * time.Hour)},
- table: config.NewDayTime(mktime("2024-01-23 00:00")),
+ interval: bloomshipper.Interval{Start: ts.Add(-23 * time.Hour), End: ts.Add(-22 * time.Hour)},
+ day: config.NewDayTime(mktime("2024-01-23 00:00")),
series: []*logproto.GroupedChunkRefs{
{
Fingerprint: 0x00,
@@ -228,8 +228,8 @@ func TestPartitionRequest(t *testing.T) {
},
},
{
- bounds: model.Interval{Start: ts.Add(-2 * time.Hour), End: ts.Add(-1 * time.Hour)},
- table: config.NewDayTime(mktime("2024-01-24 00:00")),
+ interval: bloomshipper.Interval{Start: ts.Add(-2 * time.Hour), End: ts.Add(-1 * time.Hour)},
+ day: config.NewDayTime(mktime("2024-01-24 00:00")),
series: []*logproto.GroupedChunkRefs{
{
Fingerprint: 0x01,
@@ -255,10 +255,10 @@ func TestPartitionRequest(t *testing.T) {
},
},
},
- exp: []seriesWithBounds{
+ exp: []seriesWithInterval{
{
- bounds: model.Interval{Start: ts.Add(-13 * time.Hour), End: ts.Add(-11 * time.Hour)},
- table: config.NewDayTime(mktime("2024-01-23 00:00")),
+ interval: bloomshipper.Interval{Start: ts.Add(-13 * time.Hour), End: ts.Add(-11 * time.Hour)},
+ day: config.NewDayTime(mktime("2024-01-23 00:00")),
series: []*logproto.GroupedChunkRefs{
{
Fingerprint: 0x00,
@@ -269,8 +269,8 @@ func TestPartitionRequest(t *testing.T) {
},
},
{
- bounds: model.Interval{Start: ts.Add(-13 * time.Hour), End: ts.Add(-11 * time.Hour)},
- table: config.NewDayTime(mktime("2024-01-24 00:00")),
+ interval: bloomshipper.Interval{Start: ts.Add(-13 * time.Hour), End: ts.Add(-11 * time.Hour)},
+ day: config.NewDayTime(mktime("2024-01-24 00:00")),
series: []*logproto.GroupedChunkRefs{
{
Fingerprint: 0x00,
diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go
index 9b80fb92de88..afd06b17c966 100644
--- a/pkg/storage/bloom/v1/bloom_tokenizer.go
+++ b/pkg/storage/bloom/v1/bloom_tokenizer.go
@@ -9,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/grafana/dskit/multierror"
+
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/util/encoding"
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache_test.go b/pkg/storage/stores/shipper/bloomshipper/cache_test.go
index d2b92ed4128e..e07738574f5d 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache_test.go
@@ -9,9 +9,10 @@ import (
"time"
"github.com/go-kit/log"
- "github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
+
+ "github.com/grafana/loki/pkg/logqlmodel/stats"
)
type mockCache[K comparable, V any] struct {
diff --git a/pkg/storage/stores/shipper/bloomshipper/resolver_test.go b/pkg/storage/stores/shipper/bloomshipper/resolver_test.go
index 890c5b03ff3f..b2aa7e60a4b5 100644
--- a/pkg/storage/stores/shipper/bloomshipper/resolver_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/resolver_test.go
@@ -3,8 +3,9 @@ package bloomshipper
import (
"testing"
- v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/stretchr/testify/require"
+
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
func TestResolver_ParseMetaKey(t *testing.T) {
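With the shared `Interval` type, `Task` satisfies the `Bounded` interface through its `Bounds` method. A sketch of that relationship (the `Interval` shape below is an assumption based on its usage in this patch, not the actual `bloomshipper` definition):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// Interval is a sketch of bloomshipper.Interval: a start/end pair in
// model.Time, replacing model.Interval in the bloom gateway.
type Interval struct {
	Start, End model.Time
}

// Bounded is the interface Task satisfies via its Bounds method
// (see pkg/storage/stores/shipper/indexshipper/tsdb.Bounded).
type Bounded interface {
	Bounds() (model.Time, model.Time)
}

// task is a sketch of the gateway Task carrying the day's interval.
type task struct {
	interval Interval
}

func (t task) Bounds() (model.Time, model.Time) {
	return t.interval.Start, t.interval.End
}

func main() {
	var b Bounded = task{interval: Interval{Start: 0, End: 1000}}
	from, through := b.Bounds()
	fmt.Println(from, through)
}
```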
From c39786bd516c1839c4e698c7bed29573eeedca2b Mon Sep 17 00:00:00 2001
From: J Stickler
Date: Wed, 28 Feb 2024 11:49:30 -0500
Subject: [PATCH 126/130] docs: update storage topic to include azure (#12063)
---
.../setup/install/helm/configure-storage/_index.md | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/docs/sources/setup/install/helm/configure-storage/_index.md b/docs/sources/setup/install/helm/configure-storage/_index.md
index 6a28387932d0..e51e8503fee0 100644
--- a/docs/sources/setup/install/helm/configure-storage/_index.md
+++ b/docs/sources/setup/install/helm/configure-storage/_index.md
@@ -20,9 +20,9 @@ This guide assumes Loki will be installed in one of the modes above and that a `
**To use a managed object store:**
-1. Set the `type` of `storage` in `values.yaml` to `gcs` or `s3`.
+1. In the `values.yaml` file, set the value for `storage.type` to `azure`, `gcs`, or `s3`.
-2. Configure the storage client under `loki.storage.gcs` or `loki.storage.s3`.
+1. Configure the storage client under `loki.storage.azure`, `loki.storage.gcs`, or `loki.storage.s3`.
**To install Minio alongside Loki:**
@@ -41,7 +41,7 @@ This guide assumes Loki will be installed in one of the modes above and that a `
1. Provision an IAM role, policy and S3 bucket as described in [Storage]({{< relref "../../../../storage#aws-deployment-s3-single-store" >}}).
- If the Terraform module was used, note the annotation emitted by `terraform output -raw annotation`.
-2. Add the IAM role annotation to the service account in `values.yaml`:
+1. Add the IAM role annotation to the service account in `values.yaml`:
```
serviceAccount:
@@ -49,7 +49,7 @@ This guide assumes Loki will be installed in one of the modes above and that a `
"eks.amazonaws.com/role-arn": "arn:aws:iam:::role/"
```
-3. Configure the storage:
+1. Configure the storage:
```
loki:
From c1fbab5228a6e57c68361c91886ea6ec0806540c Mon Sep 17 00:00:00 2001
From: Trevor Whitney
Date: Wed, 28 Feb 2024 15:26:15 -0700
Subject: [PATCH 127/130] chore(release-2.8.x): release 2.8.9 and 2.8.10
(#12089)
Co-authored-by: loki-gh-app[bot] <160051081+loki-gh-app[bot]@users.noreply.github.com>
---
CHANGELOG.md | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fb09c05dded7..512b9b0bdbb6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -129,6 +129,20 @@
* [10542](https://github.com/grafana/loki/pull/10542) **chaudum**: Remove legacy deployment mode for ingester (Deployment, without WAL) and instead always run them as StatefulSet.
+## [2.8.10](https://github.com/grafana/loki/compare/v2.8.9...v2.8.10) (2024-02-28)
+
+
+### Bug Fixes
+
+* image tag from env and pin release to v1.11.5 ([#12073](https://github.com/grafana/loki/issues/12073)) ([8e11cd7](https://github.com/grafana/loki/commit/8e11cd7a8222a64d60bff30a41e399ddbda3372e))
+
+## [2.8.9](https://github.com/grafana/loki/compare/v2.8.8...v2.8.9) (2024-02-23)
+
+
+### Bug Fixes
+
+* bump alpine base image and go to fix CVEs ([#12026](https://github.com/grafana/loki/issues/12026)) ([196650e](https://github.com/grafana/loki/commit/196650e4c119249016df85a50a2cced521cbe9be))
+
## 2.9.2 (2023-10-16)
### All Changes
From 0ac42e78c2a32980118f8d445aca42362e023eb1 Mon Sep 17 00:00:00 2001
From: J Stickler
Date: Thu, 29 Feb 2024 10:17:05 -0500
Subject: [PATCH 128/130] docs: restore links, update procedure in Quick Start
(#12068)
Co-authored-by: Jack Baldry
---
docs/sources/get-started/quick-start.md | 25 ++++++++++++-------------
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/docs/sources/get-started/quick-start.md b/docs/sources/get-started/quick-start.md
index ac12c9b22f10..70cbfc2c57d2 100644
--- a/docs/sources/get-started/quick-start.md
+++ b/docs/sources/get-started/quick-start.md
@@ -11,27 +11,27 @@ If you want to experiment with Loki, you can run Loki locally using the Docker C
The Docker Compose configuration instantiates the following components, each in its own container:
-- **Flog** a sample application which generates log lines. [Flog](https://github.com/mingrammer/flog) is a log generator for common log formats.
-- **Promtail** which scrapes the log lines from Flog, and pushes them to Loki through the gateway.
+- **flog**, a sample application that generates log lines. [flog](https://github.com/mingrammer/flog) is a log generator for common log formats.
+- **Promtail**, which scrapes the log lines from flog and pushes them to Loki through the gateway.
- **Gateway** (NGINX), which receives requests and redirects them to the appropriate container based on the request's URL.
- One Loki **read** component.
- One Loki **write** component.
- **Minio**, an S3-compatible object store which Loki uses to store its index and chunks.
- **Grafana**, which provides visualization of the log lines captured within Loki.
-{{< figure max-width="75%" src="/media/docs/loki/get-started-flog.png" caption="Getting started sample application" alt="Getting started sample application">}}
+{{< figure max-width="75%" src="/media/docs/loki/get-started-flog-v2.png" caption="Getting started sample application" alt="Getting started sample application">}}
## Installing Loki and collecting sample logs
Prerequisites
-- Docker
-- Docker Compose
+- [Docker](https://docs.docker.com/install)
+- [Docker Compose](https://docs.docker.com/compose/install)
{{% admonition type="note" %}}
-Note that this quick start assumes you are running Linux.
+This quick start assumes you are running Linux.
{{% /admonition %}}
-**Steps:**
+**To install Loki locally, follow these steps:**
1. Create a directory called `evaluate-loki` for the demo environment. Make `evaluate-loki` your current working directory:
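A sketch of this step's commands, assuming a POSIX shell:

```bash
mkdir evaluate-loki
cd evaluate-loki
```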
@@ -80,11 +80,11 @@ Once you have collected logs, you will want to view them. You can view your log
The test environment includes [Grafana](https://grafana.com/docs/grafana/latest/), which you can use to query and observe the sample logs generated by the flog application. You can access the Grafana cluster by navigating to [http://localhost:3000](http://localhost:3000). The Grafana instance provided with this demo has a Loki [datasource](https://grafana.com/docs/grafana/latest/datasources/loki/) already configured.
-{{< figure src="/media/docs/loki/grafana-query-builder.png" caption="Grafana Explore" alt="Grafana Explore">}}
+ {{< figure src="/media/docs/loki/grafana-query-builder-v2.png" caption="Grafana Explore" alt="Grafana Explore">}}
1. From the Grafana main menu, click the **Explore** icon (1) to launch the Explore tab. To learn more about Explore, refer to the [Explore](https://grafana.com/docs/grafana/latest/explore/) documentation.
-1. From the menu in the dashboard header (2), select the Loki data source. This displays the Loki query editor. In the query editor you use the Loki query language, [LogQL](https://grafana.com/docs/loki/latest/query/), to query your logs.
+1. From the menu in the dashboard header, select the Loki data source (2). This displays the Loki query editor. In the query editor, you use the Loki query language, [LogQL](https://grafana.com/docs/loki/latest/query/), to query your logs.
To learn more about the query editor, refer to the [query editor documentation](https://grafana.com/docs/grafana/latest/datasources/loki/query-editor/).
1. The Loki query editor has two modes (3):
@@ -96,7 +96,7 @@ Once you have collected logs, you will want to view them. You can view your log
1. Click **Code** (3) to work in Code mode in the query editor.
- Here are some basic sample queries to get you started using LogQL. Note that these queries assume that you followed the instructions to create a directory called `evaluate-loki`. If you installed in a different directory, you’ll need to modify these queries to match your installation directory. After copying any of these queries into the query editor, click **Run Query** (6) to execute the query.
+ Here are some basic sample queries to get you started using LogQL. Note that these queries assume that you followed the instructions to create a directory called `evaluate-loki`. If you installed in a different directory, you’ll need to modify these queries to match your installation directory. After copying any of these queries into the query editor, click **Run Query** (4) to execute the query.
1. View all the log lines which have the container label "flog":
```bash
@@ -126,11 +126,10 @@ Once you have collected logs, you will want to view them. You can view your log
The final query above is a metric query which returns a time series. This will trigger Grafana to draw a graph of the results. You can change the type of graph for a different view of the data. Click **Bars** to view a bar graph of the data.
1. Click the **Builder** tab (3) to return to Builder mode in the query editor.
- 1. In Builder view, click **Kick start your query**(4).
+ 1. In Builder view, click **Kick start your query** (5).
1. Expand the **Log query starters** section.
1. Select the first choice, **Parse log lines with logfmt parser**, by clicking **Use this query**.
- 1. On the Explore tab, select **container** from the **Label filters** menu then select a container from the **value** menu.
- 1. Click **Run Query**(6).
+ 1. On the Explore tab, click **Label browser**. In the dialog, select a container and click **Show logs**.
For a thorough introduction to LogQL, refer to the [LogQL reference](https://grafana.com/docs/loki/latest/query/).
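As a quick taste, here is an illustrative LogQL query in the style of the samples above — the container label value is an assumption based on the `evaluate-loki` directory name used in this guide:

```bash
{container="evaluate-loki-flog-1"} |= "GET"
```

The stream selector in braces picks the log streams; the `|=` line filter keeps only lines containing "GET".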
From b24d48d2f668ce5d660be155fee1a01fa71b8027 Mon Sep 17 00:00:00 2001
From: J Stickler
Date: Thu, 29 Feb 2024 17:10:01 -0500
Subject: [PATCH 129/130] docs: Update READMEs (#12061)
---
CONTRIBUTING.md | 9 +++++++++
docs/README.md | 19 +++++++++++++++----
2 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b643a46ddf6f..94d664954f6c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -14,6 +14,8 @@ LIDs must be created as a pull request using [this template](docs/sources/commun
## Pull Request Prerequisites/Checklist
+**NOTE:** The Loki team has adopted [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) for commit messages.
+
1. Your PR title is in the form `<type>: Your change`.
1. The title does not end with punctuation, as it will be added to the changelog.
1. The title starts with an imperative verb. Example: Fix the latency between System A and System B.
@@ -36,6 +38,8 @@ LIDs must be created as a pull request using [this template](docs/sources/commun
Please document clearly what changed AND what needs to be done in the upgrade guide.
+**NOTE:** A maintainer of the Loki repo must approve and run the continuous integration (CI) workflows for community contributions.
+
## Setup
A common problem arises in local environments when you want your module to use a locally modified dependency:
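One common fix, sketched here with a hypothetical module path rather than the repo's actual guidance, is a `replace` directive in `go.mod` that points the module at your local checkout:

```
// go.mod: redirect a dependency to a local working copy (path is illustrative)
replace example.com/some/dependency => ../some-dependency
```

After adding the directive, run `go mod tidy` so the build resolves against the local copy.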
@@ -157,3 +161,8 @@ To get a local preview of the documentation:
Then go to the Docker Desktop settings, open **Resources**, and add the temporary directory path `/tmp`.
> Note that `make docs` uses a lot of memory. If it crashes, increase the memory allocated to Docker and try again.
+
+Also note that PRs are merged to the main branch. If your changes need to be immediately published to the latest release, you must add the appropriate backport label to your PR, for example, `backport-release-2.9.x`. If the changes in your PR can be automatically backported, the backport label will trigger GrafanaBot to create the backport PR; otherwise, you will need to create a PR to manually backport your changes.
+
+* [Latest release](https://grafana.com/docs/loki/latest/)
+* [Upcoming release](https://grafana.com/docs/loki/next/), at the tip of the main branch
diff --git a/docs/README.md b/docs/README.md
index a3aa4414ad09..569889c2d645 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -8,6 +8,7 @@ Some key things to know about the Loki documentation source:
- While you can view the documentation in GitHub, GitHub does not render the images or links correctly and cannot render the Hugo specific shortcodes. To read the Loki documentation, see the [Documentation Site](https://grafana.com/docs/loki/latest/).
- If you have a trivial fix or improvement, go ahead and create a pull request.
- If you plan to do something more involved, for example creating a new topic, discuss your ideas on the relevant GitHub issue.
+- Pull requests are merged to main and published to the [Upcoming release](https://grafana.com/docs/loki/next/). If your change needs to be published to the [Latest release](https://grafana.com/docs/loki/latest/) before the next Loki release (that is, it needs to be published immediately), add the appropriate backport label to your PR.
## Contributing
@@ -26,16 +27,26 @@ If you have a GitHub account and you're just making a small fix, for example fix
2. Click the pencil icon.
3. Enter your changes.
4. Click **Commit changes**. GitHub creates a pull request for you.
-5. If this is your first contribution to the Loki repository, you will need to sign the Contributor License Agreement (CLA) before your PR can be accepted.
-6. Add the `type/docs` label to identify your PR as a docs contribution.
-7. If your contribution needs to be added to the current release or previous releases, apply the appropriate `backport` label. You can find more information about backporting in the [Writers' toolkit](https://grafana.com/docs/writers-toolkit/review/backporting/).
+5. The Loki team uses [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) for commit messages. Make sure your commit messages for doc updates start with `docs:`.
+6. If this is your first contribution to the Loki repository, you will need to sign the Contributor License Agreement (CLA) before your PR can be accepted.
+**NOTE:** A maintainer of the Loki repo must approve and run the continuous integration (CI) workflows for community contributions.
+7. Add the `type/docs` label to identify your PR as a docs contribution. This helps the documentation team track our work.
+8. If your contribution needs to be added to the current release or previous releases, apply the appropriate `backport` label. You can find more information about backporting in the [Writers' toolkit](https://grafana.com/docs/writers-toolkit/review/backporting/).
For larger contributions, for example documenting a new feature or adding a new topic, consider running the project locally to see how the changes look before making a pull request.
-The docs team has created a [Writer's Toolkit](https://grafana.com/docs/writers-toolkit/) that documents how we write documentation at Grafana Labs. The Writer's toolkit contains information about how we structure documentation at Grafana, including templates for different types of topics, information about Hugo shortcodes that extend markdown to add additional features, and information about linters and other tools that we use to write documentation. The Writers' Toolkit also includes our [Style Guide](https://grafana.com/docs/writers-toolkit/write/style-guide/).
+The docs team has created a [Writers' Toolkit](https://grafana.com/docs/writers-toolkit/) that documents how we write documentation at Grafana Labs. Writers' Toolkit contains information about how we structure documentation at Grafana, including templates for different types of topics, information about Hugo shortcodes that extend markdown to add additional features, and information about linters and other tools that we use to write documentation. Writers' Toolkit also includes our [Style Guide](https://grafana.com/docs/writers-toolkit/write/style-guide/).
Note that in Hugo the structure of the documentation is based on the folder structure of the documentation repository. The URL structure is generated based on the folder structure and file names. Try to avoid moving or renaming files, as this will break cross-references to those files. If you must move or rename files, run `make docs` as described below to find and fix broken links before you submit your pull request.
+## Shared content
+
+**NOTE:** As of Loki/GEL 3.0, there will be shared files between the Loki docs and the GEL docs. The Grafana Enterprise Logs documentation will pull in content from the Loki repo when publishing the GEL docs. Files that are shared between the two doc sets will contain a comment indicating that the content is shared.
+
+For more information about shared content, see the [reuse content](https://grafana.com/docs/writers-toolkit/write/reuse-content/) section of the Writers' Toolkit.
+
+For more information about building and testing documentation, see the [build and review](https://grafana.com/docs/writers-toolkit/review/) section of the Writers' Toolkit.
+
## Testing documentation
Loki uses the static site generator [Hugo](https://gohugo.io/) to generate the documentation. The Loki repository uses a continuous integration (CI) action to sync documentation to the [Grafana website](https://grafana.com/docs/loki/latest). The CI is triggered on every merge to main in the `docs` subfolder.
From 77a2580052ca53c94600901038f7d88234f174e2 Mon Sep 17 00:00:00 2001
From: Paul Rogers <129207811+paul1r@users.noreply.github.com>
Date: Thu, 29 Feb 2024 17:47:21 -0500
Subject: [PATCH 130/130] fix: Add data race protection for the tm.wg var
(#12100)
---
.../stores/shipper/indexshipper/downloads/table_manager.go | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go b/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go
index 21cdbc8aa41c..8d3875afe75c 100644
--- a/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go
+++ b/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go
@@ -116,7 +116,9 @@ func NewTableManager(cfg Config, openIndexFileFunc index.OpenIndexFileFunc, inde
}
func (tm *tableManager) loop() {
+ tm.tablesMtx.Lock()
tm.wg.Add(1)
+ tm.tablesMtx.Unlock()
defer tm.wg.Done()
syncTicker := time.NewTicker(tm.cfg.SyncInterval)
@@ -151,10 +153,10 @@ func (tm *tableManager) loop() {
func (tm *tableManager) Stop() {
tm.cancel()
- tm.wg.Wait()
tm.tablesMtx.Lock()
defer tm.tablesMtx.Unlock()
+ tm.wg.Wait()
for _, table := range tm.tables {
table.Close()
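For context, a minimal self-contained sketch of the ordering this patch enforces (the names are simplified stand-ins, not the actual Loki types): guarding `wg.Add` in the background loop with the same mutex that `Stop` holds around `wg.Wait` means the two calls are never concurrent, which is the data race the commit title refers to.

```go
package main

import "sync"

// manager is a stand-in for tableManager: a background loop plus a Stop
// that must not tear down shared state while the loop still uses it.
type manager struct {
	mtx  sync.Mutex
	wg   sync.WaitGroup
	done chan struct{}
}

func (m *manager) loop() {
	// Guard Add with the mutex Stop holds around Wait, so Add is always
	// ordered strictly before or after Wait -- never concurrent with it.
	m.mtx.Lock()
	m.wg.Add(1)
	m.mtx.Unlock()
	defer m.wg.Done()

	<-m.done // stand-in for the real sync-ticker loop
}

func (m *manager) Stop() {
	close(m.done) // stand-in for tm.cancel()

	m.mtx.Lock()
	defer m.mtx.Unlock()
	// Waiting under the lock mirrors the patch: any loop that already ran
	// Add has been counted; a loop that has not yet taken the lock will
	// see the closed done channel and exit promptly.
	m.wg.Wait()
	// ...close tables here, as the real Stop does...
}

func main() {
	m := &manager{done: make(chan struct{})}
	go m.loop()
	m.Stop()
}
```

Run with `go run -race` to confirm the detector no longer flags an unsynchronized Add/Wait pair.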